diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000..3c1e1429 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,25 @@ +# This file contains editor configuration directives that match the preferred +# coding style for LCOV contributions. +# +# See https://editorconfig.org + +root = true + +[*] +indent_style = space +indent_size = 4 +tab_width = 8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true +charset = utf-8 + +[*.sh] +indent_style = tab +indent_size = 8 +tab_width = 8 + +[{Makefile,*.mak}] +indent_style = tab +indent_size = 8 +tab_width = 8 diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..dfdb8b77 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +*.sh text eol=lf diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..49baf655 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,29 @@ +# +# Copyright (c) 2024 Sebastian Pipping +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +version: 2 +updates: + + - package-ecosystem: "github-actions" + commit-message: + include: "scope" + prefix: "Actions" + directory: "/" + labels: + - "enhancement" + schedule: + interval: "weekly" diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml new file mode 100644 index 00000000..62e36d60 --- /dev/null +++ b/.github/workflows/codespell.yml @@ -0,0 +1,45 @@ +# +# Copyright (c) 2024 Sebastian Pipping +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +name: Enforce codespell-clean spelling + +on: + pull_request: + push: + schedule: + - cron: '0 14 * * 5' # Every Friday 2pm + workflow_dispatch: + +# Drop permissions to minimum for security +permissions: + contents: read + +jobs: + codespell: + name: Enforce codespell-clean spelling + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v5 + + - uses: codespell-project/actions-codespell@v2 + with: + # "bu" is man page markup (file man/genhtml.1 and man/lcov.1) + # "MIS" is an abbreviation code of "Missed" (file bin/genhtml) + # "nd" is variable $nd (file bin/genhtml) + # "numbrs" is a variable name related to "branches" (file tests/bin/mkinfo) + # Words need to be (1) separated by a comma and (2) all lowercase! 
+ ignore_words_list: bu,mis,nd,numbrs diff --git a/.github/workflows/run_test_suite.yml b/.github/workflows/run_test_suite.yml new file mode 100644 index 00000000..0e367761 --- /dev/null +++ b/.github/workflows/run_test_suite.yml @@ -0,0 +1,145 @@ +# +# Copyright (c) 2024 Sebastian Pipping +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +name: Run the test suite + +on: + pull_request: + push: + schedule: + - cron: '0 14 * * 5' # Every Friday 2pm + workflow_dispatch: + +# Drop permissions to minimum for security +permissions: + contents: read + +jobs: + test_suite: + name: "Run the test suite (GCC ${{ matrix.gcc }}, ${{ matrix.runs-on }})" + runs-on: ${{ matrix.runs-on }} + strategy: + fail-fast: false + matrix: + include: + - runs-on: ubuntu-24.04 + gcc: 9 + install: g++-9 gcc-9 cpp-9 + - runs-on: ubuntu-24.04 + gcc: 10 + install: g++-10 gcc-10 cpp-10 + # GCC 10 to 14 are assumed to behave "the same", + # so we are skipping GCC 11, 12, 13 here to save CI resources + - runs-on: ubuntu-24.04 + gcc: 14 + install: + - runs-on: ubuntu-24.04 + gcc: 15 + install: binutils g++-15 gcc-15 cpp-15 + steps: + - uses: actions/checkout@v5 + + - name: Add repository "ubuntu-toolchain-r" for GCC 15 + if: "${{ matrix.gcc == '15' }}" + run: | + set -x + # The repository is at home at https://launchpad.net/~ubuntu-toolchain-r/+archive/ubuntu/test . 
+ # NOTE: plucky is 25.04 (not 24.04 LTS) + wget -O - 'https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xc8ec952e2a0e1fbdc5090f6a2c277a0a352154e5' | sudo apt-key add - + sudo add-apt-repository 'deb https://ppa.launchpadcontent.net/ubuntu-toolchain-r/test/ubuntu plucky main' + + - name: Install dependencies + run: |- + ubuntu_packages=( + # Perl runtime dependencies as documented in README + libcapture-tiny-perl # CPAN Capture::Tiny + libdatetime-perl # CPAN DateTime + libdevel-cover-perl # CPAN Devel::Cover + libdigest-md5-file-perl # CPAN Digest::MD5 + libfile-spec-native-perl # CPAN File::Spec + libjson-xs-perl # CPAN JSON::XS + # CPAN Memory::Process, see below + # CPAN Module::Load::Conditional + libscalar-list-utils-perl # CPAN Scalar::Util + # CPAN Time::HiRes + libtimedate-perl # CPAN TimeDate + + # Non-Perl runtime dependencies as documented in README + llvm # for command "llvm-profdata" + python3-coverage # PyPI coverage + python3-xlsxwriter # PyPI xlsxwriter + + # Additional dependencies for "make check" + libgd-perl # CPAN GD + ) + set -x + + sudo apt-get update + sudo apt-get install --no-install-recommends --yes -V "${ubuntu_packages[@]}" + + sudo perl -MCPAN -e 'install(Memory::Process)' # no package in Ubuntu + + - name: "Make GCC ${{ matrix.gcc }} systemwide default" + run: |- + set -x -o pipefail + if [[ "${{ matrix.install }}" != "" ]]; then + sudo apt-get update + sudo apt-get install --no-install-recommends --yes -V ${{ matrix.install }} + fi + + # Make requested version GCC and GCOV the system default + # before we have an easy way to fully divert "make check" + # off of the default commands + for i in cpp {,x86_64-linux-gnu-}{g++,gcc{,-{ar,nm,ranlib}},gcov{,-{dump,tool}},gfortran} lto-dump ; do + [[ -e /usr/bin/"${i}" ]] || continue + [[ -e /usr/bin/"${i}-${{ matrix.gcc }}" ]] || continue + sudo rm /usr/bin/"${i}" + sudo ln -s "${i}-${{ matrix.gcc }}" /usr/bin/"${i}" + "${i}" --version | head -n1 + done + + - name: make install + run: |- + set -x -o pipefail + make install PREFIX=/usr CFG_DIR=/etc DESTDIR="${PWD}/ROOT" + find ROOT/ | sort | xargs -r ls -ld + + - name: make uninstall + run: |- + set -x -o pipefail + make uninstall PREFIX=/usr CFG_DIR=/etc DESTDIR="${PWD}/ROOT" + find ROOT/ | sort | xargs -r ls -ld + diff -u0 <(echo 'total 0') <(ls -l ROOT/) # i.e. 
fail CI if leftovers + + - name: make check + run: |- + set -x -o pipefail + make check + + - name: Upload test log as an artifact + uses: actions/upload-artifact@v4 + with: + name: "lcov-${{ github.sha }}-${{ runner.os }}-GCC-${{ matrix.gcc }}-test-log" # .zip + path: tests/test.log + if-no-files-found: error + + - name: Upload test directory shrapnel as an artifact + uses: actions/upload-artifact@v4 + with: + name: "lcov-${{ github.sha }}-${{ runner.os }}-GCC-${{ matrix.gcc }}-shrapnel" # .zip + path: tests + #if-no-files-found: error diff --git a/.gitignore b/.gitignore index 51f95205..1fbea972 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,90 @@ *.info *.tar.gz *.rpm +*.tdy +*.orig +*.rej + +# ignore autogenerated test files +/tests/**/*.log +/tests/**/*.counts +/tests/**/*.info.gz +/tests/**/*.info.json +/tests/lcov/diff/diff +/tests/lcov/diff/new/prog +/tests/lcov/diff/old/prog +/tests/gendiffcov/simple/a.out +/tests/gendiffcov/simple/criteria.err +/tests/gendiffcov/simple/diff.txt +/tests/gendiffcov/simple/diff_broken.txt +/tests/gendiffcov/simple/diff_r.txt +/tests/gendiffcov/simple/navigation.err +/tests/gendiffcov/simple/test.cpp +/tests/lcov/add/prune +/tests/lcov/add/prune2 +/tests/lcov/add/prune3 +/tests/lcov/add/track +/tests/lcov/diff/patched_normalized.info-e +/tests/lcov/extract/a.out +/tests/lcov/extract/lcov.json +/tests/lcov/extract/testRC +/tests/gendiffcov/filter/brace.info.filtered +/tests/gendiffcov/filter/brace.info.orig +/tests/gendiffcov/insensitive/TEst.cpp.annotated +/tests/gendiffcov/insensitive/a.out +/tests/gendiffcov/insensitive/diff.txt +/tests/gendiffcov/synthesize/a.out +/tests/gendiffcov/synthesize/annotate.sh +/tests/gendiffcov/synthesize/test.cpp +/tests/gendiffcov/synthesize/test.cpp.annotated +/tests/genhtml/lambda/lambda +/tests/lcov/branch/macro +/tests/lcov/branch/no_macro +/tests/lcov/demangle/a.out +/tests/lcov/demangle/lcov.json +/tests/lcov/exception/a.out +/tests/lcov/exception/lcov.json +/tests/lcov/extract/rcOptBug +/tests/lcov/extract/rcOptBug.json +/tests/lcov/format/lcov.json +/tests/lcov/gcov-tool/test +/tests/perl2lcov/lcov.json + + +# ignore autogenerated test directories +/tests/genhtml/lambda/report/ +/tests/genhtml/out_full/ +/tests/genhtml/out_part1/ +/tests/genhtml/out_part2/ +/tests/genhtml/out_target/ +/tests/genhtml/out_zero/ +/tests/genhtml/relative/relative/ +/tests/src/ +/tests/gendiffcov/insensitive/differential/ +/tests/gendiffcov/simple/baseline-filter-dark/ +/tests/gendiffcov/simple/baseline-filter/ +/tests/gendiffcov/simple/baseline/ +/tests/gendiffcov/simple/criteria/ +/tests/gendiffcov/simple/current/ +/tests/gendiffcov/simple/differential--show-details--hier/ +/tests/gendiffcov/simple/differential--show-details/ +/tests/gendiffcov/simple/differential/ +/tests/gendiffcov/simple/differential_nobranch/ +/tests/gendiffcov/simple/elidePath/ +/tests/gendiffcov/simple/mismatchPath/ +/tests/gendiffcov/simple/mismatched/ +/tests/gendiffcov/simple/navigation/ +/tests/gendiffcov/simple/no_annotation/ +/tests/gendiffcov/simple/no_baseline/ +/tests/gendiffcov/simple/no_owners/ +/tests/gendiffcov/simple/noncode_differential--dark-mode/ +/tests/gendiffcov/simple/noncode_differential/ +/tests/gendiffcov/simple/reverse/ +/tests/gendiffcov/simple/reverse_nobranch/ +/tests/gendiffcov/synthesize/annotateErr/ +/tests/gendiffcov/synthesize/annotated/ +/tests/gendiffcov/synthesize/vanilla/ +/tests/lcov/extract/separate/ +/tests/perl2lcov/cover_genhtml/ +/tests/perl2lcov/cover_one/ +/tests/perl2lcov/perl2lcov_report/ diff --git 
a/.perltidyrc b/.perltidyrc new file mode 100644 index 00000000..b06d43c3 --- /dev/null +++ b/.perltidyrc @@ -0,0 +1,153 @@ +# This file contains perltidy configuration directives that match the preferred +# Perl coding style for LCOV contributions. +# +# See https://perltidy.sourceforge.net + +# I/O control +--add-terminal-newline # -atnl [=default] +--backup-file-extension="bak" # -bext="bak" [=default] +--character-encoding="utf8" # -enc="utf8" +--format="tidy" # -fmt="tidy" [=default] +--iterations=1 # -it=1 [=default] +--nologfile # -nlog [=default] +--output-line-ending="unix" # -ole="unix" +--noquiet # -nq [=default] +--nouse-unicode-gcstring # -ngcs [=default] +--nowarning-output # -nw [=default] + +# Basic formatting options +--nocheck-syntax # -nsyn [=default] +--default-tabsize=8 # -dt=8 [=default] +--extended-syntax # -xs [=default] +--indent-columns=4 # -i=4 [=default] +--maximum-line-length=80 # -l=80 [=default] +--perl-syntax-check-flags="-c -T" # -pscf="-c -T" [=default] +--notabs # -nt [=default] + +# HTML options +--html-entities # -hent [=default] +--html-table-of-contents # -toc [=default] + +# pod2html options +--nohtml # --nohtml [=default] +--nopod2html # -npod + +# Debugging +--fuzzy-line-length # -fll [=default] +--maximum-file-size-mb=10 # -maxfs=10 [=default] +--maximum-level-errors=1 # -maxle=1 [=default] +--maximum-unexpected-errors=0 # -maxue=0 [=default] +--nomemoize # -nmem +--recombine # --recombine [=default] +--short-concatenation-item-length=8 # -scl=8 [=default] +--noshow-options # -nopt [=default] +--timestamp # -ts [=default] + +# Code indentation control +--nobrace-left-and-indent # -nbli [=default] +--closing-brace-indentation=0 # -cbi=0 [=default] +--closing-paren-indentation=0 # -cpi=0 [=default] +--closing-square-bracket-indentation=0 # -csbi=0 [=default] +--continuation-indentation=4 # -ci=4 +--noextended-continuation-indentation # -nxci [=default] +--extended-line-up-parentheses # -xlp +--line-up-parentheses # -lp +--nooutdent-labels # -nola +--nooutdent-long-quotes # -nolq + +# Whitespace control +--add-semicolons # -asc [=default] +--add-whitespace # -aws [=default] +--block-brace-tightness=0 # -bbt=0 [=default] +--brace-tightness=2 # -bt=2 +--delete-old-whitespace # -dws +--delete-semicolons # -dsm [=default] +--function-paren-vertical-alignment # -fpva [=default] +--keyword-paren-inner-tightness=1 # -kpit=1 [=default] +--logical-padding # -lop [=default] +# HGC: prefer space around '.' operator. +# My eyesight is such that they disappear otherwise. +--want-left-space="." # -wls="." +--want-right-space="." # -wrs="." +--paren-tightness=2 # -pt=2 +--nospace-for-semicolon # -nsfs +--space-prototype-paren=0 # -spp=0 +--nospace-terminal-semicolon # -nsts +--square-bracket-tightness=2 # -sbt=2 +--square-bracket-vertical-tightness=1 # -sbvt=1 +--square-bracket-vertical-tightness-closing=0 # -sbvtc=0 [=default] +--notrim-qw # -ntqw +--valign-block-comments # -vbc [=default] +--valign-code # -vc [=default] +--valign-exclusion-list="if , . : ?" # -vxl="if , . : ?" 
+--valign-side-comments # -vsc [=default] + +# Comment controls +--closing-side-comment-else-flag=0 # -csce=0 [=default] +--closing-side-comment-interval=6 # -csci=6 [=default] +--closing-side-comment-maximum-text=20 # -csct=20 [=default] +--closing-side-comments-balanced # -cscb [=default] +--code-skipping # -cs [=default] +--noformat-skipping # -nfs +--hanging-side-comments # -hsc [=default] +--indent-block-comments # -ibc [=default] +--minimum-space-to-comment=4 # -msc=4 [=default] +--non-indenting-braces # -nib [=default] +--nooutdent-long-comments # -nolc +--static-block-comments # -sbc [=default] +--nostatic-side-comments # -nssc [=default] + +# Linebreak controls +--add-newlines # -anl [=default] +--block-brace-vertical-tightness=0 # -bbvt=0 [=default] +--brace-vertical-tightness=0 # -bvt=0 [=default] +--brace-vertical-tightness-closing=0 # -bvtc=0 [=default] +--break-after-all-operators # -baao +--break-after-labels=0 # -bal=0 [=default] +--break-before-hash-brace=0 # -bbhb=0 [=default] +--break-before-hash-brace-and-indent=0 # -bbhbi=0 [=default] +--break-before-paren=0 # -bbp=0 [=default] +--break-before-paren-and-indent=0 # -bbpi=0 [=default] +--break-before-square-bracket=0 # -bbsb=0 [=default] +--break-before-square-bracket-and-indent=0 # -bbsbi=0 [=default] +--cuddled-break-option=1 # -cbo=1 [=default] +--cuddled-else # -cuddled-blocks +--delete-old-newlines # -dnl [=default] +--one-line-block-nesting=0 # -olbn=0 [=default] +--one-line-block-semicolons=2 # -olbs=2 +--opening-brace-always-on-right # -bar +--opening-sub-brace-on-new-line # -sbl +--paren-vertical-tightness=2 # -pvt=2 +--paren-vertical-tightness-closing=2 # -pvtc=2 +--space-backslash-quote=1 # -sbq=1 [=default] +--weld-nested-containers # -wn + +# Controlling list formatting +--comma-arrow-breakpoints=5 # -cab=5 [=default] +--maximum-fields-per-table=80 # -mft=80 + +# Retaining or ignoring existing line breaks +--break-at-old-attribute-breakpoints # -boa [=default] +--break-at-old-keyword-breakpoints # -bok [=default] +--break-at-old-logical-breakpoints # -bol [=default] +--break-at-old-ternary-breakpoints # -bot [=default] + +# Blank line control +--blank-lines-before-packages=1 # -blbp=1 [=default] +--blank-lines-before-subs=1 # -blbs=1 [=default] +--blanks-before-blocks # -bbb [=default] +--noblanks-before-comments # -nbbc +--keep-old-blank-lines=1 # -kbl=1 [=default] +--keyword-group-blanks-after=1 # -kgba=1 [=default] +--keyword-group-blanks-before=1 # -kgbb=1 [=default] +--nokeyword-group-blanks-delete # -nkgbd [=default] +--nokeyword-group-blanks-inside # -nkgbi [=default] +--keyword-group-blanks-repeat-count=0 # -kgbr=0 [=default] +--keyword-group-blanks-size=5 # -kgbs=5 [=default] +--long-block-line-count=8 # -lbl=8 [=default] +--maximum-consecutive-blank-lines=1 # -mbl=1 [=default] + +# Other controls +--look-for-autoloader # -lal [=default] +--look-for-selfloader # -lsl [=default] +--pass-version-line # -pvl [=default] diff --git a/CONTRIBUTING b/CONTRIBUTING index 6890789b..5994a6c4 100644 --- a/CONTRIBUTING +++ b/CONTRIBUTING @@ -8,6 +8,7 @@ example: * Fixes for code or documentation * Performance and compatibility improvements * Functional enhancements + * New and/or improved testcases There are some rules that these contributions must follow to be acceptable for inclusion: @@ -16,8 +17,8 @@ inclusion: 2. The contribution must follow a particular format. 3. The contribution must be signed. 
-Once you have made sure that your contribution follows these rules, send it via -e-mail to the LTP coverage mailing list [1]. +Once you have made sure that your contribution follows these rules, open a +pull request for the LCOV code repository [1]. Signing your work @@ -31,7 +32,7 @@ end of the explanation of a patch: By signing a patch, you certify the following: By making a contribution to the LTP GCOV extension (LCOV) on - http://ltp.sourceforge.net, I certify that: + https://github.com/linux-test-project/lcov, I certify that: a) The contribution was created by me and I have the right to submit it under the terms and conditions of the open source license @@ -52,17 +53,84 @@ Project goals ============= The goal of LCOV is to provide a set of command line tools that can be used to -collect, process and visualize code coverage data as produced by the gcov tool -that is part of the GNU Compiler Collection (GCC) [2]. +collect, process, and visualize code coverage data in an easy-to-use way, +suitable for deployment in projects of a wide range of sizes - in particular, +deployment in automated CI/CD systems and large projects implemented using +multiple languages. +LCOV is based on existing environment-specific profiling mechanisms including, +but not limited to, the gcov tool that is part of the GNU Compiler Collection +(GCC) [2]. If you have an idea for a contribution but are unsure if it aligns with the -project goals, feel free to discuss the idea on the LTP coverage mailing -list [1]. +project goals, feel free to discuss the idea using the issue tracker on the +LCOV code repository site [1]. Contribution format =================== +Coding style: +------------- + +The lcov project maintainers try to adhere to a common set of code formatting +rules. While no one ever agrees with anyone else's choices, almost everyone +agrees that consistency is better than its lack. + +To this end, the lcov project uses 'perltidy' [3] to format our code. +Perl-Tidy-20221112 has been tested and is known to work. Newer versions +may also work, but some older versions are known not to. + +Please execute + make checkstyle MODE=full UPDATE=true +on your code and verify correctness of the reformatting (if any) before +submitting a pull request. + +Testing: +-------- + +At minimum, please execute + + $ make test + +and verify that all testcases pass, then execute + + $ make clean + +and verify that all shrapnel has been removed - before submitting a +pull request. + +It is strongly requested that PRs containing new features be +accompanied by tests for those features. Lack of tests may substantially +delay review and/or acceptance of your contribution. + +It is recommended to check code coverage of your contribution. +The lcov project currently uses Devel::Cover to measure perl code coverage, +then 'perl2lcov', 'py2lcov', and lcov to generate a coverage report: + + $ make COVERAGE=1 test + +and then check coverage by pointing your browser to + ./tests/lcov_coverage/index.html + +Note that certain version combinations of gcc, perl, and Devel::Cover +will crash during test execution. To work around the issue, some tests +ignore these errors if passed the "--keep-going" flag: + + $ make COVERAGE=1 TESTCASE_ARGS=--keep-going test + +Pull requests which add tests for existing features or which enhance +existing tests are actively encouraged. +The lcov maintainers are painfully aware that the code coverage of the +lcov test suite on the lcov code base is embarrassingly low.
+ +Note that existing tests and/or test drivers may themselves need to be +modified in order to work correctly on other platforms and with other +toolchains. Pull requests which address issues of this nature are +also actively encouraged. + +Submitting a change: +-------------------- + To contribute a change, please create a patch using 'git format-patch'. Alternatively you can use the diff utility with the following command line options: @@ -87,7 +155,8 @@ With your Signed-off-by, you certify the rules stated in section "Signing your work". --- +-- -[1] ltp-coverage@lists.sourceforge.net -[2] http://gcc.gnu.org +[1] https://github.com/linux-test-project/lcov +[2] https://gcc.gnu.org +[3] https://metacpan.org/dist/Perl-Tidy/view/bin/perltidy diff --git a/Makefile b/Makefile index d890bbc6..cbaec736 100644 --- a/Makefile +++ b/Makefile @@ -8,25 +8,91 @@ # and the lcov.rpm file. Just make sure to adjust the VERSION # and RELEASE variables below - both version and date strings # will be updated in all necessary files. +# - checkstyle: check source files for coding style issues +# MODE=(full|diff) [UPDATE=1] # - clean: remove all generated files -# +# - release: finalize release and create git tag for specified VERSION +# - test: run regression tests. +# additional Make variables: +# COVERAGE=1 +# - enable perl coverage data collection +# TESTCASE_ARGS=string +# - pass these arguments to testcase script +# Sample args: +# --update - overwrite GOLD file with +# result +# --parallel n - use --parallel flag +# --home path - path to lcov script +# --llvm - use LLVM rather than gcc +# --keep-going - don't stop on error +# --verbose - echo commands to test.log +# Note that not all tests have been updated to use +# all flags VERSION := $(shell bin/get_version.sh --version) RELEASE := $(shell bin/get_version.sh --release) FULL := $(shell bin/get_version.sh --full) -# Set this variable during 'make install' to specify the Perl interpreter used in +# Set this variable during 'make install' to specify the interpreters used in # installed scripts, or leave empty to keep the current interpreter.
-export LCOV_PERL_PATH := /usr/bin/perl +export LCOV_PERL_PATH := /usr/bin/perl +export LCOV_PYTHON_PATH := /usr/bin/python3 PREFIX := /usr/local +FIRST_CHAR = $(shell echo "$(DESTDIR)$(PREFIX)" | cut -c 1) +ifneq ("$(FIRST_CHAR)", "/") +$(error "DESTDIR + PREFIX expected to be absolute path - found $(FIRST_CHAR)") +endif + CFG_DIR := $(PREFIX)/etc BIN_DIR := $(PREFIX)/bin +LIB_DIR := $(PREFIX)/lib/lcov MAN_DIR := $(PREFIX)/share/man +SHARE_DIR := $(PREFIX)/share/lcov/ +SCRIPT_DIR := $(SHARE_DIR)/support-scripts + +CFG_INST_DIR := $(DESTDIR)$(CFG_DIR) +BIN_INST_DIR := $(DESTDIR)$(BIN_DIR) +LIB_INST_DIR := $(DESTDIR)$(LIB_DIR) +MAN_INST_DIR := $(DESTDIR)$(MAN_DIR) +SHARE_INST_DIR := $(DESTDIR)$(SHARE_DIR) +SCRIPT_INST_DIR := $(SHARE_INST_DIR)/support-scripts + TMP_DIR := $(shell mktemp -d) -FILES := $(wildcard bin/*) $(wildcard man/*) README Makefile \ - $(wildcard rpm/*) lcovrc +FILES := README Makefile lcovrc \ + $(wildcard bin/*) $(wildcard example/*) $(wildcard lib/*) \ + $(wildcard man/*) $(wildcard rpm/*) $(wildcard scripts/*) +DIST_CONTENT := CONTRIBUTING COPYING README Makefile lcovrc \ + bin example lib man rpm scripts tests + +EXES = \ + lcov genhtml geninfo genpng gendesc \ + perl2lcov py2lcov xml2lcov xml2lcovutil.py \ + llvm2lcov +# there may be both public and non-public user scripts - so lets not show +# any of their names +SCRIPTS = $(shell ls scripts | grep -v -E '([\#\~]|\.orig|\.bak|\.BAK)' ) +LIBS = lcovutil.pm +MAN_SECTIONS = 1 5 +# similarly, lets not talk about man pages +MANPAGES = $(foreach s, $(MAN_SECTIONS), $(foreach m, $(shell cd man ; ls *.$(s)), man$(s)/$(m))) + +# Program for checking coding style +CHECKSTYLE = $(CURDIR)/bin/checkstyle.sh + +INSTALL = install +FIX = $(realpath bin/fix.pl) +RM = rm -f +RMDIR = rmdir + +export V +ifeq ("${V}","1") + echocmd= +else + echocmd=echo $1 ; +.SILENT: +endif .PHONY: all info clean install uninstall rpms test @@ -38,89 +104,224 @@ info: @echo " uninstall : delete binaries and man pages from DESTDIR (default /)" @echo " dist : create packages (RPM, tarball) ready for distribution" @echo " check : perform self-tests" + @echo " checkstyle: check source files for coding style issues" + @echo " release : finalize release and create git tag for specified VERSION" + @echo " test : same as 'make check'" clean: - rm -f lcov-*.tar.gz - rm -f lcov-*.rpm - make -C example clean - make -C tests -s clean + $(call echocmd," CLEAN lcov") + $(RM) lcov-*.tar.gz lcov-*.rpm + $(RM) -r ./bin/__pycache__ + $(MAKE) -C example -s clean + $(MAKE) -C tests -s clean + find . 
-name '*.tdy' -o -name '*.orig' | xargs rm -f install: - bin/install.sh bin/lcov $(DESTDIR)$(BIN_DIR)/lcov -m 755 - bin/install.sh bin/genhtml $(DESTDIR)$(BIN_DIR)/genhtml -m 755 - bin/install.sh bin/geninfo $(DESTDIR)$(BIN_DIR)/geninfo -m 755 - bin/install.sh bin/genpng $(DESTDIR)$(BIN_DIR)/genpng -m 755 - bin/install.sh bin/gendesc $(DESTDIR)$(BIN_DIR)/gendesc -m 755 - bin/install.sh man/lcov.1 $(DESTDIR)$(MAN_DIR)/man1/lcov.1 -m 644 - bin/install.sh man/genhtml.1 $(DESTDIR)$(MAN_DIR)/man1/genhtml.1 -m 644 - bin/install.sh man/geninfo.1 $(DESTDIR)$(MAN_DIR)/man1/geninfo.1 -m 644 - bin/install.sh man/genpng.1 $(DESTDIR)$(MAN_DIR)/man1/genpng.1 -m 644 - bin/install.sh man/gendesc.1 $(DESTDIR)$(MAN_DIR)/man1/gendesc.1 -m 644 - bin/install.sh man/lcovrc.5 $(DESTDIR)$(MAN_DIR)/man5/lcovrc.5 -m 644 - bin/install.sh lcovrc $(DESTDIR)$(CFG_DIR)/lcovrc -m 644 - bin/updateversion.pl $(DESTDIR)$(BIN_DIR)/lcov $(VERSION) $(RELEASE) $(FULL) - bin/updateversion.pl $(DESTDIR)$(BIN_DIR)/genhtml $(VERSION) $(RELEASE) $(FULL) - bin/updateversion.pl $(DESTDIR)$(BIN_DIR)/geninfo $(VERSION) $(RELEASE) $(FULL) - bin/updateversion.pl $(DESTDIR)$(BIN_DIR)/genpng $(VERSION) $(RELEASE) $(FULL) - bin/updateversion.pl $(DESTDIR)$(BIN_DIR)/gendesc $(VERSION) $(RELEASE) $(FULL) - bin/updateversion.pl $(DESTDIR)$(MAN_DIR)/man1/lcov.1 $(VERSION) $(RELEASE) $(FULL) - bin/updateversion.pl $(DESTDIR)$(MAN_DIR)/man1/genhtml.1 $(VERSION) $(RELEASE) $(FULL) - bin/updateversion.pl $(DESTDIR)$(MAN_DIR)/man1/geninfo.1 $(VERSION) $(RELEASE) $(FULL) - bin/updateversion.pl $(DESTDIR)$(MAN_DIR)/man1/genpng.1 $(VERSION) $(RELEASE) $(FULL) - bin/updateversion.pl $(DESTDIR)$(MAN_DIR)/man1/gendesc.1 $(VERSION) $(RELEASE) $(FULL) - bin/updateversion.pl $(DESTDIR)$(MAN_DIR)/man5/lcovrc.5 $(VERSION) $(RELEASE) $(FULL) + $(INSTALL) -d -m 755 $(BIN_INST_DIR) + for b in $(EXES) ; do \ + $(call echocmd," INSTALL $(BIN_INST_DIR)/$$b") \ + $(INSTALL) -m 755 bin/$$b $(BIN_INST_DIR)/$$b ; \ + $(FIX) --version $(VERSION) --release $(RELEASE) \ + --libdir $(LIB_DIR) --bindir $(BIN_DIR) \ + --fixver --fixlibdir --fixbindir \ + --exec $(BIN_INST_DIR)/$$b ; \ + done + $(INSTALL) -d -m 755 $(SCRIPT_INST_DIR) + for s in $(SCRIPTS) ; do \ + $(call echocmd," INSTALL $(SCRIPT_INST_DIR)/$$s") \ + $(INSTALL) -m 755 scripts/$$s $(SCRIPT_INST_DIR)/$$s ; \ + $(FIX) --version $(VERSION) --release $(RELEASE) \ + --libdir $(LIB_DIR) --bindir $(BIN_DIR) \ + --fixver --fixlibdir \ + --fixscriptdir --scriptdir $(SCRIPT_DIR) \ + --exec $(SCRIPT_INST_DIR)/$$s ; \ + done + $(INSTALL) -d -m 755 $(LIB_INST_DIR) + for l in $(LIBS) ; do \ + $(call echocmd," INSTALL $(LIB_INST_DIR)/$$l") \ + $(INSTALL) -m 644 lib/$$l $(LIB_INST_DIR)/$$l ; \ + $(FIX) --version $(VERSION) --release $(RELEASE) \ + --libdir $(LIB_DIR) --bindir $(BIN_DIR) \ + --fixver --fixlibdir --fixbindir \ + --exec $(LIB_INST_DIR)/$$l ; \ + done + for section in $(MAN_SECTIONS) ; do \ + DEST=$(MAN_INST_DIR)/man$$section ; \ + $(INSTALL) -d -m 755 $$DEST ; \ + for m in man/*.$$section ; do \ + F=`basename $$m` ; \ + $(call echocmd," INSTALL $$DEST/$$F") \ + $(INSTALL) -m 644 man/$$F $$DEST/$$F ; \ + $(FIX) --version $(VERSION) --fixver --fixdate \ + --fixscriptdir --scriptdir $(SCRIPT_DIR) \ + --manpage $$DEST/$$F ; \ + done ; \ + done + mkdir -p $(SHARE_INST_DIR) + for d in example tests ; do \ + ( cd $$d ; make clean ) ; \ + find $$d -type d -exec mkdir -p "$(SHARE_INST_DIR){}" \; ; \ + find $$d -type f -exec $(INSTALL) -Dm 644 "{}" "$(SHARE_INST_DIR){}" \; ; \ + done ; + @chmod -R ugo+x 
$(SHARE_INST_DIR)/tests/bin + @find $(SHARE_INST_DIR)/tests \( -name '*.sh' -o -name '*.pl' \) -exec chmod ugo+x {} \; + $(INSTALL) -d -m 755 $(CFG_INST_DIR) + $(call echocmd," INSTALL $(CFG_INST_DIR)/lcovrc") + $(INSTALL) -m 644 lcovrc $(CFG_INST_DIR)/lcovrc + $(call echocmd," done INSTALL") + uninstall: - bin/install.sh --uninstall bin/lcov $(DESTDIR)$(BIN_DIR)/lcov - bin/install.sh --uninstall bin/genhtml $(DESTDIR)$(BIN_DIR)/genhtml - bin/install.sh --uninstall bin/geninfo $(DESTDIR)$(BIN_DIR)/geninfo - bin/install.sh --uninstall bin/genpng $(DESTDIR)$(BIN_DIR)/genpng - bin/install.sh --uninstall bin/gendesc $(DESTDIR)$(BIN_DIR)/gendesc - bin/install.sh --uninstall man/lcov.1 $(DESTDIR)$(MAN_DIR)/man1/lcov.1 - bin/install.sh --uninstall man/genhtml.1 $(DESTDIR)$(MAN_DIR)/man1/genhtml.1 - bin/install.sh --uninstall man/geninfo.1 $(DESTDIR)$(MAN_DIR)/man1/geninfo.1 - bin/install.sh --uninstall man/genpng.1 $(DESTDIR)$(MAN_DIR)/man1/genpng.1 - bin/install.sh --uninstall man/gendesc.1 $(DESTDIR)$(MAN_DIR)/man1/gendesc.1 - bin/install.sh --uninstall man/lcovrc.5 $(DESTDIR)$(MAN_DIR)/man5/lcovrc.5 - bin/install.sh --uninstall lcovrc $(DESTDIR)$(CFG_DIR)/lcovrc + for b in $(EXES) ; do \ + $(call echocmd," UNINST $(BIN_INST_DIR)/$$b") \ + $(RM) $(BIN_INST_DIR)/$$b ; \ + done + $(RMDIR) $(BIN_INST_DIR) || true + # .../lib/lcov installed by us - so safe to remove + $(call echocmd," UNINST $(LIB_INST_DIR)") + $(RM) -r $(LIB_INST_DIR) + $(call echocmd," UNINST $(shell dirname $(LIB_INST_DIR)) (if empty)") + $(RMDIR) `dirname $(LIB_INST_DIR)` || true + # .../share/lcov installed by us - so safe to remove + $(call echocmd," UNINST $(SHARE_INST_DIR)") + $(RM) -r $(SHARE_INST_DIR) + $(call echocmd," UNINST $(MAN_INST_DIR) pages") + for section in $(MAN_SECTIONS) ; do \ + DEST=$(MAN_INST_DIR)/man$$section ; \ + for m in man/*.$$section ; do \ + F=`basename $$m` ; \ + $(RM) $$DEST/$$F ; \ + done ; \ + $(RMDIR) $$DEST || true; \ + done + $(call echocmd," UNINST $(MAN_INST_DIR) (if empty)") + $(RMDIR) $(MAN_INST_DIR) || true; + $(call echocmd," UNINST $(shell dirname $(SHARE_INST_DIR)) (if empty)") + $(RMDIR) `dirname $(SHARE_INST_DIR)` + $(call echocmd," UNINST $(CFG_INST_DIR)/lcovrc") + $(RM) $(CFG_INST_DIR)/lcovrc + $(RMDIR) $(CFG_INST_DIR) || true + $(call echocmd," UNINST $(DESTDIR)/$(PREFIX)") + $(RMDIR) $(DESTDIR)$(PREFIX) || true dist: lcov-$(VERSION).tar.gz lcov-$(VERSION)-$(RELEASE).noarch.rpm \ lcov-$(VERSION)-$(RELEASE).src.rpm lcov-$(VERSION).tar.gz: $(FILES) - mkdir $(TMP_DIR)/lcov-$(VERSION) - cp -r * $(TMP_DIR)/lcov-$(VERSION) - bin/copy_dates.sh . $(TMP_DIR)/lcov-$(VERSION) - make -C $(TMP_DIR)/lcov-$(VERSION) clean - bin/updateversion.pl $(TMP_DIR)/lcov-$(VERSION) $(VERSION) $(RELEASE) $(FULL) - bin/get_changes.sh > $(TMP_DIR)/lcov-$(VERSION)/CHANGES + $(call echocmd," DIST lcov-$(VERSION).tar.gz") + $(RM) -r $(TMP_DIR)/lcov-$(VERSION) + mkdir -p $(TMP_DIR)/lcov-$(VERSION) + cp -r $(DIST_CONTENT) $(TMP_DIR)/lcov-$(VERSION) + ./bin/copy_dates.sh . 
$(TMP_DIR)/lcov-$(VERSION) + $(MAKE) -s -C $(TMP_DIR)/lcov-$(VERSION) clean >/dev/null + cd $(TMP_DIR)/lcov-$(VERSION) ; \ + $(FIX) --version $(VERSION) --release $(RELEASE) \ + --verfile .version --fixver --fixdate \ + $(patsubst %,bin/%,$(EXES)) $(patsubst %,scripts/%,$(SCRIPTS)) \ + $(patsubst %,lib/%,$(LIBS)) \ + $(patsubst %,man/%,$(notdir $(MANPAGES))) README rpm/lcov.spec + ./bin/get_changes.sh > $(TMP_DIR)/lcov-$(VERSION)/CHANGES || true cd $(TMP_DIR) ; \ tar cfz $(TMP_DIR)/lcov-$(VERSION).tar.gz lcov-$(VERSION) \ --owner root --group root mv $(TMP_DIR)/lcov-$(VERSION).tar.gz . - rm -rf $(TMP_DIR) + $(RM) -r $(TMP_DIR) lcov-$(VERSION)-$(RELEASE).noarch.rpm: rpms lcov-$(VERSION)-$(RELEASE).src.rpm: rpms rpms: lcov-$(VERSION).tar.gz - mkdir $(TMP_DIR) + $(call echocmd," DIST lcov-$(VERSION)-$(RELEASE).noarch.rpm") + mkdir -p $(TMP_DIR) mkdir $(TMP_DIR)/BUILD mkdir $(TMP_DIR)/RPMS mkdir $(TMP_DIR)/SOURCES mkdir $(TMP_DIR)/SRPMS cp lcov-$(VERSION).tar.gz $(TMP_DIR)/SOURCES - cd $(TMP_DIR)/BUILD ; \ - tar xfz $(TMP_DIR)/SOURCES/lcov-$(VERSION).tar.gz \ - lcov-$(VERSION)/rpm/lcov.spec + ( \ + cd $(TMP_DIR)/BUILD ; \ + tar xfz ../SOURCES/lcov-$(VERSION).tar.gz \ + lcov-$(VERSION)/rpm/lcov.spec \ + ) rpmbuild --define '_topdir $(TMP_DIR)' --define '_buildhost localhost' \ + --define "_target_os linux" \ --undefine vendor --undefine packager \ - -ba $(TMP_DIR)/BUILD/lcov-$(VERSION)/rpm/lcov.spec + -ba $(TMP_DIR)/BUILD/lcov-$(VERSION)/rpm/lcov.spec --quiet mv $(TMP_DIR)/RPMS/noarch/lcov-$(VERSION)-$(RELEASE).noarch.rpm . + $(call echocmd," DIST lcov-$(VERSION)-$(RELEASE).src.rpm") mv $(TMP_DIR)/SRPMS/lcov-$(VERSION)-$(RELEASE).src.rpm . - rm -rf $(TMP_DIR) + $(RM) -r $(TMP_DIR) + +ifeq ($(COVERAGE), 1) +# write to .../tests/cover_db +export COVER_DB := $(shell echo `pwd`/tests/cover_db) +export PYCOV_DB := $(shell echo `pwd`/tests/pycov.dat) +export HTML_RPT := $(shell echo `pwd`/lcov_coverage) +#export LCOV_FORCE_PARALLEL = 1 +endif +export TESTCASE_ARGS test: check +# for COVERAGE mode check: run once with LCOV_FORCE_PARALLEL=1 and +# once without - so we can merge the result check: - @make -s -C tests check + if [ "x$(COVERAGE)" != 'x' ] ; then \ + if [ ! -d $(COVER_DB) ]; then \ + mkdir $(COVER_DB) ; \ + fi ; \ + echo "*** Run once, force parallel ***" ; \ + LCOV_FORCE_PARALLEL=1 $(MAKE) -s -C tests check LCOV_HOME=`pwd` ; \ + echo "*** Run again, no force ***" ; \ + fi + @$(MAKE) -s -C tests check LCOV_HOME=`pwd` + @if [ "x$(COVERAGE)" != 'x' ] ; then \ + $(MAKE) -s -C example LCOV_HOME=`pwd`; \ + $(MAKE) -s -C tests report ; \ + fi + +# Files to be checked for coding style issues - +# - anything containing "#!/usr/bin/env perl" or the like +# - anything named *.pm - expected to be perl module +# ... as long as the name doesn't end in .tdy or .orig +checkstyle: +ifeq ($(MODE),full) + @echo "Checking source files for coding style issues (MODE=full):" +else + @echo "Checking changes in source files for coding style issues (MODE=diff):" +endif + @RC=0 ; \ + CHECKFILES=`find . -path ./.git -prune -o \( \( -type f -exec grep -q '^#!.*perl' {} \; \) -o -name '*.pm' \) -not \( -name '*.tdy' -o -name '*.orig' -o -name '*~' \) -print `; \ + for FILE in $$CHECKFILES ; do \ + $(CHECKSTYLE) "$$FILE"; \ + if [ 0 != $$?
] ; then \ + RC=1; \ + echo "saw mismatch for $$FILE"; \ + if [ -f $$FILE.tdy -a "$(UPDATE)x" != 'x' ]; then \ + echo "updating $$FILE"; \ + mv $$FILE $$FILE.orig; \ + mv $$FILE.tdy $$FILE ; \ + fi \ + fi \ + done ; \ + exit $$RC + +release: + @if [ "$(origin VERSION)" != "command line" ] ; then echo "Please specify new version number, e.g. VERSION=1.16" >&2 ; exit 1 ; fi + @if [ -n "$$(git status --porcelain 2>&1)" ] ; then echo "The repository contains uncommitted changes" >&2 ; exit 1 ; fi + @if [ -n "$$(git tag -l v$(VERSION))" ] ; then echo "A tag for the specified version already exists (v$(VERSION))" >&2 ; exit 1 ; fi + @echo "Preparing release tag for version $(VERSION)" + git checkout master + bin/copy_dates.sh . . + $(FIX) --version $(VERSION) --release $(RELEASE) \ + --fixver --fixdate $(patsubst %,man/%,$(notdir $(MANPAGES))) \ + README rpm/lcov.spec + git commit -a -s -m "lcov: Finalize release $(VERSION)" + git tag v$(VERSION) -m "LCOV version $(VERSION)" + @echo "**********************************************" + @echo "Release tag v$(VERSION) successfully created" + @echo "Next steps:" + @echo " - Review resulting commit and tag" + @echo " - Publish with: git push origin master v$(VERSION)" + @echo "**********************************************" + diff --git a/README b/README index ad53c3cb..dc133dfd 100644 --- a/README +++ b/README @@ -1,35 +1,57 @@ ------------------------------------------------- - README file for the LTP GCOV extension (LCOV) - -- Last changes: 2019-02-28 - +- Last changes: 2024-12-25 ------------------------------------------------- Description ----------- - LCOV is an extension of GCOV, a GNU tool which provides information about - what parts of a program are actually executed (i.e. "covered") while running - a particular test case. The extension consists of a set of Perl scripts - which build on the textual GCOV output to implement the following enhanced + LCOV is a tool to manipulate and display information about what parts of a + program are actually executed (i.e. "covered") while running a particular test + case or set of testcases. LCOV consists of a set of Perl scripts which build on + the text output of various coverage tools - e.g., gcov, llvm-cov, Coverage.py, + Cobertura, Devel::Cover, Jacoco, etc. - to implement the following enhanced functionality: - * HTML based output: coverage rates are additionally indicated using bar - graphs and specific colors. + * HTML based output: coverage rates are indicated using bar + graphs and specific colors in a hyperlinked coverage report, intended + to enable the user to quickly diagnose and address coverage issues. * Support for large projects: overview pages allow quick browsing of - coverage data by providing three levels of detail: directory view, - file view and source code view. + coverage data by providing a hierarchical directory structure + view, a flat list of all source files in the project, or a three-level + detail view: directory, file and source code view. + + * Support for multiple languages - including C/C++, Perl, and Python. LCOV was initially designed to support Linux kernel coverage measurements, but works as well for coverage measurements on standard user space applications. + LCOV supports differential coverage, as well as date- and owner-binning. + See: + https://arxiv.org/abs/2008.07947 + or + https://ieeexplore.ieee.org/document/9438597 + for a detailed explanation of the concepts and several possible use models. 
+ + A video presentation of the basic ideas can be found at + http://doi.org/10.5281/zenodo.4653252 + + In addition, several other features and capabilities are available. See + section 6, below, for a brief description - and also see the man pages and + the test cases. + Further README contents ----------------------- 1. Included files 2. Installing LCOV - 3. An example of how to access kernel coverage data - 4. An example of how to access coverage data for a user space program - 5. Questions and Comments + 3. Dependencies + 4. An example of how to access kernel coverage data + 5. An example of how to access coverage data for a user space program + 6. LCOV features + 7. Questions and Comments + 8. Filing a new issue @@ -40,20 +62,25 @@ Further README contents bin/lcov - Tool for capturing LCOV coverage data bin/genhtml - Tool for creating HTML output from LCOV data bin/gendesc - Tool for creating description files as used by genhtml + bin/perl2lcov - Tool to translate Perl Devel::Cover data to lcov format + bin/llvm2lcov - Tool to translate LLVM 'llvm-cov' JSON data to LCOV format + bin/py2lcov - Tool to translate Python Coverage.py to lcov format + bin/xml2lcov - Tool to translate Cobertura-like XML coverage data + to lcov format bin/geninfo - Internal tool (creates LCOV data files) bin/genpng - Internal tool (creates png overviews of source files) - bin/install.sh - Internal tool (takes care of un-/installing) + lcovrc - LCOV configuration file man - Directory containing man pages for included tools example - Directory containing an example to demonstrate LCOV - lcovrc - LCOV configuration file + tests - Directory containing lcov regression tests Makefile - Makefile providing 'install' and 'uninstall' targets 2. Installing LCOV ------------------ The LCOV package is available as either RPM or tarball from: - - http://ltp.sourceforge.net/coverage/lcov.php + + https://github.com/linux-test-project/lcov/releases To install the tarball, unpack it to a directory and run: @@ -67,15 +94,89 @@ Change to the resulting lcov directory and type: make install +The default install location is /usr/local. Note that you may need to +have superuser permissions to write into system directories. + +To install in a different location - for example, your home directory, run: + + make PREFIX=$HOME/my_lcov install + +your PREFIX should be an absolute path. + +To run the LCOV regression test suite on your installation: + + $ cp -r $LCOV_HOME/share/test path/to/myTestDir + $ cd path/to/myTestDir + $ make [COVERAGE=1] + +If desired, you can collect coverage data for the LCOV module by setting +the COVERAGE makefile variable. +Note that the Devel::Cover package must be installed if COVERAGE is enabled +or if you want to use the perl2lcov utility. +To view the collected coverage information, point your browser to +.../lcov_coverage/index.html after running the tests. + +Note that the testcases are primarily intended to test LCOV functionality +and not to be easily readable tutorial examples. + +3. Dependencies: +---------------- + +The lcov module is implemented primarily in Perl - and requires both a +moderately up-to-date Perl installation and multiple Perl packages. + +These perl packages include: + + - Capture::Tiny + - DateTime + - Devel::Cover + - Digest::MD5 + - File::Spec + - at least one flavor of JSON module. 
+ In order of performance/preference: + - JSON::XS + - Cpanel::JSON::XS + - JSON::PP + - JSON + - Memory::Process + - Module::Load::Conditional + - Scalar::Util + - Time::HiRes + - TimeDate + +If your system is missing any of these, then you may be able to install them +via: + + $ perl -MCPAN -e 'install()' + +You will very likely need superuser access to be able to install Perl +modules. + +Some of the applications provided with the lcov module are written +in Python - and may require additional Python packages. +In particular, 'xlsxwriter' is required in order to generate any +of the spreadsheet reports. + +To measure Python code coverage, users will need Python packages: + + - Coverage.py + +In addition, contributors will need: + + - perltidy + +Your platform may support other mechanisms to install and/or update +required packages. + + -3. An example of how to access kernel coverage data ---------------------------------------------------- -Requirements: get and install the gcov-kernel package from +4. An example of how to access Linux kernel coverage data +--------------------------------------------------------- +Requirements: Follow the Linux kernel coverage setup instructions at: - http://sourceforge.net/projects/ltp + https://docs.kernel.org/dev-tools/gcov.html -Copy the resulting gcov kernel module file to either the system wide modules -directory or the same directory as the Perl scripts. As root, do the following: +As root, do the following: a) Resetting counters @@ -92,44 +193,367 @@ directory or the same directory as the Perl scripts. As root, do the following: Point the web browser of your choice to the resulting index.html file. -4. An example of how to access coverage data for a user space program +5. An example of how to access coverage data for a user space program --------------------------------------------------------------------- -Requirements: compile the program in question using GCC with the options --fprofile-arcs and -ftest-coverage. During linking, make sure to specify --lgcov or -coverage. -Assuming the compile directory is called "appdir", do the following: + a) Capture current coverage state to a file: - a) Resetting counters + i) C/C++ code: - lcov --directory appdir --zerocounters + Compile your program using the '--coverage' GCC or LLVM + option. During linking, make sure to specify '--coverage': - b) Capturing the current coverage state to a file + $ gcc -o myTest --coverage simple.c + OR + $ gcc -c file1.c file2.c ... --coverage + $ gcc -o myOtherTest --coverage file1.o file2.o .... - lcov --directory appdir --capture --output-file app.info + Alternately, LLVM users can use the 'profdata path' (rather than the + 'gcov path') to collect coverage data from their C/C++ code. See + https://github.com/linux-test-project/lcov/discussions/234 for more + information. - Note that this step only works after the application has - been started and stopped at least once. Otherwise lcov will - abort with an error mentioning that there are no data/.gcda files. + Run your testcase at least once: - c) Getting HTML output + $ path/to/my/testcase/myTest - genhtml app.info + Capture the current coverage state to a file: -Point the web browser of your choice to the resulting index.html file. + $ lcov --directory path/to/my/testcase --capture --output-file app.info + + (LLVM users using the 'profdata path' will use a somewhat different + command for this step - see the discussion referenced above.) 
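+
+     As a rough sketch of that 'profdata path' (illustrative only: the
+     file names below are hypothetical, flags can vary between LLVM
+     versions, and the discussion linked above is the authoritative
+     reference):
+
+       $ clang -o myTest -fprofile-instr-generate -fcoverage-mapping simple.c
+       $ LLVM_PROFILE_FILE=myTest.profraw ./myTest
+       $ llvm-profdata merge -sparse myTest.profraw -o myTest.profdata
+       $ llvm-cov export -format=text -instr-profile=myTest.profdata \
+             ./myTest > coverage.json
+       $ llvm2lcov -o app.info coverage.json
+
+     (Check 'llvm2lcov --help' for the exact output option spelling.)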
+ + If you want to collect Modified Condition / Decision Coverage (MC/DC) + data, then: + - you must use gcc/14.2 (or newer), or LLVM/18 (or newer) + - your GCC compile- and link command line must include flag + '-fcondition-coverage'. + - LLVM users must use the 'profdata path' for coverage data collection, + and your compile command line must include + '-fprofile-instr-generate -fcoverage-mapping -fcoverage-mcdc'. + See the above referenced discussion for details. + - your lcov and genhtml command line must include flag + '--mcdc-coverage' + See the '--mcdc-coverage' section in the genhtml and geninfo man pages. + + Note that runtime coverage data exists only after the application has + been started and stopped at least once. Otherwise, no data will be found + and lcov will abort with an error mentioning that there are no + data/.gcda files. + + The coverage runtime emits data (the .gcda files) in an atexit + callback. If your application exits abnormally or crashes before + the callback is executed, then no coverage data will be available. + + For further information on the gcc profiling mechanism, please + consult the gcov man page. + + See 'man lcov' for more information - especially if your build/test + environment is not trivial. + + ii) Python code: + + - install the Coverage.py module + + - execute your testcase to produce python coverage data: + + $ COVERAGE_FILE=./pycov.dat coverage run --append --branch \ + myPythonScript [my script args] + + - translate Python coverage data to LCOV format: + + $ py2lcov -o pycov.info [py2lcov_options] pycov.dat [x.dat]+ + + See 'py2lcov --help' and the Coverage.py documentation for more + information. + + iii) Perl code: + + - install the Devel::Cover module + + - execute your testcase to produce perl coverage data: + + $ perl -MDevel::Cover=-db,perlcov_db,-coverage,statement,branch,condition,subroutine,-silent,1 myPerlTest.pl [my script args] + + - translate Perl coverage data to LCOV format: + + $ perl2lcov --output perlcov.info perlcov_db [perl2lcov options] + + See 'perl2lcov --help' and the Devel::Cover documentation for more + information. + + iv) XML data (for example, generated by Cobertura): + + - translate XML coverage data to LCOV format: + + $ xml2lcov --output myData.info coverage.xml [xml2lcov options] + + See 'xml2lcov --help' and the Cobertura documentation for more + information. + + b) Generate an HTML coverage report: -Please note that independently of where the application is installed or -from which directory it is run, the --directory statement needs to -point to the directory in which the application was compiled. + Generate an HTML report, combining all of your LCOV data files: -For further information on the gcc profiling mechanism, please also -consult the gcov man page. + $ genhtml -o html_report app.info pycov.info perlcov.info + Point the web browser of your choice to the resulting file: + html_report/index.html. -5. Questions and comments + See 'man genhtml' for more details. + + c) Generate a differential coverage report: + + See the example in .../example (run "make test_differential") + as well as the examples in .../tests/gendiffcov. + + +6. LCOV Features: +----------------- + +LCOV features and capabilities fall into 7 major categories: + + a) Categorization + + This refers primarily to differential coverage categorization as + well as date- and owner-binning. See https://arxiv.org/abs/2008.07947 + or https://ieeexplore.ieee.org/document/9438597 for a detailed + description of the concepts.
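+
+   For illustration, a minimal differential run might look roughly like
+   the following (a sketch only: the .info and diff file names are
+   hypothetical, and 'gitblame' is one of the sample annotation scripts
+   described in section e) below; see the genhtml man page and the
+   tests/gendiffcov examples for real usage):
+
+     $ genhtml -o differential_report \
+           --baseline-file baseline.info \
+           --diff-file baseline_to_current.diff \
+           --annotate-script gitblame \
+           current.info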
+ + Differential categorization and binning are orthogonal in the sense + that you can generate a differential report without binning as well + as 'vanilla' coverage reports with binning. See the above papers + and the genhtml man page for details. + + Related options: + --baseline-file, --diff-file, --annotate-script, --select-script + --date-bins, --date-labels --new-file-as-baseline, + --elide-path-mismatch + + b) Error handling + + A generic - but very simple - error handler has been added to the + lcov tool suite. The error handler is used to report exceptions, + and provides a mechanism for the user to ignore the particular + message if desired. Note that ignoring certain errors can cause + subsequent errors and/or can result in inconsistent or confusing + coverage reports. + See the genhtml/lcov/geninfo man pages for details. + + Note that some errors are unrecoverable - and cannot be suppressed or + ignored. + + Related options: + --ignore-errors, --expect-message-count, --keep-going, --msg-log + + c) Navigation and display: + + Navigation aids such as hyperlinks to the first uncovered region, + to the next uncovered region, etc. have been implemented. Similarly, + new tables, new columns, and new links between tables enable the + user to identify the author of particular code (covered or not + covered), as well as the time period when the code was written. + + Collectively, these features help the user to quickly identify the + cause of code coverage issues, and to then decide what to do. + + Options to generate a 'hierarchical' coverage report (which follows + the source code directory structure) or a 'flat' report (all files in + the top level of a two-level report), as well as various other small + features (tooltip popups, user-specified HTML header, footer, and + table labels, etc.), are also available. + + See the genhtml man page for some details, as well as the + 'gendiffcov/simple' testcases for some examples. + + Related options: + --baseline-title, --baseline-date, --current-date, + --flat, --hierarchical, + --show-owners, --show-noncode, --show-navigation, --show-proportion, + --suppress-aliases --simplify-script + + d) Data manipulation + + Filters are used to suppress or remove certain coverage artifacts - + for example, branches generated by the compiler (e.g., for exception + handling). These artifacts can overwhelm the user code and obscure + coverage features that are interesting to the user. + + Other options are used to focus on or to exclude certain sections + of code, as well as to do regexp replacement of file names - possibly + using case-insensitive comparison. + (Path munging is useful primarily when the build structure does + not exactly match the layout in your revision control system; this + is common in large projects with reusable components.) + + During coverage data capture, the --build-directory option can be used + to specify a search path, to find the .gcno (compile-time coverage data) + file corresponding to a particular .gcda (runtime coverage data) file. + Similarly, the --source-directory option can be used to specify a + search path for source files. + + See the lcov/geninfo/genhtml man pages for a detailed description of + the available filters and manipulation features.
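+
+   As a small illustration, a capture that restricts the result to
+   project sources, excludes a vendored directory, and enables branch
+   filtering could look roughly like this (a sketch only: the glob
+   patterns and file names are hypothetical; see 'man lcov' and
+   'man geninfo' for the full list of filter keywords):
+
+     $ lcov --capture --directory build \
+           --include '*/src/*' --exclude '*/vendor/*' \
+           --filter branch \
+           --output-file filtered.info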
+ + Related options: + --include, --exclude, --erase-functions, --omit-lines, + --substitute, --filter + --build-directory --source-directory + + e) Callbacks/customization + + The user can supply callbacks which are used to: + + i) interface with the revision control system + Sample scripts: + - Perforce: see 'p4diff', 'p4annotate.pm', 'p4annotate' + - Git: see 'gitdiff', 'gitblame.pm', 'gitblame' + ii) verify that source code versions are compatible, and + Sample scripts: see 'get_signature', 'P4version.pm', 'getp4version', + 'gitversion', 'gitversion.pm', and 'batchGitVersion.pm' + iii) enforce desired code coverage criteria + Sample script: criteria.pm/criteria + iv) find source files in more complicated environments - where + simple substitutions become complicated or unwieldy. + v) select a subset of coverage data to display - e.g., to + use in a code review which wants to concentrate on only + the changes caused by a particular commit or range of commits, + or to review changes in a particular release. + Sample script: select.pm + vi) keep track of environment and other settings - to aid + infrastructure debugging in more complicated use cases. + vii) compress the 'function detail' table to improve readability + by shortening long C++ template and function names. + + The callback may be any desired script or executable - but there + may be performance advantages if it is written as a Perl module. + + See the genhtml/lcov/geninfo man pages for details. + + Note that the various sample scripts are found in the source code + 'scripts' directory, but are installed in the + $LCOV_HOME/share/lcov/support-scripts directory of the release. + + Related options: + --annotate-script, --criteria-script, --version-script + --resolve-script, --select-script, --context-script + --simplify-script + + f) Performance + + lcov/genhtml/geninfo have been refactored to parallelize computation + across multiple cores, if requested. + In general, this provides speedup that is nearly linear in the number + of cores. + There is also an option to throttle parallelism to not exceed peak + memory consumption constraints, as well as options to enable simple + profile data collection - so you can see where time is going and + thus to hint at potential optimizations. The 'spreadsheet.py' + script can be used to view generated profile data. + + There are several configuration file options which can be used to + tweak certain parallelization parameters to optimize performance + for your environment in cases where the default behaviour is suboptimal. + See the lcovrc man page for more information. + + See the genhtml/lcov/geninfo man pages for details. + + Related options: --parallel, --memory, --profile + + g) Language/tool support + + Added 'llvm2lcov', 'py2lcov', 'perl2lcov' and 'xml2lcov' scripts. + + - llvm2lcov: + + translates JSON coverage data generated by 'llvm-cov export -format=text ...' + to lcov format. + + See "llvm2lcov --help" for brief instructions on how to use the + translator. Note that llvm2lcov uses a similar set of command line + and configuration file options as lcov, genhtml, and geninfo. + + - py2lcov: + + translates python Coverage.py XML data to lcov format. + + See the Coverage.py documentation at https://coverage.readthedocs.io, + as well as ".../py2lcov --help". + + - perl2lcov + + translates Perl Devel::Cover data to lcov format. + + See the Devel::Cover documentation at + https://metacpan.org/pod/Devel::Cover + to find out how to generate coverage data for Perl code.
+ + See "perl2lcov --help" for brief instructions on how to + use the translator. + Note that perl2lcov uses a similar set of command line and + config file options as lcov, genhtml, and geninfo. + + - xml2lcov + + translates XML coverage data to lcov format. + The XML data may come from Cobertura or similar tools. + + See "xml2lcov --help" for brief instructions on how to use + the translator. + See the Coburtura documentation for directions on how to + generate XML data. + + Other languages can be integrated using a similar approach. + +In general, the new features and options are implemented uniformly in lcov, +genhtml, and geninfo. Most of the features can be enabled/disabled +using either command line options or by setting defaults in your 'lcovrc' +file. See the lcovrc man page for details. + + +7. Questions and comments ------------------------- See the included man pages for more information on how to use the LCOV tools. -Please email further questions or comments regarding this tool to the -LTP Mailing list at ltp-coverage@lists.sourceforge.net - +In case of further questions, feel free to open a new issue or discussion using +the issue tracker on the LCOV code repository site at: + + https://github.com/linux-test-project/lcov + + +8. Filing a new issue +--------------------- +Before filing a new issue - and if you are using an LCOV release (as opposed +to using a clone of the github repo) - please verify whether the issue is +still present in the LCOV master version. See section 2, above for +directions on how to clone and install the most up-to-date LCOV version. + +If possible, please include a testcase which illustrates the problem +when you file an issue. +Please describe your environment (platform, compiler, perl, and python +versions, etc.). Please include a detailed description of the issue: +what you were trying to do (your goal - not the mechanics of your +procedure), what you did (the mechanics of your procedure), the result +you wanted to see vs. what actually happened. +Depending on the issue, your testcase may need to include source code and +compile/link command lines, directions for how to run your example, the +command lines used to capture and generate your lcov reports, etc. +In other cases, the captured '.info' files may be sufficient to reproduce +the issue. +When in doubt: more is better than less. + +If you cannot include a testcase - e.g., because you feel that it is +senstitive or proprietary - then your detailed description is even more +important. +Note that, without an example, it may be difficult or impossible to +diagnose or fix the problem. + +Bear in mind that you are asking for help from volunteers. Your +priority might not be their priority. Civility, consideration and politeness +go a long way. + +Please check back and to verify the fix and close the issue once it has +been addressed. +Again: remember that you are asking for help from volunteers. +Make sure that you are doing your part. diff --git a/bin/checkstyle.sh b/bin/checkstyle.sh new file mode 100755 index 00000000..2b0c614f --- /dev/null +++ b/bin/checkstyle.sh @@ -0,0 +1,186 @@ +#!/bin/bash +# +# checkstyle.sh FILENAME(S) +# +# Check specified files for coding style issues. 
The mode of checking is +# selected using the MODE environment variable: +# +# MODE: +# diff Report only issues on code changed since git HEAD (default) +# full Report all issues +# +# Note: Currently checking is restricted to Perl files + +# Return absolute path +function realpath() { + local path="$1" + + if [[ -d "$path" ]] ; then + echo "$(cd "$path" && pwd)" + else + echo "$(cd "$(dirname "$path")" && pwd)/$(basename "$1")" + fi +} + +# Return path relative to TOOLDIR +function relpath() { + local inpath="$1" outpath + + outpath="${1##$TOOLDIR/}" + + echo "$outpath" + + # Let caller know if path was inside TOOLDIR + [[ "$inpath" != "$outpath" ]] +} + +# Terminate with an error message +function die() { + echo "Error: $*" >&2 + exit 1 +} + +# List source lines that violate the style guide rules. If an original file +# is supplied, only report new findings. +function report() { + local file="$1" tidy="$2" origfile="${3:-}" origtidy="${4:-}" + local diffopts mark origmark newmark lineno continuation newline + local tag line + + # Mark offending lines. An offending line is a line in the original + # file that was removed and replaced with a fixed line by the tidy + # script. + diffopts=( + '--old-line-format' '-%L' # Mark changed lines + '--new-line-format' '' # Remove fixed lines + '--unchanged-line-format' ' %L' # Leave other lines + '--minimal' + ) + + mark="$TEMPDIR/${file##*/}.marked" + diff "${diffopts[@]}" "$file" "$tidy" >"$mark" + + origmark="$TEMPDIR/${file##*/}.origmarked" + if [[ -n "$origfile" ]] ; then + # Use original version as baseline + diff "${diffopts[@]}" "$origfile" "$origtidy" >"$origmark" + else + # Use full file as baseline + sed -e 's/^/ /g' "$file" >"$origmark" + fi + + # Mark lines that are added compared to baseline + diffopts=( + '--old-line-format' '' # Remove old lines + '--new-line-format' '+%L' # Mark changed lines + '--unchanged-line-format' ' %L' # Leave other lines + '--minimal' + ) + + newmark="$TEMPDIR/${file##*/}.newmarked" + diff "${diffopts[@]}" "$origmark" "$mark" >"$newmark" + + if ! grep -q '^+-' "$newmark" ; then + echo "$file has no obvious style issues" + return 0 + fi + + if [[ -z "$origfile" ]] ; then + echo "Listing all findings in $file:" + else + echo "Listing findings in $file since git ref $GITBASE:" + fi + echo + + # Display groups of newly introduced offending lines + lineno=1 + continuation=0 + newline=0 + while read -r line ; do + tag=${line:0:2} + line=${line:2} + if [[ "$tag" == "+-" ]] ; then + # + means: introduced after baseline + # - means: an offending line + if [[ "$continuation" -eq 0 ]] ; then + if [[ "$newline" -eq 1 ]] ; then + echo + newline=0 + fi + echo "In $file line $lineno:" + continuation=1 + fi + echo "+${line//$'\t'/^I}" + else + if [[ "$continuation" -eq 1 ]] ; then + newline=1 + continuation=0 + fi + fi + (( lineno++ )) + done < "$newmark" + + echo + echo "$file has style issues, please review (see also $tidy)." >&2 + + return 1 +} + +TOOLDIR="$(realpath "$(dirname "$0")"/..)" + +PERLTIDY="${PERLTIDY:-perltidy}" +PERLTIDYRC="${PERLTIDYRC:-$TOOLDIR/.perltidyrc}" + +# HEAD is default baseline +GITBASE=${GITBASE:-HEAD} + +# diff is default mode +MODE=${MODE:-diff} +#MODE=${MODE@L} + +case "$MODE" in +"diff") [[ ! 
-d "$TOOLDIR/.git" ]] && die "Not in a git repository" ;; +"full") ;; +*) die "Unknown checking mode '$MODE'" ;; +esac + +TEMPDIR=$(mktemp -d) || die "Could not create temporary directory" +trap "rm -rf '$TEMPDIR'" exit + +RC=0 +for FILE in "${@}" ; do + BASE=${FILE##*/} + TIDY="$FILE.tdy" + + if [[ "$MODE" == "diff" ]] ; then + GITFILE="$TEMPDIR/$BASE.git" + GITTIDY="$TEMPDIR/$BASE.git.tdy" + + RELFILE="$(relpath "$(realpath "$FILE")")" || + die "$FILE is outside of git repository" + + git show "$GITBASE:$RELFILE" >"$GITFILE" || + die "No version of $FILE found in git ref $GITBASE" + + "$PERLTIDY" --profile="$PERLTIDYRC" "$GITFILE" -o "$GITTIDY" || + die "Could not check git version of $FILE" + + "$PERLTIDY" --profile="$PERLTIDYRC" "$FILE" -o "$TIDY" || + die "Could not check $FILE" + + report "$FILE" "$TIDY" "$GITFILE" "$GITTIDY" + else + "$PERLTIDY" --profile="$PERLTIDYRC" "$FILE" -o "$TIDY" || + die "Could not check $FILE" + + report "$FILE" "$TIDY" + fi + + if [[ $? -ne 0 ]] ; then + RC=1 + else + rm -f "$TIDY" + fi +done + +exit $RC diff --git a/bin/copy_dates.sh b/bin/copy_dates.sh index aef5f5ed..405e0de2 100755 --- a/bin/copy_dates.sh +++ b/bin/copy_dates.sh @@ -17,7 +17,9 @@ fi [ -d "$SOURCE/.git" ] ; NOGIT=$? -echo "Copying modification/commit times from $SOURCE to $TARGET" +if [[ -n "$V" ]] && [[ "$V" -gt 0 ]] ; then + echo "Copying modification/commit times from $SOURCE to $TARGET" +fi cd "$SOURCE" || exit 1 find * -type f | while read FILENAME ; do diff --git a/bin/fix.pl b/bin/fix.pl new file mode 100755 index 00000000..ecfd6598 --- /dev/null +++ b/bin/fix.pl @@ -0,0 +1,312 @@ +#!/usr/bin/env perl +# +# Usage: fix.pl [FILE TYPE] [OPTIONS] FILE(s) +# +# Apply file-type specific fixups to the specified list of FILES. If no +# file type is specified, the type is automatically determined from file +# contents and name. 
+# +# FILE TYPE +# --manpage Specified files are man pages +# --exec Specified files are executable tools +# --text Specified files are text files +# --spec Specified files are RPM spec files +# +# OPTIONS +# --help Print this text, then exit +# --verfile FILENAME Write version information to FILENAME +# --version VERSION Use VERSION as version +# --release RELEASE Use RELEASE as release +# --libdir PATH Use PATH as library path +# --bindir PATH Use PATH as executable path +# --scriptdir PATH Use PATH as script path +# --fixinterp Replace /usr/bin/env interpreter references with values +# specified by LCOV_PERL_PATH and LCOV_PYTHON_PATH +# --fixdate Replace dates references with the value specified by +# SOURCE_DATA_EPOCH or the latest file modification time +# --fixver Replace version references with values specified by +# --version and --release +# --fixlibdir Replace library path references with value specified +# by --libdir +# --fixbindir Replace executable path references with value specified +# by --bindir +# --fixscriptdir Replace script path references with value specified by +# --scriptdir + +use strict; +use warnings; + +use Getopt::Long; + +my ($opt_man, $opt_exec, $opt_text, $opt_spec, + $opt_verfile, $opt_version, $opt_release, $opt_libdir, + $opt_bindir, $opt_scriptdir, $opt_fixinterp, $opt_fixdate, + $opt_fixver, $opt_fixlibdir, $opt_fixbindir, $opt_fixscriptdir); +my $verbose = $ENV{"V"}; + +sub get_file_info($) +{ + my ($filename) = @_; + my ($sec, $min, $hour, $year, $month, $day); + my @stat; + + die("Error: cannot stat $filename: $!\n") if (!-e $filename); + + @stat = stat($filename); + my $epoch = int($ENV{SOURCE_DATE_EPOCH} || $stat[9]); + $epoch = $stat[9] if $stat[9] < $epoch; + ($sec, $min, $hour, $day, $month, $year) = gmtime($epoch); + $year += 1900; + $month += 1; + + return (sprintf("%04d-%02d-%02d", $year, $month, $day), + $epoch, sprintf("%o", $stat[2] & 07777)); +} + +sub update_man_page($$) +{ + my ($source, $date_string) = @_; + + if ($opt_fixver) { + die("$0: Missing option --version\n") if (!defined($opt_version)); + + $source =~ s/\"LCOV\s+\d+\.\d+\"/\"LCOV $opt_version\"/mg; + } + + if ($opt_fixdate) { + $date_string =~ s/-/\\-/g; + $source =~ s/\d\d\d\d\\\-\d\d\\\-\d\d/$date_string/mg; + } + + if ($opt_fixscriptdir) { + die("$0: Missing option --scriptdir\n") if (!defined($opt_scriptdir)); + $source =~ s/^(.ds\s+scriptdir\s).*$/$1$opt_scriptdir/mg; + } + + return $source; +} + +sub update_perl($) +{ + my ($source) = @_; + my $path = $ENV{"LCOV_PERL_PATH"}; + + if ($opt_fixver) { + die("$0: Missing option --version\n") if (!defined($opt_version)); + die("$0: Missing option --release\n") if (!defined($opt_release)); + + $source =~ + s/^(our\s+\$VERSION\s*=).*$/$1 "$opt_version-$opt_release";/mg; + } + + if ($opt_fixinterp && defined($path) && $path ne "") { + $source =~ s/^#!.*perl.*\n/#!$path\n/ + unless $source =~ @^#!/usr/bin/env perl$@; + } + + if ($opt_fixlibdir) { + die("$0: Missing option --libdir\n") if (!defined($opt_libdir)); + + $source =~ s/^use FindBin;\n//mg; + $source =~ s/"\$FindBin::RealBin[\/.]+lib"/"$opt_libdir"/mg; + } + + if ($opt_fixbindir) { + die("$0: Missing option --bindir\n") if (!defined($opt_bindir)); + + $source =~ s/^use FindBin;\n//mg; + $source =~ s/"\$FindBin::RealBin"/"$opt_bindir"/mg; + } + + if ($opt_fixscriptdir) { + die("$0: Missing option --scriptdir\n") if (!defined($opt_scriptdir)); + + $source =~ s/^use FindBin;\n//mg; + $source =~ s/"\$FindBin::RealBin"/"$opt_scriptdir"/mg; + } + + return $source; +} + +sub 
update_python($) +{ + my ($source) = @_; + my $path = $ENV{"LCOV_PYTHON_PATH"}; + + if ($opt_fixinterp && defined($path) && $path ne "") { + $source =~ s/^#!.*python.*\n/#!$path\n/ + unless $source =~ @^#!/usr/bin/env python3?$@; + } + + return $source; +} + +sub update_txt_file($$) +{ + my ($source, $date_string) = @_; + + if ($opt_fixdate) { + $source =~ s/(Last\s+changes:\s+)\d\d\d\d-\d\d-\d\d/$1$date_string/mg; + } + + return $source; +} + +sub update_spec_file($) +{ + my ($source) = @_; + + if ($opt_fixver) { + die("$0: Missing option --version\n") if (!defined($opt_version)); + die("$0: Missing option --release\n") if (!defined($opt_release)); + + $source =~ s/^(Version:\s*)\d+\.\d+.*$/$1$opt_version/mg; + $source =~ s/^(Release:\s*).*$/$1$opt_release/mg; + } + + return $source; +} + +sub write_version_file($$$) +{ + my ($filename, $version, $release) = @_; + my $fd; + + die("$0: Missing option --version\n") if (!defined($version)); + die("$0: Missing option --release\n") if (!defined($release)); + + open($fd, ">", $filename) or die("Error: cannot write $filename: $!\n"); + print($fd "VERSION=$version\n"); + print($fd "RELEASE=$release\n"); + close($fd); +} + +sub guess_filetype($$) +{ + my ($filename, $data) = @_; + + return "exec" + if ($data =~ /^#!/ || + $filename =~ /\.pl$/ || + $filename =~ /\.pm$/); + return "manpage" if ($data =~ /^\.TH/m || $filename =~ /\.\d$/); + return "spec" if ($data =~ /^%install/m || $filename =~ /\.spec$/); + return "text" if ($data =~ /^[-=]+/m); + + return ""; +} + +sub usage() +{ + my ($fd, $do); + + open($fd, "<", $0) || return; + while (my $line = <$fd>) { + if (!$do && $line =~ /Usage/) { + $do = 1; + } + if ($do) { + last if ($line !~ s/^# ?//); + print($line); + } + } + close($fd); +} + +sub main() +{ + my $opt_help; + + if (!GetOptions("help" => \$opt_help, + "manpage" => \$opt_man, + "exec" => \$opt_exec, + "text" => \$opt_text, + "spec" => \$opt_spec, + "verfile=s" => \$opt_verfile, + "version=s" => \$opt_version, + "release=s" => \$opt_release, + "libdir=s" => \$opt_libdir, + "bindir=s" => \$opt_bindir, + "scriptdir=s" => \$opt_scriptdir, + "fixinterp" => \$opt_fixinterp, + "fixdate" => \$opt_fixdate, + "fixver" => \$opt_fixver, + "fixlibdir" => \$opt_fixlibdir, + "fixbindir" => \$opt_fixbindir, + "fixscriptdir" => \$opt_fixscriptdir, + )) { + print(STDERR "Use $0 --help to get usage information.\n"); + exit(1); + } + + if ($opt_help) { + usage(); + exit(0); + } + + if (defined($opt_verfile)) { + print("Updating version file $opt_verfile\n") if ($verbose); + write_version_file($opt_verfile, $opt_version, $opt_release); + } + + for my $filename (@ARGV) { + my ($fd, $source, $original, $guess); + my @date = get_file_info($filename); + + next if (-d $filename); + + open($fd, "<", $filename) or die("Error: cannot open $filename\n"); + local ($/); + $source = $original = <$fd>; + close($fd); + + if (!$opt_man && !$opt_exec && !$opt_text && !$opt_spec) { + $guess = guess_filetype($filename, $source); + } else { + $guess = ""; + } + + if ($opt_man || $guess eq "manpage") { + print("Updating man page $filename\n") if ($verbose); + $source = update_man_page($source, $date[0]); + } + if ($opt_exec || $guess eq "exec") { + print("Updating bin tool $filename\n") if ($verbose); + if ($filename =~ /\.pm$/ || $source =~ /^[^\n]*perl[^\n]*\n/) { + $source = update_perl($source); + } elsif ($filename =~ /\.py$/ || + $source =~ /^[^\n]*python[^\n]*\n/) { + $source = update_python($source); + } + } + if ($opt_text || $guess eq "text") { + print("Updating 
text file $filename\n") if ($verbose); + $source = update_txt_file($source, $date[0]); + } + if ($opt_spec || $guess eq "spec") { + print("Updating spec file $filename\n") if ($verbose); + $source = update_spec_file($source); + } + + if ($source ne $original) { + open($fd, ">", "$filename.new") || + die("Error: cannot create $filename.new\n"); + print($fd $source); + close($fd); + + chmod(oct($date[2]), "$filename.new") or + die("Error: chmod failed for $filename\n"); + unlink($filename) or + die("Error: cannot remove $filename\n"); + rename("$filename.new", "$filename") or + die("Error: cannot rename $filename.new to $filename\n"); + } + + utime($date[1], $date[1], $filename) or + warn("Warning: cannot update modification time for $filename\n"); + } +} + +main(); + +exit(0); diff --git a/bin/gendesc b/bin/gendesc index 97cde00f..bd936aef 100755 --- a/bin/gendesc +++ b/bin/gendesc @@ -10,7 +10,7 @@ # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. +# General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see @@ -25,7 +25,7 @@ # For each test case: # # -# +# # Actual description may consist of several lines. By default, output is # written to stdout. Test names consist of alphanumeric characters # including _ and -. @@ -37,24 +37,21 @@ use strict; use warnings; -use File::Basename; +use File::Basename; use Getopt::Long; use Cwd qw/abs_path/; +use FindBin; +use lib "$FindBin::RealBin/../lib"; +use lcovutil qw ($tool_name $tool_dir $lcov_version $lcov_url + die_handler warn_handler); # Constants -our $tool_dir = abs_path(dirname($0)); -our $lcov_version = 'LCOV version '.`$tool_dir/get_version.sh --full`; -our $lcov_url = "http://ltp.sourceforge.net/coverage/lcov.php"; -our $tool_name = basename($0); - +# (now imported from lcovutil.pm) # Prototypes sub print_usage(*); sub gen_desc(); -sub warn_handler($); -sub die_handler($); - # Global variables our $help; @@ -62,52 +59,45 @@ our $version; our $output_filename; our $input_filename; - # # Code entry point # $SIG{__WARN__} = \&warn_handler; -$SIG{__DIE__} = \&die_handler; +$SIG{__DIE__} = \&die_handler; # Parse command line options if (!GetOptions("output-filename=s" => \$output_filename, - "version" =>\$version, - "help|?" => \$help - )) -{ - print(STDERR "Use $tool_name --help to get usage information\n"); - exit(1); + "version" => \$version, + "help|?" => \$help +)) { + print(STDERR "Use $tool_name --help to get usage information\n"); + exit(1); } $input_filename = $ARGV[0]; # Check for help option -if ($help) -{ - print_usage(*STDOUT); - exit(0); +if ($help) { + print_usage(*STDOUT); + exit(0); } # Check for version option -if ($version) -{ - print("$tool_name: $lcov_version\n"); - exit(0); +if ($version) { + print("$tool_name: $lcov_version\n"); + exit(0); } - # Check for input filename -if (!$input_filename) -{ - die("No input filename specified\n". - "Use $tool_name --help to get usage information\n"); +if (!$input_filename) { + die("No input filename specified\n" . 
+ "Use $tool_name --help to get usage information\n"); } # Do something gen_desc(); - # # print_usage(handle) # @@ -116,23 +106,23 @@ gen_desc(); sub print_usage(*) { - local *HANDLE = $_[0]; + local *HANDLE = $_[0]; - print(HANDLE <", $output_filename) - or die("ERROR: cannot create $output_filename!\n"); - } - else - { - *OUTPUT_HANDLE = *STDOUT; - } - - # Process all lines in input file - while () - { - chomp($_); - - if (/^(\w[\w-]*)(\s*)$/) - { - # Matched test name - # Name starts with alphanum or _, continues with - # alphanum, _ or - - print(OUTPUT_HANDLE "TN: $1\n"); - $empty_line = "ignore"; - } - elsif (/^(\s+)(\S.*?)\s*$/) - { - # Matched test description - if ($empty_line eq "insert") - { - # Write preserved empty line - print(OUTPUT_HANDLE "TD: \n"); - } - print(OUTPUT_HANDLE "TD: $2\n"); - $empty_line = "observe"; - } - elsif (/^\s*$/) - { - # Matched empty line to preserve paragraph separation - # inside description text - if ($empty_line eq "observe") - { - $empty_line = "insert"; - } - } - } - - # Close output file if defined - if ($output_filename) - { - close(OUTPUT_HANDLE); - } - - close(INPUT_HANDLE); -} - -sub warn_handler($) -{ - my ($msg) = @_; - - warn("$tool_name: $msg"); -} - -sub die_handler($) -{ - my ($msg) = @_; - - die("$tool_name: $msg"); + local *INPUT_HANDLE; + local *OUTPUT_HANDLE; + my $empty_line = "ignore"; + + open(INPUT_HANDLE, "<", $input_filename) or + die("cannot open $input_filename!\n"); + + # Open output file for writing + if ($output_filename) { + open(OUTPUT_HANDLE, ">", $output_filename) or + die("cannot create $output_filename!\n"); + } else { + *OUTPUT_HANDLE = *STDOUT; + } + + # Process all lines in input file + while () { + chomp($_); + + if (/^(\w[\w-]*)(\s*)$/) { + # Matched test name + # Name starts with alphanum or _, continues with + # alphanum, _ or - + print(OUTPUT_HANDLE "TN: $1\n"); + $empty_line = "ignore"; + } elsif (/^(\s+)(\S.*?)\s*$/) { + # Matched test description + if ($empty_line eq "insert") { + # Write preserved empty line + print(OUTPUT_HANDLE "TD: \n"); + } + print(OUTPUT_HANDLE "TD: $2\n"); + $empty_line = "observe"; + } elsif (/^\s*$/) { + # Matched empty line to preserve paragraph separation + # inside description text + if ($empty_line eq "observe") { + $empty_line = "insert"; + } + } + } + + # Close output file if defined + if ($output_filename) { + close(OUTPUT_HANDLE); + } + + close(INPUT_HANDLE); } diff --git a/bin/genhtml b/bin/genhtml index 62a0fc46..ea0d5de0 100755 --- a/bin/genhtml +++ b/bin/genhtml @@ -10,7 +10,7 @@ # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. +# General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see @@ -62,26 +62,78 @@ # 2008-08-13 / Peter Oberparleiter: modified function coverage # implementation (now enabled per default), # introduced sorting option (enabled per default) +# April/May 2020 / Henry Cox/Steven Dovich - Mediatek, inc +# Add support for differential line coverage categorization, +# date- and owner- binning. +# June/July 2020 / Henry Cox - Mediatek, inc +# Add support for differential branch coverage categorization, +# Add a bunch of navigation features - href to next code block +# of type T, of type T in date- or owner bin B, etc. +# Add sorted tables for date/owner bin summaries. 
+# Ocober 2020 / Henry Cox - Mediatek, inc +# Add "--hierarchical" display option. # use strict; use warnings; + use File::Basename; -use File::Temp qw(tempfile); -use Getopt::Long; +use File::Copy; +use File::Path; +use File::Spec; +use File::Temp; +use Scalar::Util qw/looks_like_number/; use Digest::MD5 qw(md5_base64); -use Cwd qw/abs_path cwd/; - +use Cwd qw/abs_path realpath cwd/; +use DateTime; +#use Regexp::Common qw(time); # damn - not installed +use Date::Parse; +use FileHandle; +use Carp; +use Storable qw(dclone); +use FindBin; +use Time::HiRes; # for profiling +use Storable; +use POSIX; +use Data::Dumper; + +use lib "$FindBin::RealBin/../lib"; +use lcovutil qw (set_tool_name define_errors parse_ignore_errors + $tool_name $tool_dir $lcov_version $lcov_url + ignorable_error + $ERROR_MISMATCH $ERROR_SOURCE $ERROR_BRANCH $ERROR_FORMAT + $ERROR_EMPTY $ERROR_VERSION $ERROR_UNUSED $ERROR_PACKAGE + $ERROR_CORRUPT $ERROR_NEGATIVE $ERROR_COUNT $ERROR_UNSUPPORTED + $ERROR_DEPRECATED $ERROR_INCONSISTENT_DATA $ERROR_CALLBACK + $ERROR_RANGE $ERROR_PATH + $ERROR_PARALLEL $ERROR_CHILD report_parallel_error + report_exit_status + summarize_messages + $br_coverage $func_coverage + info $verbose init_verbose_flag debug $debug $devnull + parseOptions + strip_directories + parse_cov_filters summarize_cov_filters + $FILTER_BRANCH_NO_COND $FILTER_LINE_CLOSE_BRACE @cov_filter + rate get_overall_line $default_precision check_precision + die_handler warn_handler parse_w3cdtf); # Global constants -our $title = "LCOV - code coverage report"; -our $tool_dir = abs_path(dirname($0)); -our $lcov_version = 'LCOV version '.`$tool_dir/get_version.sh --full`; -our $lcov_url = "http://ltp.sourceforge.net/coverage/lcov.php"; -our $tool_name = basename($0); - -# Specify coverage rate default precision -our $default_precision = 1; +our $title = "LCOV - differential code coverage report"; +lcovutil::set_tool_name(basename($0)); + +our $debugScheduler = 0; + +# if false, then keep track of only enough information to be able to +# produce a valid HTML report. +# - Do not keep track of everything - say, to enable serialize/deserialize +# of the complete coverage DB. +# - In practice: this means to throw away the 'FileDetails' structure after +# the source file HTML has been constructed. 'FileDetails' is the lion's +# share of the memory footprint. +# This has the effect of reducing memory footprint and improving parallel +# performance. 
+our $buildSerializableDatabase = 0; # Specify coverage rate limits (in %) for classifying file entries # HI: $hi_limit <= rate <= 100 graph color: green @@ -89,9 +141,13 @@ our $default_precision = 1; # LO: 0 <= rate < $med_limit graph color: red # For line coverage/all coverage types if not specified -our $hi_limit = 90; +our $hi_limit = 90; our $med_limit = 75; +# For line coverage +our $ln_hi_limit; +our $ln_med_limit; + # For function coverage our $fn_hi_limit; our $fn_med_limit; @@ -100,6 +156,10 @@ our $fn_med_limit; our $br_hi_limit; our $br_med_limit; +# For MC/DC coverage +our $mcdc_hi_limit; +our $mcdc_med_limit; + # Width of overview image our $overview_width = 80; @@ -126,119 +186,102 @@ our $line_field_width = 12; # Width for branch coverage information in the source code view our $br_field_width = 16; +# Width for MC/DC coverage information in the source code view +our $mcdc_field_width = 14; + +# Width for owner name in the source code view +our $owner_field_width = 20; + +# Width for block age in the source code view +our $age_field_width = 5; + +# Width for TLA entry in the source code view +our $tla_field_width = 3; + # Internal Constants # Header types -our $HDR_DIR = 0; -our $HDR_FILE = 1; -our $HDR_SOURCE = 2; -our $HDR_TESTDESC = 3; -our $HDR_FUNC = 4; +our $HDR_DIR = 0; +our $HDR_FILE = 1; +our $HDR_SOURCE = 2; +our $HDR_TESTDESC = 3; +our $HDR_FUNC = 4; # Sort types -our $SORT_FILE = 0; -our $SORT_LINE = 1; -our $SORT_FUNC = 2; -our $SORT_BRANCH = 3; +our $SORT_FILE = 0; +our $SORT_LINE = 1; +our $SORT_FUNC = 2; +our $SORT_BRANCH = 3; +our $SORT_MCDC = 4; +# function detail sort types +our $SORT_MISSING_LINE = 5; # by number of not-hit lines in function +our $SORT_MISSING_BRANCH = 6; # by number of not-hit branches in function +our $SORT_MISSING_MCDC = 7; # by number of not-hit MC/DC expressions in function # Fileview heading types -our $HEAD_NO_DETAIL = 1; -our $HEAD_DETAIL_HIDDEN = 2; -our $HEAD_DETAIL_SHOWN = 3; +our $HEAD_NO_DETAIL = 1; +our $HEAD_DETAIL_HIDDEN = 2; +our $HEAD_DETAIL_SHOWN = 3; # Additional offsets used when converting branch coverage data to HTML -our $BR_LEN = 3; -our $BR_OPEN = 4; -our $BR_CLOSE = 5; - -# Branch data combination types -our $BR_SUB = 0; -our $BR_ADD = 1; - -# Block value used for unnamed blocks -our $UNNAMED_BLOCK = vec(pack('b*', 1 x 32), 0, 32); - -# Error classes which users may specify to ignore during processing -our $ERROR_SOURCE = 0; -our %ERROR_ID = ( - "source" => $ERROR_SOURCE, -); +our $BR_LEN = -3; +our $BR_OPEN = -2; # 2nd last element +our $BR_CLOSE = -1; # last element # Data related prototypes sub print_usage(*); sub gen_html(); sub html_create($$); -sub process_dir($); -sub process_file($$$); -sub info(@); -sub read_info_file($); -sub get_info_entry($); -sub set_info_entry($$$$$$$$$;$$$$$$); +sub process_file($$$$$); +sub compute_title($$); sub get_prefix($@); sub shorten_prefix($); -sub get_dir_list(@); sub get_relative_base_path($); sub read_testfile($); -sub get_date_string(); -sub create_sub_dir($); -sub subtract_counts($$); -sub add_counts($$); -sub apply_baseline($$); +sub get_date_string($); sub remove_unused_descriptions(); -sub get_found_and_hit($); -sub get_affecting_tests($$$); -sub combine_info_files($$); -sub merge_checksums($$$); -sub combine_info_entries($$$); +sub get_affecting_tests($$$$); sub apply_prefix($@); -sub system_no_output($@); -sub read_config($); -sub apply_config($); sub get_html_prolog($); sub get_html_epilog($); -sub write_dir_page($$$$$$$$$$$$$$$$$); +#sub 
write_dir_page($$$$$$$;$); +sub write_summary_pages($$$$$$$$); sub classify_rate($$$$); -sub combine_brcount($$$;$); -sub get_br_found_and_hit($); -sub warn_handler($); -sub die_handler($); -sub parse_ignore_errors(@); sub parse_dir_prefix(@); -sub rate($$;$$$); - # HTML related prototypes sub escape_html($); +sub escape_id($); sub get_bar_graph_code($$$); sub write_png_files(); sub write_htaccess_file(); sub write_css_file(); -sub write_description_file($$$$$$$); -sub write_function_table(*$$$$$$$$$$); +sub write_description_file($$); +sub write_function_table(*$$$$$$$$$$$$); sub write_html(*$); sub write_html_prolog(*$$); sub write_html_epilog(*$;$); -sub write_header(*$$$$$$$$$$); +sub write_header(*$$$$$$$); sub write_header_prolog(*$); sub write_header_line(*@); sub write_header_epilog(*$); -sub write_file_table(*$$$$$$$); -sub write_file_table_prolog(*$@); -sub write_file_table_entry(*$$$@); -sub write_file_table_detail_entry(*$@); +sub write_file_table(*$$$$$$); +sub write_file_table_prolog(*$$$@); +sub write_file_table_entry(*$$@); +sub write_file_table_detail_entry(*$$$$@); sub write_file_table_epilog(*); sub write_test_table_prolog(*$); sub write_test_table_entry(*$$); sub write_test_table_epilog(*); -sub write_source($$$$$$$); -sub write_source_prolog(*); -sub write_source_line(*$$$$$); +sub write_source($$$$$$$$); +sub write_source_prolog(*$$$); +sub write_source_line(*$$$$$$$); sub write_source_epilog(*); sub write_frameset(*$$$); @@ -246,2129 +289,7977 @@ sub write_overview_line(*$$$); sub write_overview(*$$$$); # External prototype (defined in genpng) -sub gen_png($$$@); +sub gen_png($$$$$@); + +sub simplify_function_name($); + +package SummaryInfo; + +our @selectCallbackScript; +our $selectCallback; +our @cleanDirectoryList; + +our @tlaPriorityOrder = ("UNC", + "LBC", + "UIC", + "UBC", + + "GBC", + "GIC", + "GNC", + "CBC", + + "EUB", + "ECB", + "DUB", + "DCB",); + +our %tlaLocation = ("UNC" => 1, + "LBC" => 3, + "UIC" => 3, + "UBC" => 3, + + "GBC" => 3, + "GIC" => 3, + "GNC" => 1, + "CBC" => 3, + + "EUB" => 3, + "ECB" => 3, + "DUB" => 2, + "DCB" => 2,); + +our %tlaToTitle = ("UNC" => "Uncovered New Code (+ => 0):\n" . + "Newly added code is not tested", + "LBC" => "Lost Baseline Coverage (1 => 0):\n" . + "Unchanged code is no longer tested", + "UIC" => "Uncovered Included Code (# => 0):\n" . + "Previously unused code is untested", + "UBC" => "Uncovered Baseline Code (0 => 0):\n" . + "Unchanged code was untested before, is untested now", + + "GBC" => "Gained Baseline Coverage (0 => 1):\n" . + "Unchanged code is tested now", + "GIC" => "Gained Included Coverage (# => 1):\n" . + "Previously unused code is tested now", + "GNC" => "Gained New Coverage (+ => 1):\n" . + "Newly added code is tested", + "CBC" => "Covered Baseline Code (1 => 1):\n" . + "Unchanged code was tested before and is still tested", + + "EUB" => "Excluded Uncovered Baseline (0 => #):\n" . + "Previously untested code is unused now", + "ECB" => "Excluded Covered Baseline (1 => #):\n" . + "Previously tested code is unused now", + "DUB" => "Deleted Uncovered Baseline (0 => -):\n" . + "Previously untested code has been deleted", + "DCB" => "Deleted Covered Baseline (1 => -):\n" . 
+ "Previously tested code has been deleted",); + +our %tlaToLegacy = ("UNC" => "Missed", + "GNC" => "Hit",); + +our %tlaToLegacySrcLabel = ("UNC" => "MIS", + "GNC" => "HIT",); + +our @defaultCutpoints = (7, 30, 180); +our @cutpoints; +our @ageGroupHeader; +our %ageHeaderToBin; + +our @truncateOwnerTableLevels; # default: truncate everywhere if enabled +our $ownerTableElements; # default: do not truncate +our $compactSummaryTables = 1; # on by default + +use constant { + TYPE => 0, + NAME => 1, + PARENT => 2, + RELATIVE_DIR => 3, + FULL_DIR => 4, + LINE_DATA => 5, + BRANCH_DATA => 6, + MCDC_DATA => 7, + FUNCTION_DATA => 8, + + FILE_DETAILS => 9, # SourceFile struct - only used for 'file' type + SOURCES => 9, # used by top and directory types + IS_ABSOLUTE => 10, # used by directory type only + + # coverage data list for type + DATA => 0, + AGE => 1, + OWNERS => 2, # not used by Function coverage - no owner +}; + +sub type2str +{ + my $t = shift; + return 'line' if ($t == LINE_DATA); + return 'branch' if ($t == BRANCH_DATA); + return 'MC/DC' if ($t == MCDC_DATA); + die("unexpected type '$t'") unless ($t == FUNCTION_DATA); + return 'function'; +} + +sub _initCounts +{ + my %hash; + foreach my $key ('found', 'hit', 'GNC', 'UNC', 'CBC', 'GBC', + 'LBC', 'UBC', 'ECB', 'EUB', 'GIC', 'UIC', + 'DCB', 'DUB' + ) { + $hash{$key} = 0; + } + return \%hash; +} +sub noBaseline +{ + # no baseline - so we will have only 'UIC' and 'GIC' code + # legacy display order is 'hit' followed by 'not hit' + @tlaPriorityOrder = ('GNC', 'UNC'); + %tlaToTitle = ('UNC' => 'Not Hit', + 'GNC' => 'Hit',); +} -# Global variables & initialization -our %info_data; # Hash containing all data from .info file -our @opt_dir_prefix; # Array of prefixes to remove from all sub directories -our @dir_prefix; -our %test_description; # Hash containing test descriptions if available -our $date = get_date_string(); - -our @info_filenames; # List of .info files to use as data source -our $test_title; # Title for output as written to each page header -our $output_directory; # Name of directory in which to store output -our $base_filename; # Optional name of file containing baseline data -our $desc_filename; # Name of file containing test descriptions -our $css_filename; # Optional name of external stylesheet file to use -our $quiet; # If set, suppress information messages -our $help; # Help option flag -our $version; # Version option flag -our $show_details; # If set, generate detailed directory view -our $no_prefix; # If set, do not remove filename prefix -our $func_coverage; # If set, generate function coverage statistics -our $no_func_coverage; # Disable func_coverage -our $br_coverage; # If set, generate branch coverage statistics -our $no_br_coverage; # Disable br_coverage -our $sort = 1; # If set, provide directory listings with sorted entries -our $no_sort; # Disable sort -our $frames; # If set, use frames for source code view -our $keep_descriptions; # If set, do not remove unused test case descriptions -our $no_sourceview; # If set, do not create a source code view for each file -our $highlight; # If set, highlight lines covered by converted data only -our $legend; # If set, include legend in output -our $tab_size = 8; # Number of spaces to use in place of tab -our $config; # Configuration file contents -our $html_prolog_file; # Custom HTML prolog file (up to and including ) -our $html_epilog_file; # Custom HTML epilog file (from onwards) -our $html_prolog; # Actual HTML prolog -our $html_epilog; # Actual HTML epilog -our $html_ext = 
"html"; # Extension for generated HTML files -our $html_gzip = 0; # Compress with gzip -our $demangle_cpp = 0; # Demangle C++ function names -our $demangle_cpp_tool = "c++filt"; # Default demangler for C++ function names -our $demangle_cpp_params = ""; # Extra parameters for demangling -our @opt_ignore_errors; # Ignore certain error classes during processing -our @ignore; -our $opt_config_file; # User-specified configuration file location -our %opt_rc; -our $opt_missed; # List/sort lines by missed counts -our $charset = "UTF-8"; # Default charset for HTML pages -our @fileview_sortlist; -our @fileview_sortname = ("", "-sort-l", "-sort-f", "-sort-b"); -our @funcview_sortlist; -our @rate_name = ("Lo", "Med", "Hi"); -our @rate_png = ("ruby.png", "amber.png", "emerald.png"); -our $lcov_func_coverage = 1; -our $lcov_branch_coverage = 0; -our $rc_desc_html = 0; # lcovrc: genhtml_desc_html +sub setAgeGroups +{ + #my $numGroups = scalar(@_) + 1; + @cutpoints = sort({ $a <=> $b } @_); + if (@ageGroupHeader) { + # labels were specified by user + @ageGroupHeader = split($lcovutil::split_char, + join($lcovutil::split_char, @ageGroupHeader)); + goto done + if (scalar(@ageGroupHeader) == scalar(@cutpoints) + 1); + # mismatched number - generate warning + lcovutil::ignorable_error($lcovutil::ERROR_USAGE, + "expected number of 'age' labels to match 'date-bin' cutpoints"); + # if message ignored, then assign default labels + } + @ageGroupHeader = (); + my $prefix = "[.."; + foreach my $days (@cutpoints) { + my $header = $prefix . $days . "] days"; + push(@ageGroupHeader, $header); + $prefix = "(" . $days . ","; + } + push(@ageGroupHeader, "(" . $cutpoints[-1] . "..) days"); + done: + %ageHeaderToBin = (); + my $bin = 0; + foreach my $header (@ageGroupHeader) { + $ageHeaderToBin{$header} = $bin; + ++$bin; + } +} -our $cwd = cwd(); # Current working directory +sub findAgeBin +{ + my $age = shift; + defined($age) or die("undefined age"); + my $bin; + for ($bin = 0; $bin <= $#cutpoints; $bin++) { + last + if ($age <= $cutpoints[$bin]); + } + return $bin; +} +sub new +{ + my ($class, $type, $name, $is_absolute_dir) = @_; + defined($name) || $type eq 'top' or + die("SummaryInfo name should be defined, except at top-level"); + my $self = [$type, # 'type' expected to be one of 'file', 'directory', 'top' + $name, + undef, # parent + undef, # relative dir, + undef, # full directory path, + [undef, [], {}], # line data + [undef, [], {}], # branch data + [undef, [], {}], # MC/DC data + [undef, []] # function data + ]; + if ($type eq 'file') { + $self->[FILE_DETAILS] = undef; # will be SourceFile object + } else { + $self->[SOURCES] = {}; + $self->[IS_ABSOLUTE] = $is_absolute_dir + if ($type eq 'directory'); + } + + for my $type (LINE_DATA, BRANCH_DATA, MCDC_DATA, FUNCTION_DATA) { + $self->[$type]->[DATA] = _initCounts(); + # no age data unless annotations are enabled + next unless @SourceFile::annotateScript; + my $ageList = $self->[$type]->[AGE]; + foreach my $i (0 .. $#cutpoints + 1) { + my $h = _initCounts(); + $h->{_LB} = ($i == 0) ? undef : $cutpoints[$i - 1]; + $h->{_UB} = ($i == $#cutpoints + 1) ? 
undef : $cutpoints[$i]; + $h->{_INDEX} = $i; + push(@$ageList, $h); + } + } + + bless $self, $class; + + return $self; +} +# deserialization: copy the coverage portion of the undumped data +sub copyGuts +{ + my ($self, $that) = @_; + my @copy = (RELATIVE_DIR, FULL_DIR, LINE_DATA, BRANCH_DATA, + MCDC_DATA, FUNCTION_DATA); + if ($that->[TYPE] eq 'file') { + push(@copy, FILE_DETAILS); + } elsif ($that->[TYPE] eq 'directory') { + push(@copy, IS_ABSOLUTE); + } + for my $key (@copy) { + $self->[$key] = $that->[$key]; + } +} + +sub name +{ + my $self = shift; + return $self->[NAME]; +} + +sub unsetDirs +{ + my $self = shift; + # need to reset after fork failure - we set the values + # just before forking, but now have to put them back. + die('bad usage: unsetDirs()') + unless (defined($self->[FULL_DIR]) && + defined($self->[RELATIVE_DIR])); + $self->[FULL_DIR] = undef; + $self->[RELATIVE_DIR] = undef; +} + +sub relativeDir +{ + my ($self, $dir_string) = @_; + die("bad usage: relativeDir(" . + (defined($dir_string) ? $dir_string : '') . ') current: ' + . + (defined($self->[RELATIVE_DIR]) ? $self->[RELATIVE_DIR] : '') + ) + unless ((!defined($dir_string) && defined($self->[RELATIVE_DIR])) || + (defined($dir_string) && !defined($self->[RELATIVE_DIR]))); + $self->[RELATIVE_DIR] = $dir_string + if defined($dir_string); + return $self->[RELATIVE_DIR]; +} + +sub fullDir +{ + my ($self, $dir_string) = @_; + die("bad usage fullDir()") + unless ((!defined($dir_string) && defined($self->[FULL_DIR])) || + (defined($dir_string) && !defined($self->[FULL_DIR]))); + $self->[FULL_DIR] = $dir_string + if defined($dir_string); + return $self->[FULL_DIR]; +} + +sub type +{ + my $self = shift; + return $self->[TYPE]; +} + +sub is_directory +{ + my ($self, $is_absolute) = @_; + return ( + $self->type() eq 'directory' ? + ((defined($is_absolute) && $is_absolute) ? $self->[IS_ABSOLUTE] : 1) + : + 0); +} + +sub parent +{ + my $self = shift; + return $self->[PARENT]; +} + +sub setParent +{ + my ($self, $parent) = @_; + die("expected parent dir") + unless (ref($parent) eq "SummaryInfo" && + ($main::flat ? 'top' : 'directory') eq $parent->type()); + $self->[PARENT] = $parent; +} + +sub sources +{ + my $self = shift; + die("bad usage") if $self->type() eq 'file'; + return keys(%{$self->[SOURCES]}); +} + +sub fileDetails +{ + my ($self, $data) = @_; + $self->type() eq 'file' or die("source details only available for file"); + !(defined($data) && defined($self->[FILE_DETAILS])) or + die("attempt to set details in initialized struct"); + !defined($data) || ref($data) eq 'SourceFile' or + die("unexpected data arg " . 
ref($data)); + $self->[FILE_DETAILS] = $data + if defined($data); + return $self->[FILE_DETAILS]; +} + +sub get_sorted_keys +{ + # sort_type in ($SORT_FILE, $SORT_LINE, $SORT_FUNC, $SORT_BRANC, $SORT_MCDC) + my ($self, $sort_type, $include_dirs) = @_; + die("invalid usage") if $self->type() eq 'file'; + + my $sources = $self->[SOURCES]; + + my @keys = $self->sources(); + my @l; + foreach my $k (@keys) { + my $data = $sources->{$k}; + next + if ($data->type() eq 'directory' && + (!defined($include_dirs) || + 0 == $include_dirs)); + push(@l, $k); + } + if ($sort_type == $SORT_FILE) { + # alphabetic + return sort(@l); + } + my $covtype; + if ($sort_type == $SORT_LINE) { + # Sort by number of instrumented lines without coverage + $covtype = LINE_DATA; + } elsif ($sort_type == $SORT_FUNC) { + # Sort by number of instrumented functions without coverage + $covtype = FUNCTION_DATA; + } elsif ($sort_type == $SORT_MCDC) { + # Sort by number of MC/DC points without coverage + $covtype = MCDC_DATA; + } else { + die("unexpected sort type $sort_type") + unless ($sort_type == $SORT_BRANCH); + # Sort by number of instrumented branches without coverage + $covtype = BRANCH_DATA; + } + + if ($main::opt_missed) { + # sort by directory first then secondary key + return + sort({ + my $da = $sources->{$a}; + my $db = $sources->{$b}; + # directories then files if list includes both + $da->type() cmp $db->type() or + $db->get_missed($covtype) + <=> $da->get_missed($covtype) or + # sort alphabetically in case of tie + $da->name() cmp $db->name() + } @l); + } else { + return + sort({ + my $da = $sources->{$a}; + my $db = $sources->{$b}; + $da->type() cmp $db->type() or + $da->get_rate($covtype) <=> $db->get_rate($covtype) or + $da->name() cmp $db->name() + } @l); + } +} + +sub get_source +{ + my ($self, $name) = @_; + die("bad usage") if $self->type() eq 'file'; + return + exists($self->[SOURCES]->{$name}) ? $self->[SOURCES]->{$name} : undef; +} + +sub remove_source +{ + my ($self, $name) = @_; + die("bad usage") if $self->type() eq 'file'; + delete $self->[SOURCES]->{$name}; +} + +sub get +{ + my ($self, $key, $type) = @_; + $type = LINE_DATA + if !defined($type); + + my $hash = $self->[$type]->[DATA]; + if ($key eq "missed") { + my $missed = 0; + foreach my $k ('UBC', 'UNC', 'UIC', 'LBC') { + $missed += $hash->{$k} + if (exists($hash->{$k})); + } + return $missed; + } else { + die("unexpected 'get' key $key") + unless exists($hash->{$key}); + return $hash->{$key}; + } +} + +# Return a relative value for the specified found&hit values +# which is used for sorting the corresponding entries in a +# file list. 
# -# Code entry point -# +sub get_rate +{ + my ($self, $covtype) = @_; + + my $hash = $self->[$covtype]->[DATA]; + my $found = $hash->{found}; + my $hit = $hash->{hit}; + + if ($found == 0) { + #return 100; + return 1000; + } + #return (100.0 * $hit) / $found; + return int($hit * 1000 / $found) * 10 + 2 - (1 / $found); +} -$SIG{__WARN__} = \&warn_handler; -$SIG{__DIE__} = \&die_handler; +sub get_missed +{ + my ($self, $covtype) = @_; -# Check command line for a configuration file name -Getopt::Long::Configure("pass_through", "no_auto_abbrev"); -GetOptions("config-file=s" => \$opt_config_file, - "rc=s%" => \%opt_rc); -Getopt::Long::Configure("default"); - -{ - # Remove spaces around rc options - my %new_opt_rc; - - while (my ($key, $value) = each(%opt_rc)) { - $key =~ s/^\s+|\s+$//g; - $value =~ s/^\s+|\s+$//g; - - $new_opt_rc{$key} = $value; - } - %opt_rc = %new_opt_rc; -} - -# Read configuration file if available -if (defined($opt_config_file)) { - $config = read_config($opt_config_file); -} elsif (defined($ENV{"HOME"}) && (-r $ENV{"HOME"}."/.lcovrc")) -{ - $config = read_config($ENV{"HOME"}."/.lcovrc"); -} -elsif (-r "/etc/lcovrc") -{ - $config = read_config("/etc/lcovrc"); -} elsif (-r "/usr/local/etc/lcovrc") -{ - $config = read_config("/usr/local/etc/lcovrc"); -} - -if ($config || %opt_rc) -{ - # Copy configuration file and --rc values to variables - apply_config({ - "genhtml_css_file" => \$css_filename, - "genhtml_hi_limit" => \$hi_limit, - "genhtml_med_limit" => \$med_limit, - "genhtml_line_field_width" => \$line_field_width, - "genhtml_overview_width" => \$overview_width, - "genhtml_nav_resolution" => \$nav_resolution, - "genhtml_nav_offset" => \$nav_offset, - "genhtml_keep_descriptions" => \$keep_descriptions, - "genhtml_no_prefix" => \$no_prefix, - "genhtml_no_source" => \$no_sourceview, - "genhtml_num_spaces" => \$tab_size, - "genhtml_highlight" => \$highlight, - "genhtml_legend" => \$legend, - "genhtml_html_prolog" => \$html_prolog_file, - "genhtml_html_epilog" => \$html_epilog_file, - "genhtml_html_extension" => \$html_ext, - "genhtml_html_gzip" => \$html_gzip, - "genhtml_precision" => \$default_precision, - "genhtml_function_hi_limit" => \$fn_hi_limit, - "genhtml_function_med_limit" => \$fn_med_limit, - "genhtml_function_coverage" => \$func_coverage, - "genhtml_branch_hi_limit" => \$br_hi_limit, - "genhtml_branch_med_limit" => \$br_med_limit, - "genhtml_branch_coverage" => \$br_coverage, - "genhtml_branch_field_width" => \$br_field_width, - "genhtml_sort" => \$sort, - "genhtml_charset" => \$charset, - "genhtml_desc_html" => \$rc_desc_html, - "genhtml_demangle_cpp" => \$demangle_cpp, - "genhtml_demangle_cpp_tool" => \$demangle_cpp_tool, - "genhtml_demangle_cpp_params" => \$demangle_cpp_params, - "genhtml_missed" => \$opt_missed, - "lcov_function_coverage" => \$lcov_func_coverage, - "lcov_branch_coverage" => \$lcov_branch_coverage, - }); + my $hash = $self->[$covtype]->[DATA]; + my $found = $hash->{found}; + my $hit = $hash->{hit}; + + return $found - $hit; } -# Copy related values if not specified -$fn_hi_limit = $hi_limit if (!defined($fn_hi_limit)); -$fn_med_limit = $med_limit if (!defined($fn_med_limit)); -$br_hi_limit = $hi_limit if (!defined($br_hi_limit)); -$br_med_limit = $med_limit if (!defined($br_med_limit)); -$func_coverage = $lcov_func_coverage if (!defined($func_coverage)); -$br_coverage = $lcov_branch_coverage if (!defined($br_coverage)); +sub contains_owner +{ + my ($self, $owner) = @_; + return exists($self->[LINE_DATA]->[OWNERS]->{$owner}); +} -# Parse command 
line options -if (!GetOptions("output-directory|o=s" => \$output_directory, - "title|t=s" => \$test_title, - "description-file|d=s" => \$desc_filename, - "keep-descriptions|k" => \$keep_descriptions, - "css-file|c=s" => \$css_filename, - "baseline-file|b=s" => \$base_filename, - "prefix|p=s" => \@opt_dir_prefix, - "num-spaces=i" => \$tab_size, - "no-prefix" => \$no_prefix, - "no-sourceview" => \$no_sourceview, - "show-details|s" => \$show_details, - "frames|f" => \$frames, - "highlight" => \$highlight, - "legend" => \$legend, - "quiet|q" => \$quiet, - "help|h|?" => \$help, - "version|v" => \$version, - "html-prolog=s" => \$html_prolog_file, - "html-epilog=s" => \$html_epilog_file, - "html-extension=s" => \$html_ext, - "html-gzip" => \$html_gzip, - "function-coverage" => \$func_coverage, - "no-function-coverage" => \$no_func_coverage, - "branch-coverage" => \$br_coverage, - "no-branch-coverage" => \$no_br_coverage, - "sort" => \$sort, - "no-sort" => \$no_sort, - "demangle-cpp" => \$demangle_cpp, - "ignore-errors=s" => \@opt_ignore_errors, - "config-file=s" => \$opt_config_file, - "rc=s%" => \%opt_rc, - "precision=i" => \$default_precision, - "missed" => \$opt_missed, - )) -{ - print(STDERR "Use $tool_name --help to get usage information\n"); - exit(1); -} else { - # Merge options - if ($no_func_coverage) { - $func_coverage = 0; - } - if ($no_br_coverage) { - $br_coverage = 0; - } +sub owners +{ + # return possibly empty list of line owners in this file + # - filter only those which have 'missed' lines + my ($self, $showAll, $covType) = @_; + + (!defined($covType) || + $covType == LINE_DATA || + $covType == BRANCH_DATA || + $covType eq MCDC_DATA) or + die("unsupported coverage type '$covType'"); + + my $hash = $self->[defined($covType) ? $covType : LINE_DATA]->[OWNERS]; + + return keys(%$hash) + if $showAll; + + my @rtn; + OWNER: + foreach my $name (keys(%$hash)) { + my $h = $hash->{$name}; + foreach my $tla ('UNC', 'UBC', 'UIC', 'LBC') { + if (exists($h->{$tla})) { + die("unexpected 0 (zero) value for $tla of $name in $self->path()" + ) if (0 == $h->{$tla}); + push(@rtn, $name); + next OWNER; + } + } + } + return @rtn; +} - # Merge sort options - if ($no_sort) { - $sort = 0; - } +sub owner_tlaCount +{ + my ($self, $name, $tla, $covType) = @_; + die("$name not found in owner data for $self->path()") + unless exists($self->[LINE_DATA]->[OWNERS]->{$name}); + + return 0 # not supported, yet + if $covType == FUNCTION_DATA; + + (!defined($covType) || + $covType == LINE_DATA || + $covType == BRANCH_DATA || + $covType == MCDC_DATA) or + die("unsupported coverage type '$covType'"); + + my $hash = + $self->[defined($covType) ? $covType : LINE_DATA]->[OWNERS]->{$name}; + return $hash->{$tla} + if (exists($hash->{$tla})); + + if ($tla eq "found") { + my $total = 0; + foreach my $k (keys(%$hash)) { + # count only code that can be hit (ie., not excluded) + $total += $hash->{$k} + if ('EUB' ne $k && + 'ECB' ne $k); + } + return $total; + } elsif ($tla eq "hit") { + my $hit = 0; + foreach my $k ('CBC', 'GBC', 'GIC', 'GNC') { + $hit += $hash->{$k} + if (exists($hash->{$k})); + } + return $hit; + } elsif ($tla eq "missed") { + my $missed = 0; + foreach my $k ('UBC', 'UNC', 'UIC', 'LBC') { + $missed += $hash->{$k} + if (exists($hash->{$k})); + } + return $tla eq "missed" ? $missed : -$missed; + } + die("unexpected TLA $tla") + unless exists($tlaLocation{$tla}); + return 0; } -@info_filenames = @ARGV; +sub hasOwnerInfo +{ + my $self = shift; + + return %{$self->[LINE_DATA]->[OWNERS]} ? 
1 : 0; +} -# Check for help option -if ($help) +sub hasDateInfo { - print_usage(*STDOUT); - exit(0); + my $self = shift; + # we get date- and owner information at the same time from the + # annotation-script - so, if we have owner info, then we have date info too. + return %{$self->[LINE_DATA]->[OWNERS]} ? 1 : 0; } -# Check for version option -if ($version) +sub findOwnerList { - print("$tool_name: $lcov_version\n"); - exit(0); + # return [ [owner, lineCovData, branchCovData, functionCov]] for each owner + # where lineCovData = [missedCount, totalCount] + # branchCovData = [missed, total] or undef if not enabled + # functionCov = [missed, total] or undef if not enabled + # - sorted in descending order number of missed lines + my ($self, $callback_type, $truncate_me, $all) = @_; + + my @owners; + foreach my $owner (keys(%{$self->[LINE_DATA]->[OWNERS]})) { + my $lineMissed = $self->owner_tlaCount($owner, 'missed', LINE_DATA); + my $branchMissed = + $lcovutil::br_coverage ? + $self->owner_tlaCount($owner, 'missed', BRANCH_DATA) : + 0; + my $funcMissed = + $lcovutil::func_coverage ? + $self->owner_tlaCount($owner, 'missed', FUNCTION_DATA) : + 0; + my $mcdcMissed = + $lcovutil::mcdc_coverage ? + $self->owner_tlaCount($owner, 'missed', MCDC_DATA) : + 0; + # filter owners who have unexercised code, if requested + + if ($all || + (0 != $lineMissed || 0 != $branchMissed || 0 != $funcMissed)) { + my $lineCb = OwnerDetailCallback->new($self, $owner, LINE_DATA); + my $branchCb = OwnerDetailCallback->new($self, $owner, BRANCH_DATA); + my $mcdcCb = OwnerDetailCallback->new($self, $owner, MCDC_DATA); + my $functionCb = + OwnerDetailCallback->new($self, $owner, FUNCTION_DATA); + my $lineTotal = $self->owner_tlaCount($owner, 'found', LINE_DATA); + my $branchTotal = + $lcovutil::br_coverage ? + $self->owner_tlaCount($owner, 'found', BRANCH_DATA) : + 0; + my $mcdcTotal = + $lcovutil::mcdc_coverage ? + $self->owner_tlaCount($owner, 'found', MCDC_DATA) : + 0; + my $funcTotal = + $lcovutil::func_coverage ? + $self->owner_tlaCount($owner, 'found', FUNCTION_DATA) : + 0; + push(@owners, + [$owner, + [$lineMissed, $lineTotal, $lineCb], + [$branchMissed, $branchTotal, $branchCb], + [$funcMissed, $funcTotal, $functionCb], + [$mcdcMissed, $mcdcTotal, $mcdcCb] + ]); + } + } + @owners = sort({ + $b->[1]->[0] <=> $a->[1]->[0] || # missed + $b->[1]->[1] <=> $a->[1]->[1] || # then total + $a->[0] cmp $b->[0] + } @owners); # then by name + my $truncated; + if ($truncate_me && + defined($ownerTableElements) && + $ownerTableElements < scalar(@owners) && + (0 == scalar(@truncateOwnerTableLevels) || + grep(/$callback_type/, @truncateOwnerTableLevels)) + ) { + # don't truncate the 'primary' key owner table + + $truncated = (scalar(@owners) - $ownerTableElements); + #lcovutil::info("truncating $truncated elements in header table\n"); + splice(@owners, $ownerTableElements); + } else { + $truncated = 0; + } + return (scalar(@owners) ? \@owners : undef, $truncated); } -# Determine which errors the user wants us to ignore -parse_ignore_errors(@opt_ignore_errors); +sub append +{ + my ($self, $record) = @_; + + # keep track of the records that get merged into me.. + defined($record->[NAME]) or + die("attempt to anonymous SummaryInfo record"); + !exists($self->[SOURCES]->{$record->[NAME]}) or + die("duplicate merge record " . $record->[NAME]); + $self->[SOURCES]->{$record->[NAME]} = $record; + + die($record->name() . " already has parent " . 
$record->parent()->name()) + if (defined($record->parent()) && $record->[PARENT] != $self); + $record->[PARENT] = $self + if !defined($record->parent()); + + foreach my $group (LINE_DATA, FUNCTION_DATA, BRANCH_DATA, MCDC_DATA) { + my $mine = $self->[$group]->[DATA]; + my $yours = $record->[$group]->[DATA]; + while (my ($key, $value) = each(%$yours)) { + $mine->{$key} += $yours->{$key}; + } + } + + # there will be no date info if we didn't also collect owner data + # merge the date- and owner data, if if we aren't going to display it + # (In future, probably want to serialize the data for future processing) + if (%{$record->[LINE_DATA]->[OWNERS]}) { + foreach my $covType (LINE_DATA, FUNCTION_DATA, BRANCH_DATA, MCDC_DATA) { + for (my $bin = 0; $bin <= $#ageGroupHeader; ++$bin) { + foreach my $key (keys %{$self->[$covType]->[DATA]}) { + # duplicate line-coverage buckets + my $ageval = $self->age_sample($bin); + if ($covType == LINE_DATA) { + $self->lineCovCount($key, "age", $ageval, + $record->lineCovCount($key, "age", $ageval)); + } elsif ($covType == BRANCH_DATA) { + $self->branchCovCount($key, "age", $ageval, + $record->branchCovCount($key, "age", $ageval)); + } elsif ($covType == MCDC_DATA) { + $self->mcdcCovCount($key, "age", $ageval, + $record->branchCovCount($key, "age", $ageval)); + } else { + $self->functionCovCount($key, 'age', $ageval, + $record->functionCovCount($key, "age", $ageval)); + } + } + } + + my $ownerList = $self->[$covType]->[OWNERS]; + while (my ($name, $yours) = each(%{$record->[$covType]->[OWNERS]})) + { + if (!exists($ownerList->{$name})) { + $ownerList->{$name} = {}; + } + my $mine = $ownerList->{$name}; + while (my ($tla, $count) = each(%$yours)) { + if (exists($mine->{$tla})) { + $mine->{$tla} += $count; + } else { + $mine->{$tla} = $count; + } + } + } + } + } + return $self; +} -# Split the list of prefixes if needed -parse_dir_prefix(@opt_dir_prefix); +sub age_sample +{ + my ($self, $i) = @_; + my $bin = $self->[LINE_DATA]->[AGE]->[$i]; + return ($i < $#ageGroupHeader) ? $bin->{_UB} : ($bin->{_LB} + 1); +} -# Check for info filename -if (!@info_filenames) +sub lineCovCount { - die("No filename specified\n". 
- "Use $tool_name --help to get usage information\n"); + my ($self, $key, $group, $age, $delta) = @_; + + $delta = 0 unless defined($delta); + + if ($key eq 'missed') { + my $found = $self->lineCovCount('found', $group, $age); + my $hit = $self->lineCovCount('hit', $group, $age); + return $found - $hit; + } + + if ($group eq "age") { + my $a = $self->[LINE_DATA]->[AGE]; + my $bin = SummaryInfo::findAgeBin($age); + exists($a->[$bin]) && exists($a->[$bin]->{$key}) or + die("unexpected key '$key' for bin '$bin'"); + $a->[$bin]->{$key} += $delta; + return $a->[$bin]->{$key}; + } + + my $d = $self->[$group]->[DATA]; + defined($d) or + die("SummaryInfo::value: unrecognized group $group\n"); + defined($d->{$key}) or + die("SummaryInfo::value: unrecognized key $key\n"); + + $d->{$key} += $delta; + return $d->{$key}; } -# Generate a title if none is specified -if (!$test_title) +sub branchCovCount { - if (scalar(@info_filenames) == 1) - { - # Only one filename specified, use it as title - $test_title = basename($info_filenames[0]); - } - else - { - # More than one filename specified, used default title - $test_title = "unnamed"; - } + my ($self, $key, $group, $age, $delta) = @_; + + $delta = 0 unless defined($delta); + + if ($key eq 'missed') { + my $found = $self->branchCovCount('found', $group, $age); + my $hit = $self->branchCovCount('hit', $group, $age); + return $found - $hit; + } + + my $branch = $self->[BRANCH_DATA]; + if ($group eq "age") { + my $a = $branch->[AGE]; + my $bin = SummaryInfo::findAgeBin($age); + # LCOV_EXCL_START + unless (exists($a->[$bin]) && exists($a->[$bin]->{$key})) { + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + "unexpected key '$key' for bin '$bin'"); + return; + } + # LCOV_EXCL_STOP + $a->[$bin]->{$key} += $delta; + return $a->[$bin]->{$key}; + } + my $d = $branch->[DATA]; + defined($d) or + die("SummaryInfo::value: unrecognized branch group $group\n"); + defined($d->{$key}) or + die("SummaryInfo::value: unrecognized branch key $key\n"); + + $d->{$key} += $delta; + return $d->{$key}; } -# Make sure css_filename is an absolute path (in case we're changing -# directories) -if ($css_filename) +sub mcdcCovCount { - if (!($css_filename =~ /^\/(.*)$/)) - { - $css_filename = $cwd."/".$css_filename; - } + my ($self, $key, $group, $age, $delta) = @_; + + $delta = 0 unless defined($delta); + + if ($key eq 'missed') { + my $found = $self->mcdcCovCount('found', $group, $age); + my $hit = $self->mcdcCovCount('hit', $group, $age); + return $found - $hit; + } + + my $mcdc = $self->[MCDC_DATA]; + if ($group eq "age") { + my $a = $mcdc->[AGE]; + my $bin = SummaryInfo::findAgeBin($age); + # LCOV_EXCL_START + unless (exists($a->[$bin]) && exists($a->[$bin]->{$key})) { + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + "unexpected key '$key' for bin '$bin'"); + return; + } + # LCOV_EXCL_STOP + $a->[$bin]->{$key} += $delta; + return $a->[$bin]->{$key}; + } + my $d = $mcdc->[DATA]; + defined($d) or + die("SummaryInfo::value: unrecognized MC/DC group $group\n"); + defined($d->{$key}) or + die("SummaryInfo::value: unrecognized MC/DC key $key\n"); + + $d->{$key} += $delta; + return $d->{$key}; } -# Make sure tab_size is within valid range -if ($tab_size < 1) +sub functionCovCount { - print(STDERR "ERROR: invalid number of spaces specified: ". 
- "$tab_size!\n"); - exit(1); + my ($self, $key, $group, $age, $delta) = @_; + + $delta = 0 unless defined($delta); + + if ($key eq 'missed') { + my $found = $self->functionCovCount('found', $group, $age); + my $hit = $self->functionCovCount('hit', $group, $age); + return $found - $hit; + } + + my $func = $self->[FUNCTION_DATA]; + if ($group eq "age") { + my $a = $func->[AGE]; + my $bin = SummaryInfo::findAgeBin($age); + # LCOV_EXCL_START + unless (exists($a->[$bin]) && exists($a->[$bin]->{$key})) { + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + "unexpected key '$key' for bin '$bin'"); + return; + } + # LCOV_EXCL_STOP + $a->[$bin]->{$key} += $delta; + return $a->[$bin]->{$key}; + } + my $d = $func->[DATA]; + defined($d) or + die("SummaryInfo::value: unrecognized function group $group\n"); + defined($d->{$key}) or + die("SummaryInfo::value: unrecognized function key $key\n"); + + $d->{$key} += $delta; + return $d->{$key}; } -# Get HTML prolog and epilog -$html_prolog = get_html_prolog($html_prolog_file); -$html_epilog = get_html_epilog($html_epilog_file); +sub removeLine +{ + my ($self, $lineData) = @_; + + my $l = $self->[LINE_DATA]->[DATA]; + if ($lineData->in_curr()) { + $l->{hit} -= $lineData->curr_count() != 0; + $l->{found} -= 1; + } + if ($lcovutil::br_coverage) { + my $b = $lineData->current_branch(); + if (defined($b)) { + my ($found, $hit) = $b->totals(); + my $br = $self->[BRANCH_DATA]->[DATA]; + $br->{hit} -= $hit; + $br->{found} -= $found; + } + } + if ($lcovutil::mcdc_coverage) { + my $b = $lineData->current_mcdc(); + if (defined($b)) { + my ($found, $hit) = $b->totals(); + my $br = $self->[MCDC_DATA]->[DATA]; + $br->{hit} -= $hit; + $br->{found} -= $found; + } + } + if ($lcovutil::func_coverage) { + my $f = $lineData->current_function(); + if (defined($f)) { + my $fn = $self->[FUNCTION_DATA]->[DATA]; + $fn->{hit} -= $f->hit() != 0; + $fn->{found} -= 1; + } + } +} -# Issue a warning if --no-sourceview is enabled together with --frames -if ($no_sourceview && defined($frames)) +sub is_empty { - warn("WARNING: option --frames disabled because --no-sourceview ". - "was specified!\n"); - $frames = undef; + my $self = shift; + my $t = $self->[LINE_DATA]->[DATA]; + foreach my $category (keys %$t) { + return 0 if 0 != $t->{$category}; + } + return 1; } -# Issue a warning if --no-prefix is enabled together with --prefix -if ($no_prefix && @dir_prefix) +sub type_count { - warn("WARNING: option --prefix disabled because --no-prefix was ". - "specified!\n"); - @dir_prefix = undef; + my ($type, $status, $self, $delta) = @_; + my $l = $self->[$type]->[DATA]; + $l->{$status} += $delta if defined($delta); + return $l->{$status}; } -@fileview_sortlist = ($SORT_FILE); -@funcview_sortlist = ($SORT_FILE); +sub lines_found +{ + return type_count(LINE_DATA, 'found', @_); +} -if ($sort) { - push(@fileview_sortlist, $SORT_LINE); - push(@fileview_sortlist, $SORT_FUNC) if ($func_coverage); - push(@fileview_sortlist, $SORT_BRANCH) if ($br_coverage); - push(@funcview_sortlist, $SORT_LINE); +sub lines_hit +{ + return type_count(LINE_DATA, 'hit', @_); } -if ($frames) +sub function_found { - # Include genpng code needed for overview image generation - do("$tool_dir/genpng"); + return type_count(FUNCTION_DATA, 'found', @_); } -# Ensure that the c++filt tool is available when using --demangle-cpp -if ($demangle_cpp) +sub function_hit { - if (system_no_output(3, $demangle_cpp_tool, "--version")) { - die("ERROR: could not find $demangle_cpp_tool tool needed for ". 
- "--demangle-cpp\n"); - } + return type_count(FUNCTION_DATA, 'hit', @_); } -# Make sure precision is within valid range -if ($default_precision < 1 || $default_precision > 4) +sub branch_found { - die("ERROR: specified precision is out of range (1 to 4)\n"); + return type_count(BRANCH_DATA, 'found', @_); } +sub branch_hit +{ + return type_count(BRANCH_DATA, 'hit', @_); +} -# Make sure output_directory exists, create it if necessary -if ($output_directory) +sub mcdc_found { - stat($output_directory); + return type_count(MCDC_DATA, 'found', @_); +} - if (! -e _) - { - create_sub_dir($output_directory); - } +sub mcdc_hit +{ + return type_count(MCDC_DATA, 'hit', @_); } -# Do something -gen_html(); +sub selected +{ + my $rtn; + eval { $rtn = $selectCallback->select(@_); }; + if ($@) { + $rtn = 1; # return everything + lcovutil::ignorable_error($lcovutil::ERROR_CALLBACK, + "select(..) failed: $@"); + } + return $rtn; +} -exit(0); +sub tlaSummary +{ + my ($self, $t) = @_; + my $type; + if ($t eq 'line') { + $type = LINE_DATA; + } elsif ($t eq 'function') { + $type = FUNCTION_DATA; + } elsif ($t eq 'mcdc') { + $type = MCDC_DATA; + } else { + die("unknown type $t") unless 'branch' eq $t; + $type = BRANCH_DATA; + } + my $str = ''; + my $sep = ''; + my $d = $self->[$type]->[DATA]; + foreach my $tla (@tlaPriorityOrder) { + my $count = $d->{$tla}; + next if $count == 0; + $str .= $sep . $tla . ':' . $count; + $sep = ' '; + } + return $str; +} +sub checkCoverageCriteria +{ + my $self = shift; + my $type = $self->type(); + + if ($type eq 'top') { + # simplistic top-level criteria.. + CoverageCriteria::check_failUnder($main::current_data); + } + return + unless ($CoverageCriteria::criteriaCallback && + (0 == scalar(@CoverageCriteria::criteriaCallbackLevels) || + grep(/$type/, @CoverageCriteria::criteriaCallbackLevels))); + + my $start = Time::HiRes::gettimeofday(); + my %data; + foreach my $t (LINE_DATA, FUNCTION_DATA, BRANCH_DATA, MCDC_DATA) { + my $key = type2str($t); + my $d = $self->[$t]->[DATA]; + foreach my $k (keys %$d) { + # $k will be 'hit', 'found' or one of the TLAs + my $count = $d->{$k}; + next if $count == 0; + if (exists $data{$key}) { + $data{$key}->{$k} = $count; + } else { + $data{$key} = {$k => $count}; + } + } + } + if (grep(/^date$/, @CoverageCriteria::criteriaCallbackTypes)) { + foreach my $t (LINE_DATA, FUNCTION_DATA, BRANCH_DATA, MCDC_DATA) { + my $key = type2str($t); + + next unless exists($self->[$t]->[AGE]); + my $ageBins = $self->[$t]->[AGE]; + foreach my $i (0 .. $#cutpoints + 1) { + my $bin = $ageBins->[$i]; + foreach my $k (@tlaPriorityOrder, 'found', 'hit') { + my $count = $bin->{$k}; + next if $count == 0; + my $b; + if (exists($data{$key})) { + $b = $data{$key}; + } else { + $b = {}; + $data{$key} = $b; + } + if (exists($b->{$i})) { + $b->{$i}->{$k} = $count; + } else { + $b->{$i} = {$k => $count}; + } + } + } + } + } + if (grep(/^owner$/, @CoverageCriteria::criteriaCallbackTypes)) { + foreach my $t (LINE_DATA, BRANCH_DATA, MCDC_DATA) { + my $key = type2str($t); + next unless exists($self->[$t]->[OWNERS]); + my $ownerBins = $self->[$t]->[OWNERS]; + while (my ($owner, $bin) = each(%$ownerBins)) { + my $b; + if (exists($data{$key})) { + $b = $data{$key}; + } else { + $b = {}; + $data{$key} = $b; + } + my $d = {}; + $b->{$owner} = $d; + foreach my $k (@tlaPriorityOrder, 'found', 'hit') { + next unless exists($bin->{$k}); + my $count = $bin->{$k}; + next if $count == 0; + $d->{$k} = $count; + } + } + } + } + my $name = $self->type() eq 'top' ? 
'top' : $self->name(); + my $cmd = + join(' ', @CoverageCriteria::coverageCriteriaScript) . + ' \'' . $name . '\' ' . $self->type() . ' \'json_encoded_data\''; + # command: script name (top|dir|file) jsonString args.. + lcovutil::info(1, "criteria: '$cmd'\n"); + CoverageCriteria::executeCallback($self->type(), $name, \%data); + my $end = Time::HiRes::gettimeofday(); + $lcovutil::profileData{criteria}{$name} = $end - $start; +} +package OwnerDetailCallback; +# somewhat of a hack...I want a class which has a callback 'get' +# that matches the SummaryInfo::get method - but returns owner-specific +# information -# -# print_usage(handle) -# -# Print usage information. -# +use constant { + SUMMARY => 0, + OWNER => 1, + TYPE => 2, +}; -sub print_usage(*) +sub new { - local *HANDLE = $_[0]; + my ($class, $summary, $owner, $covType) = @_; + die("missing type") unless defined($covType); - print(HANDLE <owner(); +} -Misc: - -h, --help Print this help, then exit - -v, --version Print version number, then exit - -q, --quiet Do not print progress messages - --config-file FILENAME Specify configuration file location - --rc SETTING=VALUE Override configuration file setting - --ignore-errors ERRORS Continue after ERRORS (source) +sub cb_type +{ + my $self = shift; + return 'owner'; +} -Operation: - -o, --output-directory OUTDIR Write HTML output to OUTDIR - -s, --show-details Generate detailed directory view - -d, --description-file DESCFILE Read test case descriptions from DESCFILE - -k, --keep-descriptions Do not remove unused test descriptions - -b, --baseline-file BASEFILE Use BASEFILE as baseline file - -p, --prefix PREFIX Remove PREFIX from all directory names - --no-prefix Do not remove prefix from directory names - --(no-)function-coverage Enable (disable) function coverage display - --(no-)branch-coverage Enable (disable) branch coverage display +sub get +{ + my ($self, $key, $type) = @_; -HTML output: - -f, --frames Use HTML frames for source code view - -t, --title TITLE Display TITLE in header of all pages - -c, --css-file CSSFILE Use external style sheet file CSSFILE - --no-source Do not create source code view - --num-spaces NUM Replace tabs with NUM spaces in source view - --highlight Highlight lines with converted-only data - --legend Include color legend in HTML output - --html-prolog FILE Use FILE as HTML prolog for generated pages - --html-epilog FILE Use FILE as HTML epilog for generated pages - --html-extension EXT Use EXT as filename extension for pages - --html-gzip Use gzip to compress HTML - --(no-)sort Enable (disable) sorted coverage views - --demangle-cpp Demangle C++ function names - --precision NUM Set precision of coverage rate - --missed Show miss counts as negative numbers + my ($summary, $owner, $covType) = @$self; -For more information see: $lcov_url -END_OF_USAGE - ; + die("unexpected type $type") + unless !defined($type) || ($type eq $covType); + + return $summary->owner_tlaCount($owner, $key, $covType); } +sub owner +{ + my $self = shift; + return $self->[OWNER]; +} -# -# get_rate(found, hit) -# -# Return a relative value for the specified found&hit values -# which is used for sorting the corresponding entries in a -# file list. 
-# +sub covType +{ + my $self = shift; + return $self->[TYPE]; +} + +package DateDetailCallback; +# as above: callback class to return date-specific TLA counts -sub get_rate($$) +use constant { + SUMMARY => 0, + AGE => 1, + TYPE => 2, + BIN => 3, +}; + +sub new { - my ($found, $hit) = @_; + my ($class, $summary, $age, $covType) = @_; + $covType = SummaryInfo::LINE_DATA unless defined($covType); - if ($found == 0) { - return 10000; - } - return int($hit * 1000 / $found) * 10 + 2 - (1 / $found); + my $self = [$summary, $age, $covType, SummaryInfo::findAgeBin($age)]; + bless $self, $class; + return $self; } +sub get +{ + my ($self, $key, $type) = @_; -# -# get_overall_line(found, hit, name_singular, name_plural) -# -# Return a string containing overall information for the specified -# found/hit data. -# + my ($summary, $age, $covType) = @$self; + + die("unexpected type $type") + unless (!defined($type) || ($type == $covType)); + + return $summary->lineCovCount($key, 'age', $age) + if $covType == SummaryInfo::LINE_DATA; + + return $summary->functionCovCount($key, 'age', $age) + if $covType == SummaryInfo::FUNCTION_DATA; + + return $summary->mcdcCovCount($key, 'age', $age) + if $covType == SummaryInfo::MCDC_DATA; + + die('$covType coverage not yet implemented') + if $covType != SummaryInfo::BRANCH_DATA; + + return $summary->branchCovCount($key, 'age', $age); +} -sub get_overall_line($$$$) +sub label { - my ($found, $hit, $name_sn, $name_pl) = @_; - my $name; + my $self = shift; + return $self->age(); +} - return "no data found" if (!defined($found) || $found == 0); - $name = ($found == 1) ? $name_sn : $name_pl; - return rate($hit, $found, "% ($hit of $found $name)"); +sub cb_type +{ + my $self = shift; + return 'date'; } +sub age +{ + my $self = shift; + return $self->[AGE]; +} -# -# print_overall_rate(ln_do, ln_found, ln_hit, fn_do, fn_found, fn_hit, br_do -# br_found, br_hit) -# -# Print overall coverage rates for the specified coverage types. -# +sub bin +{ + my $self = shift; + return $self->[BIN]; +} + +sub covType +{ + my $self = shift; + return $self->[TYPE]; +} + +package FileOrDirectoryCallback; +# callback class used by 'write_file_table' to retrieve count of the +# various coverpoint categories in the file or directory (i.e., total +# number). +# Other callbacks classes are used to retrieve per-owner counts, etc. 
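+#
+# Rough usage sketch (hypothetical caller and variable names; in practice
+# write_file_table drives this):
+#   my $cb = FileOrDirectoryCallback->new($path, $summaryInfo);
+#   my ($ln_found, $ln_hit, $fn_found, $fn_hit, $br_found, $br_hit,
+#       $mcdc_found, $mcdc_hit, $page_link, $summary, $srcFile) = $cb->data();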
+ +sub new +{ + # dirSummary: SummaryInfo object + my ($class, $path, $summary) = @_; -sub print_overall_rate($$$$$$$$$) -{ - my ($ln_do, $ln_found, $ln_hit, $fn_do, $fn_found, $fn_hit, - $br_do, $br_found, $br_hit) = @_; + my $self = [$path, $summary]; - info("Overall coverage rate:\n"); - info(" lines......: %s\n", - get_overall_line($ln_found, $ln_hit, "line", "lines")) - if ($ln_do); - info(" functions..: %s\n", - get_overall_line($fn_found, $fn_hit, "function", "functions")) - if ($fn_do); - info(" branches...: %s\n", - get_overall_line($br_found, $br_hit, "branch", "branches")) - if ($br_do); -} - -sub get_fn_list($) -{ - my ($info) = @_; - my %fns; - my @result; - - foreach my $filename (keys(%{$info})) { - my $data = $info->{$filename}; - my $funcdata = $data->{"func"}; - my $sumfnccount = $data->{"sumfnc"}; - - if (defined($funcdata)) { - foreach my $func_name (keys(%{$funcdata})) { - $fns{$func_name} = 1; - } - } - - if (defined($sumfnccount)) { - foreach my $func_name (keys(%{$sumfnccount})) { - $fns{$func_name} = 1; - } - } - } - - @result = keys(%fns); - - return \@result; -} - -# -# rename_functions(info, conv) -# -# Rename all function names in INFO according to CONV: OLD_NAME -> NEW_NAME. -# In case two functions demangle to the same name, assume that they are -# different object code implementations for the same source function. -# - -sub rename_functions($$) -{ - my ($info, $conv) = @_; - - foreach my $filename (keys(%{$info})) { - my $data = $info->{$filename}; - my $funcdata; - my $testfncdata; - my $sumfnccount; - my %newfuncdata; - my %newsumfnccount; - my $f_found; - my $f_hit; - - # funcdata: function name -> line number - $funcdata = $data->{"func"}; - foreach my $fn (keys(%{$funcdata})) { - my $cn = $conv->{$fn}; - - # Abort if two functions on different lines map to the - # same demangled name. - if (defined($newfuncdata{$cn}) && - $newfuncdata{$cn} != $funcdata->{$fn}) { - die("ERROR: Demangled function name $cn ". - "maps to different lines (". - $newfuncdata{$cn}." vs ". - $funcdata->{$fn}.") in $filename\n"); - } - $newfuncdata{$cn} = $funcdata->{$fn}; - } - $data->{"func"} = \%newfuncdata; - - # testfncdata: test name -> testfnccount - # testfnccount: function name -> execution count - $testfncdata = $data->{"testfnc"}; - foreach my $tn (keys(%{$testfncdata})) { - my $testfnccount = $testfncdata->{$tn}; - my %newtestfnccount; - - foreach my $fn (keys(%{$testfnccount})) { - my $cn = $conv->{$fn}; - - # Add counts for different functions that map - # to the same name. - $newtestfnccount{$cn} += - $testfnccount->{$fn}; - } - $testfncdata->{$tn} = \%newtestfnccount; - } - - # sumfnccount: function name -> execution count - $sumfnccount = $data->{"sumfnc"}; - foreach my $fn (keys(%{$sumfnccount})) { - my $cn = $conv->{$fn}; - - # Add counts for different functions that map - # to the same name. - $newsumfnccount{$cn} += $sumfnccount->{$fn}; - } - $data->{"sumfnc"} = \%newsumfnccount; - - # Update function found and hit counts since they may have - # changed - $f_found = 0; - $f_hit = 0; - foreach my $fn (keys(%newsumfnccount)) { - $f_found++; - $f_hit++ if ($newsumfnccount{$fn} > 0); - } - $data->{"f_found"} = $f_found; - $data->{"f_hit"} = $f_hit; - } + bless $self, $class; + return $self; } -# -# gen_html() -# -# Generate a set of HTML pages from contents of .info file INFO_FILENAME. -# Files will be written to the current directory. If provided, test case -# descriptions will be read from .tests file TEST_FILENAME and included -# in ouput. -# -# Die on error. 
-# +# page_link is HTML reference to file table page next level down - +# for top-level page: link to directory-level page +# for directory-level page: link to source file details +sub page_link +{ + my $self = shift; + my $data = $self->summary(); + my $page_link = ''; + $page_link = + ($main::flat && '.' ne $data->relativeDir()) ? + File::Spec->catfile($data->relativeDir(), $self->name()) : + $self->name(); + if ($data->type() eq 'file') { + if ($main::no_sourceview) { + return ""; + } + $page_link .= ".gcov"; + $page_link .= ".frameset" + if ($main::frames); + } else { + $page_link =~ s/^$lcovutil::dirseparator//; + $page_link .= $lcovutil::dirseparator . "index"; + } + $page_link .= '.' . $main::html_ext; + return $lcovutil::case_insensitive ? lc($page_link) : $page_link; +} -sub gen_html() +sub data { - local *HTML_HANDLE; - my %overview; - my %base_data; - my $lines_found; - my $lines_hit; - my $fn_found; - my $fn_hit; - my $br_found; - my $br_hit; - my $overall_found = 0; - my $overall_hit = 0; - my $total_fn_found = 0; - my $total_fn_hit = 0; - my $total_br_found = 0; - my $total_br_hit = 0; - my $dir_name; - my $link_name; - my @dir_list; - my %new_info; - - # Read in all specified .info files - foreach (@info_filenames) - { - %new_info = %{read_info_file($_)}; - - # Combine %new_info with %info_data - %info_data = %{combine_info_files(\%info_data, \%new_info)}; - } - - info("Found %d entries.\n", scalar(keys(%info_data))); - - # Read and apply baseline data if specified - if ($base_filename) - { - # Read baseline file - info("Reading baseline file $base_filename\n"); - %base_data = %{read_info_file($base_filename)}; - info("Found %d entries.\n", scalar(keys(%base_data))); - - # Apply baseline - info("Subtracting baseline data.\n"); - %info_data = %{apply_baseline(\%info_data, \%base_data)}; - } - - @dir_list = get_dir_list(keys(%info_data)); - - if ($no_prefix) - { - # User requested that we leave filenames alone - info("User asked not to remove filename prefix\n"); - } - elsif (! @dir_prefix) - { - # Get prefix common to most directories in list - my $prefix = get_prefix(1, keys(%info_data)); - - if ($prefix) - { - info("Found common filename prefix \"$prefix\"\n"); - $dir_prefix[0] = $prefix; - - } - else - { - info("No common filename prefix found!\n"); - $no_prefix=1; - } - } - else - { - my $msg = "Using user-specified filename prefix "; - for my $i (0 .. $#dir_prefix) - { - $dir_prefix[$i] =~ s/\/+$//; - $msg .= ", " unless 0 == $i; - $msg .= "\"" . $dir_prefix[$i] . "\""; - } - info($msg . "\n"); - } - - - # Read in test description file if specified - if ($desc_filename) - { - info("Reading test description file $desc_filename\n"); - %test_description = %{read_testfile($desc_filename)}; - - # Remove test descriptions which are not referenced - # from %info_data if user didn't tell us otherwise - if (!$keep_descriptions) - { - remove_unused_descriptions(); - } - } - - # Change to output directory if specified - if ($output_directory) - { - chdir($output_directory) - or die("ERROR: cannot change to directory ". 
- "$output_directory!\n"); - } - - info("Writing .css and .png files.\n"); - write_css_file(); - write_png_files(); - - if ($html_gzip) - { - info("Writing .htaccess file.\n"); - write_htaccess_file(); - } - - info("Generating output.\n"); - - # Process each subdirectory and collect overview information - foreach $dir_name (@dir_list) - { - ($lines_found, $lines_hit, $fn_found, $fn_hit, - $br_found, $br_hit) - = process_dir($dir_name); - - # Handle files in root directory gracefully - $dir_name = "root" if ($dir_name eq ""); - - # Remove prefix if applicable - if (!$no_prefix && @dir_prefix) - { - # Match directory names beginning with one of @dir_prefix - $dir_name = apply_prefix($dir_name,@dir_prefix); - } - - # Generate name for directory overview HTML page - if ($dir_name =~ /^\/(.*)$/) - { - $link_name = substr($dir_name, 1)."/index.$html_ext"; - } - else - { - $link_name = $dir_name."/index.$html_ext"; - } - - $overview{$dir_name} = [$lines_found, $lines_hit, $fn_found, - $fn_hit, $br_found, $br_hit, $link_name, - get_rate($lines_found, $lines_hit), - get_rate($fn_found, $fn_hit), - get_rate($br_found, $br_hit)]; - $overall_found += $lines_found; - $overall_hit += $lines_hit; - $total_fn_found += $fn_found; - $total_fn_hit += $fn_hit; - $total_br_found += $br_found; - $total_br_hit += $br_hit; - } - - # Generate overview page - info("Writing directory view page.\n"); - - # Create sorted pages - foreach (@fileview_sortlist) { - write_dir_page($fileview_sortname[$_], ".", "", $test_title, - undef, $overall_found, $overall_hit, - $total_fn_found, $total_fn_hit, $total_br_found, - $total_br_hit, \%overview, {}, {}, {}, 0, $_); - } - - # Check if there are any test case descriptions to write out - if (%test_description) - { - info("Writing test case description file.\n"); - write_description_file( \%test_description, - $overall_found, $overall_hit, - $total_fn_found, $total_fn_hit, - $total_br_found, $total_br_hit); - } - - print_overall_rate(1, $overall_found, $overall_hit, - $func_coverage, $total_fn_found, $total_fn_hit, - $br_coverage, $total_br_found, $total_br_hit); - - chdir($cwd); + my $self = shift; + # ($found, $hit, $fn_found, $fn_hit, $br_found, $br_hit, $page_link, + # $fileSummary, $fileDetails) + + my $summary = $self->summary(); + my @rtn; + foreach my $type (SummaryInfo::LINE_DATA, SummaryInfo::FUNCTION_DATA, + SummaryInfo::BRANCH_DATA, SummaryInfo::MCDC_DATA + ) { + my $hash = $summary->[$type]->[SummaryInfo::DATA]; + push(@rtn, $hash->{found}, $hash->{hit}); + } + my $link = $self->page_link(); + my $sourceFile = $summary->fileDetails() + if 'file' eq $summary->type(); + push(@rtn, $link, $summary, $sourceFile); + return @rtn; } -# -# html_create(handle, filename) -# +sub secondaryElementFileData +{ + my ($self, $name) = @_; + my $summary = $self->summary(); + my $sourceFile = $summary->fileDetails() + if 'file' eq $summary->type(); + return [$summary->name(), $summary, $sourceFile, $self->page_link()]; +} -sub html_create($$) +sub name { - my $handle = $_[0]; - my $filename = $_[1]; - - if ($html_gzip) - { - open($handle, "|-", "gzip -c >'$filename'") - or die("ERROR: cannot open $filename for writing ". 
- "(gzip)!\n"); - } - else - { - open($handle, ">", $filename) - or die("ERROR: cannot open $filename for writing!\n"); - } -} - -sub write_dir_page($$$$$$$$$$$$$$$$$) -{ - my ($name, $rel_dir, $base_dir, $title, $trunc_dir, $overall_found, - $overall_hit, $total_fn_found, $total_fn_hit, $total_br_found, - $total_br_hit, $overview, $testhash, $testfnchash, $testbrhash, - $view_type, $sort_type) = @_; - - # Generate directory overview page including details - html_create(*HTML_HANDLE, "$rel_dir/index$name.$html_ext"); - if (!defined($trunc_dir)) { - $trunc_dir = ""; - } - $title .= " - " if ($trunc_dir ne ""); - write_html_prolog(*HTML_HANDLE, $base_dir, "LCOV - $title$trunc_dir"); - write_header(*HTML_HANDLE, $view_type, $trunc_dir, $rel_dir, - $overall_found, $overall_hit, $total_fn_found, - $total_fn_hit, $total_br_found, $total_br_hit, $sort_type); - write_file_table(*HTML_HANDLE, $base_dir, $overview, $testhash, - $testfnchash, $testbrhash, $view_type, $sort_type); - write_html_epilog(*HTML_HANDLE, $base_dir); - close(*HTML_HANDLE); -} - - -# -# process_dir(dir_name) -# - -sub process_dir($) -{ - my $abs_dir = $_[0]; - my $trunc_dir; - my $rel_dir = $abs_dir; - my $base_dir; - my $filename; - my %overview; - my $lines_found; - my $lines_hit; - my $fn_found; - my $fn_hit; - my $br_found; - my $br_hit; - my $overall_found=0; - my $overall_hit=0; - my $total_fn_found=0; - my $total_fn_hit=0; - my $total_br_found = 0; - my $total_br_hit = 0; - my $base_name; - my $extension; - my $testdata; - my %testhash; - my $testfncdata; - my %testfnchash; - my $testbrdata; - my %testbrhash; - my @sort_list; - local *HTML_HANDLE; - - # Remove prefix if applicable - if (!$no_prefix) - { - # Match directory name beginning with one of @dir_prefix - $rel_dir = apply_prefix($rel_dir,@dir_prefix); - } - - $trunc_dir = $rel_dir; - - # Remove leading / - if ($rel_dir =~ /^\/(.*)$/) - { - $rel_dir = substr($rel_dir, 1); - } - - # Handle files in root directory gracefully - $rel_dir = "root" if ($rel_dir eq ""); - $trunc_dir = "root" if ($trunc_dir eq ""); - - $base_dir = get_relative_base_path($rel_dir); - - create_sub_dir($rel_dir); - - # Match filenames which specify files in this directory, not including - # sub-directories - foreach $filename (grep(/^\Q$abs_dir\E\/[^\/]*$/,keys(%info_data))) - { - my $page_link; - my $func_link; - - ($lines_found, $lines_hit, $fn_found, $fn_hit, $br_found, - $br_hit, $testdata, $testfncdata, $testbrdata) = - process_file($trunc_dir, $rel_dir, $filename); - - $base_name = basename($filename); - - if ($no_sourceview) { - $page_link = ""; - } elsif ($frames) { - # Link to frameset page - $page_link = "$base_name.gcov.frameset.$html_ext"; - } else { - # Link directory to source code view page - $page_link = "$base_name.gcov.$html_ext"; - } - $overview{$base_name} = [$lines_found, $lines_hit, $fn_found, - $fn_hit, $br_found, $br_hit, - $page_link, - get_rate($lines_found, $lines_hit), - get_rate($fn_found, $fn_hit), - get_rate($br_found, $br_hit)]; - - $testhash{$base_name} = $testdata; - $testfnchash{$base_name} = $testfncdata; - $testbrhash{$base_name} = $testbrdata; - - $overall_found += $lines_found; - $overall_hit += $lines_hit; - - $total_fn_found += $fn_found; - $total_fn_hit += $fn_hit; - - $total_br_found += $br_found; - $total_br_hit += $br_hit; - } - - # Create sorted pages - foreach (@fileview_sortlist) { - # Generate directory overview page (without details) - write_dir_page($fileview_sortname[$_], $rel_dir, $base_dir, - $test_title, $trunc_dir, $overall_found, - 
$overall_hit, $total_fn_found, $total_fn_hit, - $total_br_found, $total_br_hit, \%overview, {}, - {}, {}, 1, $_); - if (!$show_details) { - next; - } - # Generate directory overview page including details - write_dir_page("-detail".$fileview_sortname[$_], $rel_dir, - $base_dir, $test_title, $trunc_dir, - $overall_found, $overall_hit, $total_fn_found, - $total_fn_hit, $total_br_found, $total_br_hit, - \%overview, \%testhash, \%testfnchash, - \%testbrhash, 1, $_); - } - - # Calculate resulting line counts - return ($overall_found, $overall_hit, $total_fn_found, $total_fn_hit, - $total_br_found, $total_br_hit); -} - - -# -# get_converted_lines(testdata) -# -# Return hash of line numbers of those lines which were only covered in -# converted data sets. -# - -sub get_converted_lines($) -{ - my $testdata = $_[0]; - my $testcount; - my %converted; - my %nonconverted; - my $hash; - my $testcase; - my $line; - my %result; - - - # Get a hash containing line numbers with positive counts both for - # converted and original data sets - foreach $testcase (keys(%{$testdata})) - { - # Check to see if this is a converted data set - if ($testcase =~ /,diff$/) - { - $hash = \%converted; - } - else - { - $hash = \%nonconverted; - } - - $testcount = $testdata->{$testcase}; - # Add lines with a positive count to hash - foreach $line (keys%{$testcount}) - { - if ($testcount->{$line} > 0) - { - $hash->{$line} = 1; - } - } - } - - # Combine both hashes to resulting list - foreach $line (keys(%converted)) - { - if (!defined($nonconverted{$line})) - { - $result{$line} = 1; - } - } - - return \%result; -} - - -sub write_function_page($$$$$$$$$$$$$$$$$$) -{ - my ($base_dir, $rel_dir, $trunc_dir, $base_name, $title, - $lines_found, $lines_hit, $fn_found, $fn_hit, $br_found, $br_hit, - $sumcount, $funcdata, $sumfnccount, $testfncdata, $sumbrcount, - $testbrdata, $sort_type) = @_; - my $pagetitle; - my $filename; - - # Generate function table for this file - if ($sort_type == 0) { - $filename = "$rel_dir/$base_name.func.$html_ext"; - } else { - $filename = "$rel_dir/$base_name.func-sort-c.$html_ext"; - } - html_create(*HTML_HANDLE, $filename); - $pagetitle = "LCOV - $title - $trunc_dir/$base_name - functions"; - write_html_prolog(*HTML_HANDLE, $base_dir, $pagetitle); - write_header(*HTML_HANDLE, 4, "$trunc_dir/$base_name", - "$rel_dir/$base_name", $lines_found, $lines_hit, - $fn_found, $fn_hit, $br_found, $br_hit, $sort_type); - write_function_table(*HTML_HANDLE, "$base_name.gcov.$html_ext", - $sumcount, $funcdata, - $sumfnccount, $testfncdata, $sumbrcount, - $testbrdata, $base_name, - $base_dir, $sort_type); - write_html_epilog(*HTML_HANDLE, $base_dir, 1); - close(*HTML_HANDLE); -} - - -# -# process_file(trunc_dir, rel_dir, filename) -# - -sub process_file($$$) -{ - info("Processing file ".apply_prefix($_[2], @dir_prefix)."\n"); - - my $trunc_dir = $_[0]; - my $rel_dir = $_[1]; - my $filename = $_[2]; - my $base_name = basename($filename); - my $base_dir = get_relative_base_path($rel_dir); - my $testdata; - my $testcount; - my $sumcount; - my $funcdata; - my $checkdata; - my $testfncdata; - my $sumfnccount; - my $testbrdata; - my $sumbrcount; - my $lines_found; - my $lines_hit; - my $fn_found; - my $fn_hit; - my $br_found; - my $br_hit; - my $converted; - my @source; - my $pagetitle; - local *HTML_HANDLE; - - ($testdata, $sumcount, $funcdata, $checkdata, $testfncdata, - $sumfnccount, $testbrdata, $sumbrcount, $lines_found, $lines_hit, - $fn_found, $fn_hit, $br_found, $br_hit) - = get_info_entry($info_data{$filename}); - 
- # Return after this point in case user asked us not to generate - # source code view - if ($no_sourceview) - { - return ($lines_found, $lines_hit, $fn_found, $fn_hit, - $br_found, $br_hit, $testdata, $testfncdata, - $testbrdata); - } - - $converted = get_converted_lines($testdata); - # Generate source code view for this file - html_create(*HTML_HANDLE, "$rel_dir/$base_name.gcov.$html_ext"); - $pagetitle = "LCOV - $test_title - $trunc_dir/$base_name"; - write_html_prolog(*HTML_HANDLE, $base_dir, $pagetitle); - write_header(*HTML_HANDLE, 2, "$trunc_dir/$base_name", - "$rel_dir/$base_name", $lines_found, $lines_hit, - $fn_found, $fn_hit, $br_found, $br_hit, 0); - @source = write_source(*HTML_HANDLE, $filename, $sumcount, $checkdata, - $converted, $funcdata, $sumbrcount); - - write_html_epilog(*HTML_HANDLE, $base_dir, 1); - close(*HTML_HANDLE); - - if ($func_coverage) { - # Create function tables - foreach (@funcview_sortlist) { - write_function_page($base_dir, $rel_dir, $trunc_dir, - $base_name, $test_title, - $lines_found, $lines_hit, - $fn_found, $fn_hit, $br_found, - $br_hit, $sumcount, - $funcdata, $sumfnccount, - $testfncdata, $sumbrcount, - $testbrdata, $_); - } - } - - # Additional files are needed in case of frame output - if (!$frames) - { - return ($lines_found, $lines_hit, $fn_found, $fn_hit, - $br_found, $br_hit, $testdata, $testfncdata, - $testbrdata); - } - - # Create overview png file - gen_png("$rel_dir/$base_name.gcov.png", $overview_width, $tab_size, - @source); - - # Create frameset page - html_create(*HTML_HANDLE, - "$rel_dir/$base_name.gcov.frameset.$html_ext"); - write_frameset(*HTML_HANDLE, $base_dir, $base_name, $pagetitle); - close(*HTML_HANDLE); - - # Write overview frame - html_create(*HTML_HANDLE, - "$rel_dir/$base_name.gcov.overview.$html_ext"); - write_overview(*HTML_HANDLE, $base_dir, $base_name, $pagetitle, - scalar(@source)); - close(*HTML_HANDLE); - - return ($lines_found, $lines_hit, $fn_found, $fn_hit, $br_found, - $br_hit, $testdata, $testfncdata, $testbrdata); -} - - -sub compress_brcount($) -{ - my ($brcount) = @_; - my $db; - - $db = brcount_to_db($brcount); - return db_to_brcount($db, $brcount); -} - - -# -# read_info_file(info_filename) -# -# Read in the contents of the .info file specified by INFO_FILENAME. 
Data will -# be returned as a reference to a hash containing the following mappings: -# -# %result: for each filename found in file -> \%data -# -# %data: "test" -> \%testdata -# "sum" -> \%sumcount -# "func" -> \%funcdata -# "found" -> $lines_found (number of instrumented lines found in file) -# "hit" -> $lines_hit (number of executed lines in file) -# "f_found" -> $fn_found (number of instrumented functions found in file) -# "f_hit" -> $fn_hit (number of executed functions in file) -# "b_found" -> $br_found (number of instrumented branches found in file) -# "b_hit" -> $br_hit (number of executed branches in file) -# "check" -> \%checkdata -# "testfnc" -> \%testfncdata -# "sumfnc" -> \%sumfnccount -# "testbr" -> \%testbrdata -# "sumbr" -> \%sumbrcount -# -# %testdata : name of test affecting this file -> \%testcount -# %testfncdata: name of test affecting this file -> \%testfnccount -# %testbrdata: name of test affecting this file -> \%testbrcount -# -# %testcount : line number -> execution count for a single test -# %testfnccount: function name -> execution count for a single test -# %testbrcount : line number -> branch coverage data for a single test -# %sumcount : line number -> execution count for all tests -# %sumfnccount : function name -> execution count for all tests -# %sumbrcount : line number -> branch coverage data for all tests -# %funcdata : function name -> line number -# %checkdata : line number -> checksum of source code line -# $brdata : vector of items: block, branch, taken -# -# Note that .info file sections referring to the same file and test name -# will automatically be combined by adding all execution counts. -# -# Note that if INFO_FILENAME ends with ".gz", it is assumed that the file -# is compressed using GZIP. If available, GUNZIP will be used to decompress -# this file. -# -# Die on error. -# + my $self = shift; + return $self->[0]; +} -sub read_info_file($) -{ - my $tracefile = $_[0]; # Name of tracefile - my %result; # Resulting hash: file -> data - my $data; # Data handle for current entry - my $testdata; # " " - my $testcount; # " " - my $sumcount; # " " - my $funcdata; # " " - my $checkdata; # " " - my $testfncdata; - my $testfnccount; - my $sumfnccount; - my $testbrdata; - my $testbrcount; - my $sumbrcount; - my $line; # Current line read from .info file - my $testname; # Current test name - my $filename; # Current filename - my $hitcount; # Count for lines hit - my $count; # Execution count of current line - my $negative; # If set, warn about negative counts - my $changed_testname; # If set, warn about changed testname - my $line_checksum; # Checksum of current line - my $notified_about_relative_paths; - local *INFO_HANDLE; # Filehandle for .info file - - info("Reading data file $tracefile\n"); - - # Check if file exists and is readable - stat($_[0]); - if (!(-r _)) - { - die("ERROR: cannot read file $_[0]!\n"); - } - - # Check if this is really a plain file - if (!(-f _)) - { - die("ERROR: not a plain file: $_[0]!\n"); - } - - # Check for .gz extension - if ($_[0] =~ /\.gz$/) - { - # Check for availability of GZIP tool - system_no_output(1, "gunzip" ,"-h") - and die("ERROR: gunzip command not available!\n"); - - # Check integrity of compressed file - system_no_output(1, "gunzip", "-t", $_[0]) - and die("ERROR: integrity check failed for ". - "compressed file $_[0]!\n"); - - # Open compressed file - open(INFO_HANDLE, "-|", "gunzip -c '$_[0]'") - or die("ERROR: cannot start gunzip to decompress ". 
- "file $_[0]!\n"); - } - else - { - # Open decompressed file - open(INFO_HANDLE, "<", $_[0]) - or die("ERROR: cannot read file $_[0]!\n"); - } - - $testname = ""; - while () - { - chomp($_); - $line = $_; - - # Switch statement - foreach ($line) - { - /^TN:([^,]*)(,diff)?/ && do - { - # Test name information found - $testname = defined($1) ? $1 : ""; - if ($testname =~ s/\W/_/g) - { - $changed_testname = 1; - } - $testname .= $2 if (defined($2)); - last; - }; - - /^[SK]F:(.*)/ && do - { - # Filename information found - # Retrieve data for new entry - $filename = File::Spec->rel2abs($1, $cwd); - - if (!File::Spec->file_name_is_absolute($1) && - !$notified_about_relative_paths) - { - info("Resolved relative source file ". - "path \"$1\" with CWD to ". - "\"$filename\".\n"); - $notified_about_relative_paths = 1; - } - - $data = $result{$filename}; - ($testdata, $sumcount, $funcdata, $checkdata, - $testfncdata, $sumfnccount, $testbrdata, - $sumbrcount) = - get_info_entry($data); - - if (defined($testname)) - { - $testcount = $testdata->{$testname}; - $testfnccount = $testfncdata->{$testname}; - $testbrcount = $testbrdata->{$testname}; - } - else - { - $testcount = {}; - $testfnccount = {}; - $testbrcount = {}; - } - last; - }; - - /^DA:(\d+),(-?\d+)(,[^,\s]+)?/ && do - { - # Fix negative counts - $count = $2 < 0 ? 0 : $2; - if ($2 < 0) - { - $negative = 1; - } - # Execution count found, add to structure - # Add summary counts - $sumcount->{$1} += $count; - - # Add test-specific counts - if (defined($testname)) - { - $testcount->{$1} += $count; - } - - # Store line checksum if available - if (defined($3)) - { - $line_checksum = substr($3, 1); - - # Does it match a previous definition - if (defined($checkdata->{$1}) && - ($checkdata->{$1} ne - $line_checksum)) - { - die("ERROR: checksum mismatch ". 
- "at $filename:$1\n"); - } - - $checkdata->{$1} = $line_checksum; - } - last; - }; - - /^FN:(\d+),([^,]+)/ && do - { - last if (!$func_coverage); - - # Function data found, add to structure - $funcdata->{$2} = $1; - - # Also initialize function call data - if (!defined($sumfnccount->{$2})) { - $sumfnccount->{$2} = 0; - } - if (defined($testname)) - { - if (!defined($testfnccount->{$2})) { - $testfnccount->{$2} = 0; - } - } - last; - }; - - /^FNDA:(\d+),([^,]+)/ && do - { - last if (!$func_coverage); - # Function call count found, add to structure - # Add summary counts - $sumfnccount->{$2} += $1; - - # Add test-specific counts - if (defined($testname)) - { - $testfnccount->{$2} += $1; - } - last; - }; - - /^BRDA:(\d+),(\d+),(\d+),(\d+|-)/ && do { - # Branch coverage data found - my ($line, $block, $branch, $taken) = - ($1, $2, $3, $4); - - last if (!$br_coverage); - $block = -1 if ($block == $UNNAMED_BLOCK); - $sumbrcount->{$line} .= - "$block,$branch,$taken:"; - - # Add test-specific counts - if (defined($testname)) { - $testbrcount->{$line} .= - "$block,$branch,$taken:"; - } - last; - }; - - /^end_of_record/ && do - { - # Found end of section marker - if ($filename) - { - # Store current section data - if (defined($testname)) - { - $testdata->{$testname} = - $testcount; - $testfncdata->{$testname} = - $testfnccount; - $testbrdata->{$testname} = - $testbrcount; - } - - set_info_entry($data, $testdata, - $sumcount, $funcdata, - $checkdata, $testfncdata, - $sumfnccount, - $testbrdata, - $sumbrcount); - $result{$filename} = $data; - last; - } - }; - - # default - last; - } - } - close(INFO_HANDLE); - - # Calculate lines_found and lines_hit for each file - foreach $filename (keys(%result)) - { - $data = $result{$filename}; - - ($testdata, $sumcount, undef, undef, $testfncdata, - $sumfnccount, $testbrdata, $sumbrcount) = - get_info_entry($data); - - # Filter out empty files - if (scalar(keys(%{$sumcount})) == 0) - { - delete($result{$filename}); - next; - } - # Filter out empty test cases - foreach $testname (keys(%{$testdata})) - { - if (!defined($testdata->{$testname}) || - scalar(keys(%{$testdata->{$testname}})) == 0) - { - delete($testdata->{$testname}); - delete($testfncdata->{$testname}); - } - } - - $data->{"found"} = scalar(keys(%{$sumcount})); - $hitcount = 0; - - foreach (keys(%{$sumcount})) - { - if ($sumcount->{$_} > 0) { $hitcount++; } - } - - $data->{"hit"} = $hitcount; - - # Get found/hit values for function call data - $data->{"f_found"} = scalar(keys(%{$sumfnccount})); - $hitcount = 0; - - foreach (keys(%{$sumfnccount})) { - if ($sumfnccount->{$_} > 0) { - $hitcount++; - } - } - $data->{"f_hit"} = $hitcount; - - # Combine branch data for the same branches - (undef, $data->{"b_found"}, $data->{"b_hit"}) = - compress_brcount($sumbrcount); - foreach $testname (keys(%{$testbrdata})) { - compress_brcount($testbrdata->{$testname}); - } - } - - if (scalar(keys(%result)) == 0) - { - die("ERROR: no valid records found in tracefile $tracefile\n"); - } - if ($negative) - { - warn("WARNING: negative counts found in tracefile ". - "$tracefile\n"); - } - if ($changed_testname) - { - warn("WARNING: invalid characters removed from testname in ". - "tracefile $tracefile\n"); - } - - return(\%result); -} - - -# -# get_info_entry(hash_ref) -# -# Retrieve data from an entry of the structure generated by read_info_file(). 
-# Return a list of references to hashes: -# (test data hash ref, sum count hash ref, funcdata hash ref, checkdata hash -# ref, testfncdata hash ref, sumfnccount hash ref, lines found, lines hit, -# functions found, functions hit) -# - -sub get_info_entry($) -{ - my $testdata_ref = $_[0]->{"test"}; - my $sumcount_ref = $_[0]->{"sum"}; - my $funcdata_ref = $_[0]->{"func"}; - my $checkdata_ref = $_[0]->{"check"}; - my $testfncdata = $_[0]->{"testfnc"}; - my $sumfnccount = $_[0]->{"sumfnc"}; - my $testbrdata = $_[0]->{"testbr"}; - my $sumbrcount = $_[0]->{"sumbr"}; - my $lines_found = $_[0]->{"found"}; - my $lines_hit = $_[0]->{"hit"}; - my $fn_found = $_[0]->{"f_found"}; - my $fn_hit = $_[0]->{"f_hit"}; - my $br_found = $_[0]->{"b_found"}; - my $br_hit = $_[0]->{"b_hit"}; +sub summary +{ + # return undef or SummaryInfo object + my $self = shift; + return $self->[1]; +} - return ($testdata_ref, $sumcount_ref, $funcdata_ref, $checkdata_ref, - $testfncdata, $sumfnccount, $testbrdata, $sumbrcount, - $lines_found, $lines_hit, $fn_found, $fn_hit, - $br_found, $br_hit); +sub findOwnerList +{ + my $self = shift; + # return [ [owner, lineCovData, branchCovData]] for each owner + # where lineCovData = [missedCount, totalCount, callback] + # branchCovData = [missed, total, callback] or undef if not enabled + # - sorted in descending order number of missed lines + return $self->summary()->findOwnerList(@_); } +sub dateDetailCallback +{ + # callback to compute count in particular date bin + my ($self, $ageval, $covtype) = @_; + $covtype == SummaryInfo::LINE_DATA || + $covtype == SummaryInfo::BRANCH_DATA || + $covtype == SummaryInfo::MCDC_DATA || + $covtype == SummaryInfo::FUNCTION_DATA or + die("'$covtype' type not supported"); + + return DateDetailCallback->new($self->summary(), $ageval, $covtype); +} -# -# set_info_entry(hash_ref, testdata_ref, sumcount_ref, funcdata_ref, -# checkdata_ref, testfncdata_ref, sumfcncount_ref, -# testbrdata_ref, sumbrcount_ref[,lines_found, -# lines_hit, f_found, f_hit, $b_found, $b_hit]) -# -# Update the hash referenced by HASH_REF with the provided data references. 
-# +sub ownerDetailCallback +{ + # callback to compute count in particular owner bin + my ($self, $owner, $covtype) = @_; + $covtype == SummaryInfo::LINE_DATA || + $covtype == SummaryInfo::BRANCH_DATA or + $covtype == SummaryInfo::MCDC_DATA or + die("'$covtype' type not supported"); + + return OwnerDetailCallback->new($self->summary(), $owner, $covtype); +} -sub set_info_entry($$$$$$$$$;$$$$$$) +sub totalCallback { - my $data_ref = $_[0]; + my ($self, $covtype) = @_; + # callback to compute total elements of 'covtype' in each TLA + if (SummaryInfo::LINE_DATA == $covtype) { + return $self->summary(); + } else { + return CovTypeSummaryCallback->new($self->summary(), $covtype); + } +} + +package FileOrDirectoryOwnerCallback; +# callback class used by 'write_file_table' to retrieve owner- +# specific coverage numbers (for all entries in the directory) - $data_ref->{"test"} = $_[1]; - $data_ref->{"sum"} = $_[2]; - $data_ref->{"func"} = $_[3]; - $data_ref->{"check"} = $_[4]; - $data_ref->{"testfnc"} = $_[5]; - $data_ref->{"sumfnc"} = $_[6]; - $data_ref->{"testbr"} = $_[7]; - $data_ref->{"sumbr"} = $_[8]; +sub new +{ + my ($class, $owner, $dirSummary) = @_; - if (defined($_[9])) { $data_ref->{"found"} = $_[9]; } - if (defined($_[10])) { $data_ref->{"hit"} = $_[10]; } - if (defined($_[11])) { $data_ref->{"f_found"} = $_[11]; } - if (defined($_[12])) { $data_ref->{"f_hit"} = $_[12]; } - if (defined($_[13])) { $data_ref->{"b_found"} = $_[13]; } - if (defined($_[14])) { $data_ref->{"b_hit"} = $_[14]; } + my $self = [$owner, $dirSummary]; + bless $self, $class; + return $self; } +sub name +{ + my $self = shift; + return $self->[0]; +} -# -# add_counts(data1_ref, data2_ref) -# -# DATA1_REF and DATA2_REF are references to hashes containing a mapping -# -# line number -> execution count -# -# Return a list (RESULT_REF, LINES_FOUND, LINES_HIT) where RESULT_REF -# is a reference to a hash containing the combined mapping in which -# execution counts are added. -# +sub summary +{ + my $self = shift; + return $self->[1]; +} -sub add_counts($$) +sub data { - my $data1_ref = $_[0]; # Hash 1 - my $data2_ref = $_[1]; # Hash 2 - my %result; # Resulting hash - my $line; # Current line iteration scalar - my $data1_count; # Count of line in hash1 - my $data2_count; # Count of line in hash2 - my $found = 0; # Total number of lines found - my $hit = 0; # Number of lines with a count > 0 + my $self = shift; + + my $lineCb = OwnerDetailCallback->new($self->[1], $self->[0], + SummaryInfo::LINE_DATA); + my $found = $lineCb->get('found'); + my $hit = $lineCb->get('hit'); + my $branchCb = + OwnerDetailCallback->new($self->[1], $self->[0], + SummaryInfo::BRANCH_DATA); + my $mcdcCb = + OwnerDetailCallback->new($self->[1], $self->[0], + SummaryInfo::MCDC_DATA); + my $fn_found = 0; + # ($found, $hit, $fn_found, $fn_hit, $br_found, $br_hit, $page_link, + # $fileSummary, $fileDetails) + # this is the 'totals' callback for this owner - so there is no + # associated file or summary info. Pass undef. 
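+    # the list returned below is (ln_found, ln_hit, fn_found, fn_hit,
+    # br_found, br_hit, mcdc_found, mcdc_hit) for this owner - with the
+    # function counts currently hardwired to 0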
+ return ($lineCb->get('found'), $lineCb->get('hit'), + 0, 0, # fn_found, fn_hit + $branchCb->get('found'), $branchCb->get('hit'), + $mcdcCb->get('found'), $mcdcCb->get('hit')); +} - foreach $line (keys(%$data1_ref)) - { - $data1_count = $data1_ref->{$line}; - $data2_count = $data2_ref->{$line}; +sub totalCallback +{ + # callback to compute total 'covtype' elements in each TLA + my ($self, $covtype) = @_; + die("$covtype not supported by OwnerDetail callback") + unless ($covtype == SummaryInfo::LINE_DATA || + $covtype == SummaryInfo::BRANCH_DATA || + $covtype == SummaryInfo::MCDC_DATA); + + return OwnerDetailCallback->new($self->[1], $self->name(), $covtype); +} - # Add counts if present in both hashes - if (defined($data2_count)) { $data1_count += $data2_count; } +sub findFileList +{ + my ($self, $all) = @_; + + # return [ [filename, lineCovData, branchCovData]] for each file + # such that this owner has at least 1 line. + # where lineCovData = [missedCount, totalCount, OwnerDetailCallback] + # branchCovData = [missed, total, dateDetailCallback] + # or undef if not enabled + # - sorted in descending order number of missed lines + my $dirSummary = $self->[1]; + my $owner = $self->[0]; + my @files; + my $skipped = 0; + foreach my $file ($dirSummary->sources()) { + my $source = $dirSummary->get_source($file); + next unless $source->contains_owner($owner); + + my $lineCb = + OwnerDetailCallback->new($source, $owner, SummaryInfo::LINE_DATA); + my $brCb = + OwnerDetailCallback->new($source, $owner, SummaryInfo::BRANCH_DATA); + my $mcdcCb = + OwnerDetailCallback->new($source, $owner, SummaryInfo::MCDC_DATA); + my $funcCb = + OwnerDetailCallback->new($source, $owner, + SummaryInfo::FUNCTION_DATA); + my $total = $lineCb->get('found'); + my $br_total = $lcovutil::br_coverage ? $brCb->get('found') : 0; + my $fn_total = $lcovutil::func_coverage ? $funcCb->get('found') : 0; + my $mcdc_total = $lcovutil::mcdc_coverage ? $mcdcCb->get('found') : 0; + next + if (0 == $total && + 0 == $br_total && + 0 == $mcdc_total && + 0 == $fn_total); + my $missed = $lineCb->get('missed'); + my $br_missed = $lcovutil::br_coverage ? $brCb->get('missed') : 0; + my $mcdc_missed = $lcovutil::mcdc_coverage ? $mcdcCb->get('missed') : 0; + my $fn_missed = $lcovutil::func_coverage ? $funcCb->get('missed') : 0; + + if ($all || + 0 != $missed || + 0 != $br_missed || + 0 != $fn_missed || + 0 != $mcdc_missed) { + + push(@files, + [$file, + [$missed, $total, $lineCb], + [$br_missed, $br_total, $brCb], + [$mcdc_missed, $mcdc_total, $mcdcCb], + [$fn_missed, $fn_total, $funcCb] + ]); + } else { + ++$skipped; + } + } + return [$skipped, @files]; +} + +sub secondaryElementFileData +{ + my ($self, $name) = @_; + my $dirSummary = $self->[1]; + my $file = File::Basename::basename($name); + my $sourceSummary = $dirSummary->get_source($name); + + my $page_link; + if ($sourceSummary->is_directory()) { + $page_link = + File::Spec->catfile($name, "index-bin_owner." . $main::html_ext); + } elsif ($main::no_sourceview) { + $page_link = ""; + } else { + $name = $file; + $page_link = $name . ".gcov."; + $page_link .= "frameset." 
+ if $main::frames; + $page_link .= $main::html_ext; + } + $page_link = lc($page_link) if $lcovutil::case_insensitive; + # pass owner in callback data + my $sourceFile = $sourceSummary->fileDetails() + if 'file' eq $sourceSummary->type(); + return [$name, $sourceSummary, $sourceFile, $page_link, $self->[0]]; +} + +package FileOrDirectoryDateCallback; +# callback class used by 'write_file_table' to retrieve date- +# specific coverage numbers (for all entries in the directory) + +sub new +{ + my ($class, $bin, $dirSummary) = @_; + + my $self = [$bin, $dirSummary->age_sample($bin), $dirSummary]; + bless $self, $class; + return $self; +} + +sub name +{ + my $self = shift; + return $SummaryInfo::ageGroupHeader[$self->[0]]; +} + +sub summary +{ + my $self = shift; + return $self->[2]; +} + +sub data +{ + my $self = shift; + + my @rtn; + foreach my $covType (SummaryInfo::LINE_DATA, SummaryInfo::FUNCTION_DATA, + SummaryInfo::BRANCH_DATA, SummaryInfo::MCDC_DATA + ) { + my $cb = DateDetailCallback->new($self->[2], $self->[1], $covType); + push(@rtn, $cb->get('found'), $cb->get('hit')); + } + # ($found, $hit, $fn_found, $fn_hit, $br_found, $br_hit, $page_link, + # $fileSummary, $fileDetails) + # this is the top-level 'total' callback - so no associated file or + # summary info + return @rtn; +} + +sub totalCallback +{ + # callback to compute total elements of 'covtype' in each TLA + my ($self, $covtype) = @_; + return DateDetailCallback->new($self->[2], $self->[1], $covtype); +} + +sub findFileList +{ + my ($self, $all) = @_; + + # return [ [filename, lineCovData, branchCovData]] for each file + # such that this owner has at least 1 line. + # where lineCovData = [missedCount, totalCount, OwnerDetailCallback] + # branchCovData = [missed, total, dateDetailCallback] + # or undef if not enabled + # - sorted in descending order number of missed lines + my $dirSummary = $self->[2]; + my $ageval = $self->[1]; + my @files; + my $skipped = 0; + foreach my $file ($dirSummary->sources()) { + my $source = $dirSummary->get_source($file); + + my $lineCb = + DateDetailCallback->new($source, $ageval, SummaryInfo::LINE_DATA); + my $brCb = + DateDetailCallback->new($source, $ageval, SummaryInfo::BRANCH_DATA); + my $mcdcCb = + DateDetailCallback->new($source, $ageval, SummaryInfo::MCDC_DATA); + my $funcCb = + DateDetailCallback->new($source, $ageval, + SummaryInfo::FUNCTION_DATA); + my $total = $lineCb->get('found'); + my $br_total = $lcovutil::br_coverage ? $brCb->get('found') : 0; + my $mcdc_total = $lcovutil::mcdc_coverage ? $mcdcCb->get('found') : 0; + my $fn_total = $lcovutil::func_coverage ? $funcCb->get('found') : 0; + next + if (0 == $total && + 0 == $br_total && + 0 == $mcdc_total && + 0 == $fn_total); + + my $missed = $lineCb->get('missed'); + my $br_missed = $lcovutil::br_coverage ? $brCb->get('missed') : 0; + my $mcdc_missed = $lcovutil::mcdc_coverage ? $mcdcCb->get('missed') : 0; + my $fn_missed = $lcovutil::func_coverage ? 
$funcCb->get('missed') : 0; + if ($all || + 0 != $missed || + 0 != $br_missed || + 0 != $mcdc_missed || + 0 != $fn_missed) { + push(@files, + [$file, + [$missed, $total, $lineCb], + [$br_missed, $br_total, $brCb], + [$mcdc_missed, $mcdc_total, $mcdcCb], + [$fn_missed, $fn_total, $funcCb] + ]); + } else { + ++$skipped; + } + } + return [$skipped, @files]; +} + +sub secondaryElementFileData +{ + my ($self, $name) = @_; + my $dirSummary = $self->[2]; + my $file = File::Basename::basename($name); + my $sourceSummary = $dirSummary->get_source($name); + + my $page_link; + if ($sourceSummary->is_directory()) { + $page_link = + File::Spec->catfile($name, "index-bin_date." . $main::html_ext); + } elsif ($main::no_sourceview) { + $page_link = ""; + } else { + $name = $file; + $page_link = $name . ".gcov."; + $page_link .= "frameset." + if $main::frames; + $page_link .= $main::html_ext; + } + $page_link = lc($page_link) if $lcovutil::case_insensitive; + # pass bin index in callback data + my $sourceFile = $sourceSummary->fileDetails() + if 'file' eq $sourceSummary->type(); + return [$name, $sourceSummary, $sourceFile, $page_link, $self->[0]]; +} + +package CovTypeSummaryCallback; +# callback class to return total branches in each TLA category +sub new +{ + my ($class, $summary, $covType) = @_; + defined($summary) or + die("no summary"); + die("$covType not supported yet") + unless ($covType eq SummaryInfo::LINE_DATA || + $covType eq SummaryInfo::BRANCH_DATA || + $covType eq SummaryInfo::MCDC_DATA || + $covType eq SummaryInfo::FUNCTION_DATA); + my $self = [$summary, $covType]; + bless $self, $class; + return $self; +} + +sub get +{ + my ($self, $key) = @_; + + return $self->[0]->get($key, $self->[1]); +} + +sub owner +{ + my $self = shift; + die("CovTypeSummaryCallback::owner not supported for " . $self->[1]) + unless ($self->[1] == SummaryInfo::BRANCH_DATA || + $self->[1] == SummaryInfo::MCDC_DATA); + return $self->[0]->owner(); +} + +sub age +{ + my $self = shift; + return $self->[0]->age(); +} + +sub bin +{ + my $self = shift; + return $self->[0]->bin(); +} + +sub covType +{ + my $self = shift; + return $self->[1]; +} + +package PrintCallback; +# maintain some callback data from one line to the next + +use constant { + FILE_INFO => 0, # SourceFile struct + LINE_DATA => 1, # FileCoverageInfo struct + TLA => 2, + OWNER => 3, + AGE => 4, + NEXT_OWNER => 5, + NEXT_AGE => 6, + LINENO => 7, +}; + +sub new +{ + my ($class, $sourceFileStruct, $lineCovInfo) = @_; + my $self = [$sourceFileStruct, + $lineCovInfo, + "", # current TLA + "", # owner + "", # age + {}, # next header line for corresponding owner + {}, # next header line for corresponding date bin + undef # line number + ]; + bless $self, $class; + return $self; +} + +sub sourceDetail +{ + my $self = shift; + return $self->[FILE_INFO]; +} + +sub lineData +{ + my $self = shift; + return $self->[LINE_DATA]; +} + +sub lineNo +{ + my ($self, $lineNo) = @_; + $self->[LINENO] = $lineNo + if defined($lineNo); + return $self->[LINENO]; +} + +sub tla +{ + my ($self, $newTLA, $lineNo) = @_; + # NOTE: 'undef' TLA means that this line is not code (it is a comment, + # blank line, opening brace or something). + # We return 'same' as previous line' in that case so the category + # block can be larger (e.g., 1 CBC line, a 2 line comment, then 3 more + # lines) can get just one label (first line). + # This reduces visual clutter. 
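+    # (For such repeated lines the method returns a blank string padded to
+    # $main::tla_field_width so that the TLA column stays aligned.)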
+ # Note that the 'block finding' code has to do the same thing (else the + # HTML links won't be generated correctly) + if (defined($newTLA) && + $newTLA ne $self->[TLA]) { + $self->[TLA] = $newTLA; + return $newTLA; + } + return " " x $main::tla_field_width; # same TLA as previous line. +} + +sub age +{ + my ($self, $newval, $lineNo) = @_; + if (defined($newval) && $newval ne $self->[AGE]) { + $self->[AGE] = $newval; + return $newval; + } + return " " x $main::age_field_width; # same age as previous line. +} + +sub owner +{ + my ($self, $newval, $lineNo) = @_; + if (defined($newval) && $newval ne $self->[OWNER]) { + $self->[OWNER] = $newval; + return $newval; + } + return " " x $main::owner_field_width; # same age as previous line. +} + +sub current +{ + my ($self, $key) = @_; + + if ($key eq 'tla') { + return $self->[TLA]; + } elsif ($key eq 'owner') { + return $self->[OWNER]; + } elsif ($key eq 'age') { + return $self->[AGE]; + } else { + ($key eq 'dateBucket') or + die("unexpected key $key"); + return SummaryInfo::findAgeBin($self->[AGE]); + } +} + +sub nextOwner +{ + my ($self, $owner, $tla, $value) = @_; + my $map = $self->[NEXT_OWNER]; + + my $key = $tla . ' ' . $owner; + if (defined($value)) { + $map->{$key} = $value; + return $value; + } + return exists($map->{$key}) ? $map->{$key} : undef; +} + +sub nextDate +{ + my ($self, $date, $tla, $value) = @_; + my $map = $self->[NEXT_AGE]; + my $key = $tla . ' ' . $date; + if (defined($value)) { + $map->{$key} = $value; + return $value; + } + return $map->{$key}; +} + +package ReadBaselineSource; + +use base 'ReadCurrentSource'; + +sub new +{ + my ($class, $diffData) = @_; + + my $self = $class->SUPER::new(); + push(@$self, $diffData); + + return $self; +} + +sub open +{ + my ($self, $filename) = @_; + my $diffmap = $self->[1]; + if (defined($diffmap) && $diffmap->containsFile($filename)) { + # if there are any diffs, then need to load the current source, then + # walk the diff to insert and remove changed lines + + my $currentSrc = $self->_load($filename, 'baseline'); + my $src = $diffmap->recreateBaseline($filename, $currentSrc); + return $self->parseLines($filename, $src); + } + # else no diff data here - just read the file + return ReadCurrentSource::open($self, $filename, 'baseline'); +} + +package LineData; + +use constant { + TYPE => 0, + LINENO_BASE => 1, + LINENO_CURRENT => 2, # location of this line in current data + LINE_DATA => 3, + BRANCH_DATA => 4, + MCDC_DATA => 5, + FUNCTION_DATA => 6, + + # data elements in line data + TLA => 0, + LINE_BASELINE => 1, + LINE_CURRENT => 2, + + # data elements for branch/function data + DATA_BASELINE => 0, + DATA_CURRENT => 1, + DATA_DIFFERENTIAL => 2, +}; + +sub new +{ + my ($class, $type) = @_; + # [ type, lineNo_base, lineNo_current, + # bucket, base_count, curr_count <- line coverage count data + # base_branch, curr_branch, differential_branch ] <- branch coverage count data + # $type in ('insert', 'equal', 'delete') + my $self = [$type, undef, undef, + ['UNK', undef, undef], # line coverage data + [], # branch coverage data + [], # MCDC coverage data + [] + ]; # function coverage + bless $self, $class; + return $self; +} + +sub to_list +{ + # used by script-level 'select' callback + my $self = shift; + + my @rtn = ($self->[TYPE], + [$self->tla(), + $self->in_curr() ? $self->curr_count() : undef, + $self->in_base() ? 
$self->base_count() : undef
+               ]);
+    # @todo perhaps visit MC/DC here too
+    my $branch = $self->differential_branch();
+    if (defined($branch)) {
+        my @data;
+        push(@rtn, \@data);
+        foreach my $block ($branch->blocks()) {
+            my $br = $branch->getBlock($block);
+            my @b;
+            push(@data, \@b);
+            foreach my $b (@$br) {
+                my ($br, $tla, $d) = @$b;
+                my ($base_count, $curr_count) = @$d;
+                push(@b, [$tla, $curr_count, $base_count]);
+            }
+        }
+    } else {
+        $branch = $self->current_branch();
+        if (defined($branch)) {
+            my @data;
+            push(@rtn, \@data);
+            foreach my $block ($branch->blocks()) {
+                my $br = $branch->getBlock($block);
+                push(@data, $br->count());
+            }
+        } else {
+            push(@rtn, undef);
+        }
+    }
+    return \@rtn;
+}
+
+sub tla
+{
+    my ($self, $tla) = @_;
+    my $linecov = $self->[LINE_DATA];
+    $linecov->[TLA] = $tla
+        if defined($tla);
+    return $linecov->[TLA];
+}
+
+sub type
+{
+    my $self = shift;
+    return $self->[TYPE];
+}
+
+sub lineNo
+{
+    my ($self, $which, $lineNo) = @_;
+    my $loc;
+    if ($which eq "current") {
+        $loc = LINENO_CURRENT;
+    } else {
+        die("unknown key $which - should be 'base' or 'current'")
+            unless $which eq "base";
+        $loc = LINENO_BASE;
+    }
+    lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA,
+                              "inconsistent $which line location $loc: " .
+                                  $self->[$loc] . " -> $lineNo")
+        if (defined($lineNo) &&
+            defined($self->[$loc]) &&
+            $self->[$loc] != $lineNo);
+
+    $self->[$loc] = $lineNo
+        if defined($lineNo);
+    return $self->[$loc];
+}
+
+sub in_base
+{
+    # @return true or false: is this object present in the baseline?
+    my $self = shift;
+    # coverpoint is in baseline data if line coverage number is defined
+    return defined($self->[LINE_DATA]->[LINE_BASELINE]);
+}
+
+sub in_curr
+{
+    # @return true or false: is this object present in the current version?
+    # storing negative number for 'current' location of deleted line -
+    # first location above or below the deleted region
+    my $self = shift;
+    # coverpoint is in current data if line coverage number is defined
+    # otherwise, the line may be present without coverage data associated
+    # with it (say, excluded now - or possibly filtered out)
+    return defined($self->[LINE_DATA]->[LINE_CURRENT]);
+}
+
+sub base_count
+{
+    # return line hit count in baseline
+    my ($self, $inc) = @_;
+    die("non-zero count but not in base")
+        if (defined($inc) && !defined($self->[LINENO_BASE]));
+    my $linecov = $self->[LINE_DATA];
+    if (defined($inc)) {
+        if (defined($linecov->[LINE_BASELINE])) {
+            $linecov->[LINE_BASELINE] += $inc;
+        } else {
+            $linecov->[LINE_BASELINE] = $inc;
+        }
+    }
+
+    return $linecov->[LINE_BASELINE];
+}
+
+sub curr_count
+{
+    # return line hit count in current
+    my ($self, $inc) = @_;
+    die("non-zero count but not in current")
+        if (defined($inc) &&
+            (!defined($self->[LINENO_CURRENT]) || $self->[LINENO_CURRENT] < 0));
+    my $linecov = $self->[LINE_DATA];
+    if (defined($inc)) {
+        if (defined($linecov->[LINE_CURRENT])) {
+            $linecov->[LINE_CURRENT] += $inc;
+        } else {
+            $linecov->[LINE_CURRENT] = $inc;
+        }
+    }
+    return $linecov->[LINE_CURRENT];
+}
+
+sub _mergeBranchData
+{
+    my ($self, $loc, $branchData, $filename) = @_;
+    my $branch = $self->[BRANCH_DATA];
+    if (defined($branch->[$loc])) {
+
+        my $current = $branch->[$loc];
+        foreach my $branchId ($current->blocks()) {
+            # LCOV_EXCL_START
+            if (!$branchData->hasBlock($branchId)) {
+                # don't know how to get here...but someone on the internet
+                # managed to do it - so we need to handle the error
+                my $which = $loc == DATA_BASELINE ?
'baseline' : 'current'; + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + $filename . ':' . $current->line() . + ": '$which' line " . + $self->lineNo($which) . + " merge block (line " . + $branchData->line() . + ") does not contain branch $branchId." + ); + next; + } + # LCOV_EXCL_STOP + + my $c = $current->getBlock($branchId); + my $d = $branchData->getBlock($branchId); + # handle case of inconsistent branch data + my $nc = scalar(@$c); + my $nd = scalar(@$d); + # LCOV_EXCL_START + if ($nc != $nd) { + # similarly: this should not happen - but it might + my $which = $loc == DATA_BASELINE ? 'baseline' : 'current'; + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + $filename . ':' . $current->line() . + ": '$which' line " . $self->lineNo($which) . + " branch $branchId contains $nc elements but merge data contains $nd." + ); + } + # LCOV_EXCL_STOP + for (my $i = ($nc > $nd ? $nd : $nc) - 1; $i >= 0; --$i) { + my $br = $d->[$i]; + $c->[$i]->merge($br); + } + # now append any new values from 'branchData': + # (go here only if |D| > |C| and we ignore the mismatch error + for (my $i = $nc; $i < $nd; ++$i) { + push(@$c, Storable::dclone($d->[$i])); + } + } + } else { + $branch->[$loc] = Storable::dclone($branchData); + } +} + +sub baseline_branch +{ + my ($self, $branchData, $filename) = @_; + die("has baseline branch data but not in baseline") + if (defined($branchData) && !defined($self->[LINENO_BASE])); + if (defined($branchData)) { + $self->_mergeBranchData(DATA_BASELINE, $branchData, $filename); + } + my $branch = $self->[BRANCH_DATA]; + return $branch->[DATA_BASELINE]; +} + +sub current_branch +{ + my ($self, $branchData, $filename) = @_; + die("has current branch data but not in current") + if (defined($branchData) && + (!defined($self->[LINENO_CURRENT]) || $self->[LINENO_CURRENT] < 0)); + if (defined($branchData)) { + $self->_mergeBranchData(DATA_CURRENT, $branchData, $filename); + } + my $branch = $self->[BRANCH_DATA]; + return $branch->[DATA_CURRENT]; +} + +sub differential_branch +{ + my ($self, $differential) = @_; + my $branch = $self->[BRANCH_DATA]; + if (defined($differential)) { + $branch->[DATA_DIFFERENTIAL] = $differential; + } + return $branch->[DATA_DIFFERENTIAL]; +} + +sub _mergeMcdcData +{ + # this is nearly identical to 'function' implementation...maybe share + my ($self, $loc, $mcdcData, $filename) = @_; + my $mcdc = $self->[MCDC_DATA]; + if (defined($mcdc->[$loc])) { + + my $current = $mcdc->[$loc]; + $current->merge($mcdcData); + } else { + $mcdc->[$loc] = Storable::dclone($mcdcData); + } +} + +sub baseline_mcdc +{ + my ($self, $mcdcData, $filename) = @_; + die("has baseline MC/DC data but not in baseline") + if (defined($mcdcData) && !defined($self->[LINENO_BASE])); + if (defined($mcdcData)) { + $self->_mergeMcdcData(DATA_BASELINE, $mcdcData, $filename); + } + my $mcdc = $self->[MCDC_DATA]; + return $mcdc->[DATA_BASELINE]; +} + +sub current_mcdc +{ + my ($self, $mcdcData, $filename) = @_; + die("has current MC/DC data but not in current") + if (defined($mcdcData) && + (!defined($self->[LINENO_CURRENT]) || $self->[LINENO_CURRENT] < 0)); + if (defined($mcdcData)) { + $self->_mergeMcdcData(DATA_CURRENT, $mcdcData, $filename); + } + my $mcdc = $self->[MCDC_DATA]; + return $mcdc->[DATA_CURRENT]; +} + +sub differential_mcdc +{ + my ($self, $differential) = @_; + my $mcdc = $self->[MCDC_DATA]; + if (defined($differential)) { + $mcdc->[DATA_DIFFERENTIAL] = $differential; + } + return $mcdc->[DATA_DIFFERENTIAL]; +} + +sub _mergeFunctionData +{ + my ($self, $loc, 
$functionData) = @_; + die('expected FunctionEntry found ' . ref($functionData)) + unless 'FunctionEntry' eq ref($functionData); + my $function = $self->[FUNCTION_DATA]; + if (defined($function->[$loc])) { + my $current = $function->[$loc]; + $current->merge($functionData); + } else { + # also clone hit count data + $function->[$loc] = $functionData->cloneWithEndLine(1, 1); + } +} + +sub baseline_function +{ + my ($self, $functionData) = @_; + die("has baseline function data but not in baseline") + if (defined($functionData) && !defined($self->[LINENO_BASE])); + if (defined($functionData)) { + $self->_mergeFunctionData(DATA_BASELINE, $functionData); + } + my $function = $self->[FUNCTION_DATA]; + return $function->[DATA_BASELINE]; +} + +sub current_function +{ + my ($self, $functionData) = @_; + die("has current function data but not in current") + if (defined($functionData) && + (!defined($self->[LINENO_CURRENT]) || $self->[LINENO_CURRENT] < 0)); + if (defined($functionData)) { + $self->_mergeFunctionData(DATA_CURRENT, $functionData); + } + my $function = $self->[FUNCTION_DATA]; + return $function->[DATA_CURRENT]; +} + +sub differential_function +{ + my ($self, $differential) = @_; + my $function = $self->[FUNCTION_DATA]; + if (defined($differential)) { + $function->[DATA_DIFFERENTIAL] = $differential; + } + return $function->[DATA_DIFFERENTIAL]; +} + +# structure holding coverage data for a particular file: +# - associated with a line line number: +# - line coverage +# - branch coverage +# - function coverage (not directly associated with line number +package FileCoverageInfo; + +use constant { + VERSION => 0, + LINEMAP => 1, + DELETED_LINE_LEADER => 2, + FUNCTIONMAP => 3, +}; + +sub new +{ + my ($class, $filename, $base_data, $current_data, $diffMap, $verbose) = @_; + + # [hash of lineNumber -> LineData struct, optional FunctionMap] + my $self = [ + [defined($base_data) ? $base_data->version() : undef, + $current_data->version() + ], + {}, # the line data map + {} # currentLineNo -> [deleted lines for which this is the leader] + ]; + bless $self, $class; + + $diffMap->show_map($filename) + if ((defined($verbose) && $verbose) || + (defined($lcovutil::verbose) && $lcovutil::verbose > 1)); + + # line coverage categorization includes date- and owner- bins in + # the vanilla case when there is no baseline. + $self->_categorizeLineCov($filename, $base_data, $current_data, + $diffMap, $verbose); + $self->_categorizeBranchCov($filename, $base_data, $current_data, + $diffMap, $verbose) + if ($lcovutil::br_coverage); + $self->_categorizeMcdcCov($filename, $base_data, $current_data, + $diffMap, $verbose) + if ($lcovutil::mcdc_coverage); + $self->_categorizeFunctionCov($filename, $base_data, $current_data, + $diffMap, $verbose) + if ($lcovutil::func_coverage); + + while (my ($lineNo, $deleted) = each(%{$self->[DELETED_LINE_LEADER]})) { + @$deleted = + sort({ $a->lineNo('base') <=> $b->lineNo('base') } @$deleted); + } + return $self; +} + +sub version +{ + my ($self, $which) = @_; + return $which eq 'current' ? $self->[VERSION]->[1] : $self->[VERSION]->[0]; +} + +sub lineMap +{ + my $self = shift; + return $self->[LINEMAP]; +} + +sub functionMap +{ + # simply a map of function leader name -> differential FunctionEntry + my $self = shift; + return (scalar(@$self) >= FUNCTIONMAP) ? $self->[FUNCTIONMAP] : undef; +} + +sub line +{ + my ($self, $lineNo) = @_; + my $lineMap = $self->lineMap(); + return exists($lineMap->{$lineNo}) ? 
$lineMap->{$lineNo} : undef; +} + +sub deletedLineData +{ + my ($self, $currentLineNumber) = @_; + + my $map = $self->[DELETED_LINE_LEADER]; + return + exists($map->{$currentLineNumber}) ? $map->{$currentLineNumber} : undef; +} + +sub recategorizeTlaAsBaseline +{ + # intended use: this file appears to have been added to the "coverage" + # suite - but the file itself is old/has been around for a long time. + # - by default, we will see this as "Included Code" + # - which means that 'un-exercised' code will be "UIC" + # - but: non-zero UIC will fail our Jenkins coverage ratchet. + # As a workaround: treat this file as if the baseline data was the same + # as 'current' - so code will be categorized as "CBC/UBC" - which will not + # trigger the coverage criteria. + my $self = shift; + my $lineMap = $self->lineMap(); + my %remap = ('UIC' => 'UBC', + 'GIC' => 'CBC'); + + while (my ($line, $data) = each(%$lineMap)) { + die("unexpected $line 'in_base'") if $data->in_base(); + + my $lineTla = $data->tla(); + if (exists($remap{$lineTla})) { + # don't remap GNC, UNC, etc + $data->tla($remap{$lineTla}); + } + + # branch coverage... + if ($lcovutil::br_coverage && defined($data->differential_branch())) { + my $br = $data->differential_branch(); + + foreach my $branchId ($br->blocks()) { + my $diff = $br->getBlock($branchId); + foreach my $b (@$diff) { + my $tla = $b->[1]; + if (exists($remap{$tla})) { + $b->[1] = $remap{$tla}; + } + } + } + } # if branch data + + # MC/DC coverage... + if ($lcovutil::mcdc_coverage && defined($data->differential_mcdc())) { + my $mcdc = $data->differential_mcdc(); + + while (my ($groupSize, $group) = each(%{$mcdc->groups()})) { + foreach my $cond (@$group) { + # remap both the true and false sense.. + foreach my $sense (0, 1) { + my $c = $cond->count($sense); + my $tla = $c->[0]; + if (exists($remap{$tla})) { + $c->[0] = $remap{$tla}; + } + } + } + } + } # if MCDC data + + # function coverage.. + if ($lcovutil::func_coverage && defined($data->differential_function())) + { + my $func = $data->differential_function(); + my $hit = $func->hit(); + my $tla = $hit->[1]; + if (exists($remap{$tla})) { + $hit->[1] = $remap{$tla}; + } + + while (my ($alias, $data) = each(%{$func->aliases()})) { + my $tla = $data->[1]; + if (exists($remap{$tla})) { + $data->[1] = $remap{$tla}; + } + } + } + } # if function data +} + +sub _categorize +{ + my ($baseCount, $currCount) = @_; + my $tla; + if (0 == $baseCount) { + $tla = (0 == $currCount) ? "UBC" : "GBC"; + } elsif (0 == $currCount) { + $tla = "LBC"; + } else { + $tla = "CBC"; + } + return $tla; +} + +sub _findLineData +{ + my ($self, $diffMap, $filename, $base_lineNo) = @_; + my $current_lineNo = + $diffMap->lookup($filename, $diffMap->OLD, $base_lineNo); + my $type = $diffMap->type($filename, $diffMap->OLD, $base_lineNo); + + my $lineDataMap = $self->lineMap(); + my $linedata; + + if ($type ne "delete") { + if (!defined($lineDataMap->{$current_lineNo})) { + $linedata = LineData->new($type); + $lineDataMap->{$current_lineNo} = $linedata; + $linedata->lineNo('current', $current_lineNo); + } else { + $linedata = $lineDataMap->{$current_lineNo}; + } + $linedata->lineNo('base', $base_lineNo); + } else { + # nothing walks the keylist so a prefix is sufficient to distinguish + # records that should be summarized but not displayed + my $dline = "<<<" . 
$base_lineNo; + if (!exists($lineDataMap->{$dline})) { + $linedata = LineData->new($type); + $linedata->lineNo('base', $base_lineNo); + $lineDataMap->{$dline} = $linedata; + # look up and/or down to find the first baseline line + # which is not deleted - and store that as the corresponding + # 'current' line. + # this way, we can know the extents of the deleted region + my $c; + for (my $i = $base_lineNo - 1; $i > 0; --$i) { + if ('delete' ne $diffMap->type($filename, $diffMap->OLD, $i)) { + $c = $diffMap->lookup($filename, $diffMap->OLD, $i); + last; + } + } + if (!defined($c)) { + # there were no 'current' lines above me - so I must be + # at the first line in the file. It must not be deleted + $c = 1; + die("$filename:1: incorrectly marked 'delete'") + if ( + 'delete' eq $diffMap->type($filename, $diffMap->OLD, $c)); + } + die("$filename: no current block for deleted line $base_lineNo") + unless defined($c); + $linedata->lineNo('current', -$c); + # keep track of where deleted lines were - so we can + # mark them in the source view + if (exists($self->[DELETED_LINE_LEADER]->{$c})) { + push(@{$self->[DELETED_LINE_LEADER]->{$c}}, $linedata); + } else { + $self->[DELETED_LINE_LEADER]->{$c} = [$linedata]; + } + } else { + $linedata = $lineDataMap->{$dline}; + } + } + return $linedata; +} + +# categorize line coverage numbers +sub _categorizeLineCov +{ + my ($self, $filename, $base_data, $current_data, $diffMap, $verbose) = @_; + my $lineDataMap = $self->lineMap(); + + if ($verbose) { + print("categorize lines $filename\n"); + } + # $lineCovBase, $lineCovCurrent are CountData objects + my $lineCovBase = $base_data->sum() if defined($base_data); + my $lineCovCurrent = $current_data->sum(); + + # walk the branch coverpoints to check for data consistency: + # - we expect a line coverpoint in every location which has branches + # - if not found, the generate message and/or create a fake coverpoint + # LLVM seems to like to generate inconsistent data. + my $branchCurrent = $current_data->sumbr(); + foreach my $line ($branchCurrent->keylist()) { + # just ignore bogus data - we already warned when we read the data + next if ($line <= 0); + + my $type = $diffMap->type($filename, $diffMap->NEW, $line); + # LCOV_EXCL_START + if ($type eq 'delete') { + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + "'current' line $filename:$line should not be marked 'delete'"); + delete($branchCurrent->{$line}); + next; + } + # LCOV_EXCL_STOP + } + + # it is sufficient to just walk the 'global' (merged) line + # coverage dataset because we only care/we only show total + # coverage - not changed coverage per testcase. + # (This observation is also true for branch and function + # coverages.) 
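+    # Illustrative sketch, kept as comments so the diff's behavior is
+    # unchanged: the loop below (together with the '_categorize' helper
+    # defined earlier) maps each line's (baseline hit, current hit) pair to
+    # a differential category, roughly:
+    #     unchanged line, base == 0, curr == 0  -> UBC
+    #     unchanged line, base == 0, curr  > 0  -> GBC
+    #     unchanged line, base  > 0, curr == 0  -> LBC
+    #     unchanged line, base  > 0, curr  > 0  -> CBC
+    # inserted lines become GNC/UNC, deleted lines DCB/DUB, and lines seen
+    # on only one side fall into GIC/UIC or ECB/EUB.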
+ foreach my $line ($lineCovCurrent->keylist()) { + # just ignore bogus data - we already warned when we read the data + next if ($line <= 0); + my $type = $diffMap->type($filename, $diffMap->NEW, $line); + if ($type eq 'delete') { + # can happen in some inconsistent case, when there are certain + # out-of-range references in a file which contained diffs - and we + # ignored the error check + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + "'current' line $filename:$line should not be marked 'delete'"); + $lineCovCurrent->remove($line); + next; + } + my $linedata; + if (!exists($lineDataMap->{$line})) { + $linedata = LineData->new($type); + $lineDataMap->{$line} = $linedata; + } else { + $linedata = $lineDataMap->{$line}; + } + my $val = $lineCovCurrent->value($line); + $linedata->lineNo("current", $line); + $linedata->curr_count($val); + $linedata->tla($val == 0 ? 'UNC' : 'GNC') + if (!defined($lineCovBase)); + } + return unless (defined($lineCovBase)); + + foreach my $bline ($lineCovBase->keylist()) { + # just ignore bogus data - we already warned when we read the data + next if ($bline <= 0); + my $linedata = $self->_findLineData($diffMap, $filename, $bline); + my $val = $lineCovBase->value($bline); + $linedata->base_count($val); + } + if ($verbose) { + print(" line data map:\n"); + foreach my $line (sort keys %$lineDataMap) { + my $data = $lineDataMap->{$line}; + print(" $line: ", + $data->type(), + ' curr:', + $data->in_curr() ? $data->lineNo('current') : '-', + ' base:', + $data->in_base() ? $data->lineNo('base') : '-', + "\n"); + } + } + foreach my $line (sort keys %$lineDataMap) { + my $linedata = $lineDataMap->{$line}; + my $tla; + if ($linedata->type() eq "insert") { + if (!$linedata->in_curr()) { + # can get here if the 'diff' file is wrong with respect to + # baseline vs. current coverage data - e.g., showing that + # an unchanged line has a difference + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + "$filename:$line: 'diff' data claims this line is inserted but it is not in 'current' coverage data" + ); + next; + } + $tla = ($linedata->curr_count() > 0) ? "GNC" : "UNC"; + print(" insert $line $tla\n") if ($verbose); + } elsif ($linedata->type() eq "delete") { + if (!$linedata->in_base()) { + # similarly: can get here if the diff vs baseline/current data + # is inconsistent. + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + "$filename:$line: 'diff' data claims this line is deleted but it is not in 'baseline' coverage data" + ); + next; + } + $tla = ($linedata->base_count() > 0) ? "DCB" : "DUB"; + print(" delete $line $tla\n") if ($verbose); + } else { + die("FileCoverageInfo:: deleted segment line=$line file=$filename") + unless $linedata->type() eq "equal"; + + if ($linedata->in_base() && $linedata->in_curr()) { + $tla = + _categorize($linedata->base_count(), $linedata->curr_count); + } elsif ($linedata->in_base()) { + $tla = ($linedata->base_count() > 0) ? "ECB" : "EUB"; + $linedata->tla($tla); + } else { + die("FileCoverageInfo:: non-executed line line=$line file=$filename" + ) unless $linedata->in_curr(); + + $tla = ($linedata->curr_count() > 0) ? "GIC" : "UIC"; + } + print(" equal $line $tla in:" . + ($linedata->in_base() ? ' base' : '') . + ($linedata->in_curr() ? ' curr' : '') . 
"\n") + if ($verbose); + } + $linedata->tla($tla); + } +} + +sub _cloneBranchEntry +{ + my ($cloneInto, $cloneFrom, $missTla, $hitTla) = @_; + foreach my $branchId ($cloneFrom->blocks()) { + my $block = $cloneInto->addBlock($branchId); + foreach my $br (@{$cloneFrom->getBlock($branchId)}) { + my $count = $br->count(); + my $tla = (0 == $count) ? $missTla : $hitTla; + push(@$block, [$br, $tla, [undef, $count]]); + } + } +} + +# categorize branch coverage numbers +sub _categorizeBranchCov +{ + my ($self, $filename, $base_data, $current_data, $diffMap, $verbose) = @_; + my $lineDataMap = $self->lineMap(); + + my $branchBaseline = $base_data->sumbr() if defined($base_data); + my $branchCurrent = $current_data->sumbr(); + + my %branchCovLines; + # look through the 'current' data, to find all the branch data + # keep track of hit count in baseline, current - in element 2 of the + # retained branch data. These counts are useful - for example, to report + # the hit count in the baseline for LBC branches: + # - e.g., in random testing: is this branch lost because it was a + # low probablilty event (..baselinecount is a small number)> + # Or because we did something bad and no longer reach what had been + # a high probability event? + foreach my $line ($branchCurrent->keylist()) { + next if ($line <= 0); # ignore bogus + unless (exists($lineDataMap->{$line})) { + # unless ignored, should have been caught or fixed during + # TraceInfo::_checkConsistency + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + "\"$filename\":$line line has branchcov but no linecov data (skipping)." + ); + next; + } + my $data = $lineDataMap->{$line}; + + $branchCovLines{$line} = 1; + # we expect that the line number matches... + $data->lineNo("current", $line); + # append this branch data for the line + my $currBranchData = $branchCurrent->value($line); + $data->current_branch($currBranchData, $filename); + if (!defined($branchBaseline)) { + my $categorized = BranchEntry->new($line); + $data->differential_branch($categorized); + _cloneBranchEntry($categorized, $currBranchData, 'UNC', 'GNC'); + } + } # foreach line in 'current' branch data + + return unless defined($branchBaseline); + + # now look through the baseline to find matching data + + foreach my $base_line ($branchBaseline->keylist()) { + my $data = $self->_findLineData($diffMap, $filename, $base_line); + my $curr_line = $data->lineNo('current'); + my $type = $data->type(); + if ($type ne 'delete') { + $branchCovLines{$curr_line} = 1; + } else { + # the line has been deleted...just record the data + my $deleteKey = "<<<" . $base_line; + } + my $baseBranchData = $branchBaseline->value($base_line); + $data->baseline_branch($baseBranchData, $filename); + } # foreach line in baseline data + + # go through all the branch data for each line, and categorize everything + foreach my $line (keys(%branchCovLines)) { + next if ($line <= 0); # ignore bogus + my $data = $self->lineMap()->{$line}; + my $type = $data->type(); + my $curr = $data->current_branch(); + my $base = $data->baseline_branch(); + my $categorized = BranchEntry->new($line); + $data->differential_branch($categorized); + # handle case that baseline and/or current do not contain branch data + my @currBlocks = defined($curr) ? $curr->blocks() : (); + my @baseBlocks = defined($base) ? 
$base->blocks() : (); + + if ($type eq 'insert') { + # can get here if diff data vs baseline/current is not consistent + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + "baseline branch data should not be defined for inserted line $filename:$line" + ) if defined($base); + + _cloneBranchEntry($categorized, $curr, 'UNC', 'GNC'); + } elsif ($type eq 'delete') { + # similarly: get here if diff data vs baseline/current is not consistent + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + "current branch data should not be defined for deleted line $filename:$line" + ) if defined($curr); + + _cloneBranchEntry($categorized, $base, 'DUB', 'DCB'); + } else { + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + "unexpected branch coverage type $type at $filename:$line") + if $type ne 'equal'; + + # branch might or might not be in both baseline and current + foreach my $branchId (@baseBlocks) { + my $b = $base->getBlock($branchId); + my $branchData = $categorized->addBlock($branchId); + if (defined($curr) && + $curr->hasBlock($branchId)) { + my $c = $curr->getBlock($branchId); + + my $num_base = scalar(@$b); + my $num_curr = scalar(@$c); + my $max = $num_base > $num_curr ? $num_base : $num_curr; + my $tla; + for (my $i = 0; $i < $max; ++$i) { + if ($i < $num_base && + $i < $num_curr) { + my $base_br = $b->[$i]; + my $curr_br = $c->[$i]; + $tla = + _categorize($base_br->count(), + $curr_br->count()); + push(@$branchData, + [$curr_br, $tla, + [$base_br->count(), $curr_br->count()] + ]); + } elsif ($i < $num_base) { + my $base_br = $b->[$i]; + $tla = (0 == $base_br->count()) ? 'EUB' : 'ECB'; + push(@$branchData, + [$base_br, $tla, [$base_br->count(), undef]]); + } else { + my $curr_br = $c->[$i]; + $tla = (0 == $curr_br->count()) ? 'UIC' : 'GIC'; + push(@$branchData, + [$curr_br, $tla, [undef, $curr_br->count()]]); + } + } + } else { + # branch not found in current... + foreach my $base_br (@$b) { + my $tla = (0 == $base_br->count()) ? 'EUB' : 'ECB'; + push(@$branchData, + [$base_br, $tla, [$base_br->count(), undef]]); + } + } + } + # now check for branches that are in current but not in baseline... + foreach my $branchId (@currBlocks) { + next + if defined($base) && + $base->hasBlock($branchId); # already processed + my $c = $curr->getBlock($branchId); + my $branchData = $categorized->addBlock($branchId); + foreach my $curr_br (@$c) { + my $tla = (0 == $curr_br->count()) ? 'UIC' : 'GIC'; + push(@$branchData, + [$curr_br, $tla, [undef, $curr_br->count()]]); + } + } # foreach branchId in current that isn't in base + } + } +} + +sub _cloneMcdcEntry +{ + my ($cloneInto, $cloneFrom, $missTla, $hitTla) = @_; + while (my ($groupSize, $group) = each(%{$cloneFrom->groups()})) { + foreach my $expr (@$group) { + foreach my $sense (0, 1) { + my $count = $expr->count($sense); + my $tla = (0 == $count) ? $missTla : $hitTla; + $count = [$tla, undef, $count] + ; # [TLA, baseline value, current value] + $cloneInto->insertExpr('unknownFile', $groupSize, + $sense, $count, + $expr->index(), $expr->expression()); + } + } + } +} + +sub _categorizeMcdcCov +{ + my ($self, $filename, $base_data, $current_data, $diffMap, $verbose) = @_; + my $lineDataMap = $self->lineMap(); + + my $mcdcBaseline = $base_data->mcdc() if defined($base_data); + my $mcdcCurrent = $current_data->mcdc(); + + my %mcdcCovLines; + # look through the 'current' data, to find all the MC/DC data + # keep track of hit count in baseline, current - in element 2 of the + # retained MC/DC data. 
These counts are useful - for example, to report
+    # the hit count in the baseline for LBC elements:
+    #  - e.g., in random testing: is this branch lost because it was a
+    #    low probability event (..baseline count is a small number)?
+    #    Or because we did something bad and no longer reach what had been
+    #    a high probability event?
+    foreach my $line ($mcdcCurrent->keylist()) {
+        next if ($line <= 0);    # ignore bogus
+        unless (exists($lineDataMap->{$line})) {
+            # unless ignored, should have been caught or fixed during
+            # TraceInfo::_checkConsistency
+            lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA,
+                "\"$filename\":$line line has MC/DC but no linecov data (skipping)."
+            );
+            next;
+        }
+        my $data = $lineDataMap->{$line};
+
+        $mcdcCovLines{$line} = 1;
+        # we expect that the line number matches...
+        $data->lineNo("current", $line);
+        # append this MC/DC data for the line (this is an MCDC_Block)
+        my $currMcdcData = $mcdcCurrent->value($line);
+        $data->current_mcdc($currMcdcData, $filename);
+        if (!defined($mcdcBaseline)) {
+            my $categorized = MCDC_Block->new($line);
+            $data->differential_mcdc($categorized);
+            _cloneMcdcEntry($categorized, $currMcdcData, 'UNC', 'GNC');
+        }
+    }    # foreach line in 'current' MC/DC data
+
+    return unless defined($mcdcBaseline);
+
+    # now look through the baseline to find matching data
+
+    foreach my $base_line ($mcdcBaseline->keylist()) {
+        next if ($base_line <= 0);    # ignore bogus
+        my $data = $self->_findLineData($diffMap, $filename, $base_line);
+        my $curr_line = $data->lineNo('current');
+        my $type = $data->type();
+        if ($type ne 'delete') {
+            $mcdcCovLines{$curr_line} = 1;
+        } else {
+            # the line has been deleted...just record the data
+            my $deleteKey = "<<<" . $base_line;
+        }
+        my $baseMcdcData = $mcdcBaseline->value($base_line);
+        $data->baseline_mcdc($baseMcdcData, $filename);
+    }    # foreach line in baseline data
+
+    # go through all the MC/DC data for each line, and categorize everything
+    foreach my $line (keys(%mcdcCovLines)) {
+        my $data = $self->lineMap()->{$line};
+        my $type = $data->type();
+        my $curr = $data->current_mcdc();
+        my $base = $data->baseline_mcdc();
+        my $categorized = MCDC_Block->new($line);
+        $data->differential_mcdc($categorized);
+        # handle case that baseline and/or current do not contain MC/DC data
+        my $currGroups = defined($curr) ? $curr->groups() : ();
+        my $baseGroups = defined($base) ?
$base->groups() : (); + + if ($type eq 'insert') { + # can get here if diff data vs baseline/current is not consistent + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + "baseline MC/DC data should not be defined for inserted line $filename:$line" + ) if defined($base); + + _cloneMcdcEntry($categorized, $curr, 'UNC', 'GNC'); + } elsif ($type eq 'delete') { + # similarly: get here if diff data vs baseline/current is not consistent + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + "current MC/DC data should not be defined for deleted line $filename:$line" + ) if defined($curr); + + _cloneMcdcEntry($categorized, $base, 'DUB', 'DCB'); + } else { + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + "unexpected MC/DC coverage type $type at $filename:$line") + if $type ne 'equal'; + # group might or might not be in both baseline and current + while (my ($groupSize, $bgroup) = each(%$baseGroups)) { + + if (defined($currGroups) && exists($currGroups->{$groupSize})) { + my $cgroup = $currGroups->{$groupSize}; + + for (my $i = 0; $i < $groupSize; ++$i) { + my $b = $bgroup->[$i]; + my $c = $cgroup->[$i]; + die("mismatched expressions") + unless $b->expression() eq $c->expression(); + + foreach my $sense (0, 1) { + my $b_count = $b->count($sense); + my $c_count = $c->count($sense); + my $tla = _categorize($b_count, $c_count); + $categorized->insertExpr('unknownFile', $groupSize, + $sense, [$tla, $b_count, $c_count], + $i, $c->expression()); + } + } + } else { + # not found in current + foreach my $b (@$bgroup) { + foreach my $sense (0, 1) { + my $b_count = $b->count($sense); + my $tla = $b_count ? 'ECB' : 'EUB'; + $categorized->insertExpr('unknownFile', $groupSize, + $sense, [$tla, $b_count, undef], + $b->index(), $b->expression()); + } + } + } + } #foreach group in base + } # endif deleted line + } # foreach line +} + +sub _cloneFunctionEntry +{ + my ($cloneInto, $cloneFrom, $missTla, $hitTla) = @_; + + my $hit = $cloneFrom->hit(); + my $tla = (0 == $hit) ? $missTla : $hitTla; + $cloneInto->setCountDifferential([$hit, $tla]); + my $aliases = $cloneFrom->aliases(); + foreach my $alias (keys %$aliases) { + $hit = $aliases->{$alias}; + $tla = (0 == $hit) ? 
$missTla : $hitTla; + $cloneInto->addAliasDifferential($alias, [$hit, $tla]); + } +} + +sub _categorizeFunctionCov +{ + my ($self, $filename, $base_data, $current_data, $diffMap, $verbose) = @_; + die("map should not be defined yet") unless !defined($self->functionMap()); + my $differentialMap = {}; + push(@$self, $differentialMap); + my $lineDataMap = $self->lineMap(); + + my $funcBase = $base_data->func() if defined($base_data); + my $funcCurrent = $current_data->func(); + # use merged line coverage to categorize function by checking for + # edits in function range + # $lineCovBase and $lineCovCurrent are 'CountData' objects + # - of lineNo->hit count + my $lineCovBase = $base_data->sum() if defined($base_data); + my $lineCovCurrent = $current_data->sum(); + + my %funcCovLines; + foreach my $key ($funcCurrent->keylist()) { + my $func = $funcCurrent->findKey($key); + my $line = $func->line(); + next if ($line <= 0); # ignore bogus + my $type = $diffMap->type($filename, $diffMap->NEW, $line); + $funcCovLines{$line} = 1; + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + "'current' line $filename:$line should not be marked 'delete'") + if $type eq 'delete'; + my $data; + if (!exists($lineDataMap->{$line})) { + $data = LineData->new($type); + $lineDataMap->{$line} = $data; + } else { + $data = $lineDataMap->{$line}; + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + "type mismatch " . $data->type() . + " -> $type for $filename:$line") + unless $data->type() eq $type; + } + # we expect that the line number matches... + $data->lineNo("current", $line); + # function data for the line + $data->current_function($func); + if (!defined($funcBase)) { + my $name = $func->name(); + my $categorized = + FunctionEntry->new($name, $funcCurrent, + $line, $func->end_line()); + $differentialMap->{$name} = $categorized; + $data->differential_function($categorized); + + _cloneFunctionEntry($categorized, $func, 'UNC', 'GNC'); + } + } # foreach function in current data + + return unless (defined($funcBase)); + + # look through the baseline to find matching data + foreach my $key ($funcBase->keylist()) { + my $func = $funcBase->findKey($key); + my $line = $func->line(); + next if ($line <= 0); # ignore bogus + my $data = $self->_findLineData($diffMap, $filename, $line); + + my $type = $data->type(); + my $curr_line = $data->lineNo('current'); + if ($type ne 'delete') { + $funcCovLines{$curr_line} = 1; + } else { + # the line has been deleted...just record the data + my $deleteKey = "<<<" . $line; + $funcCovLines{$deleteKey} = 1; + } + $data->baseline_function($func); + } # foreach function in baseline data + + # go through function data for each line and categorize... 
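+    # Sketch of the recategorization applied below (comments only): when the
+    # function's known [start_line, end_line] range contains an edit that
+    # adds, removes, or changes executable lines, the 'stable' categories are
+    # promoted to 'new code' categories:
+    #     UBC -> UNC,    GBC/CBC -> GNC,    LBC stays LBC
+    # so a modified-but-untested function is reported as UNC rather than UBC.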
+ foreach my $line (keys %funcCovLines) { + my $data = $lineDataMap->{$line}; + my $type = $data->type(); + my $curr = $data->current_function(); + my $base = $data->baseline_function(); + my $categorized; + if (defined($curr)) { + # also copy the end line + $categorized = $curr->cloneWithEndLine(1); + } else { + # not in current - don't copy end line + # @todo if needed, could compute where the end line of the deleted + # function is now + $categorized = $base->cloneWithEndLine(0); + } + my $name = $categorized->name(); + $differentialMap->{$name} = $categorized; + $data->differential_function($categorized); + + if (!defined($base)) { + # either this line was inserted or the line hasn't changed but + # wasn't recognized as a function before (e.g., unused template) + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + "$filename:$line: unexpected undef baseline function data for deleted $name" + ) if $type eq 'delete'; + _cloneFunctionEntry($categorized, $curr, + $type eq 'insert' ? 'UNC' : 'UIC', + $type eq 'insert' ? 'GNC' : 'GIC'); + } elsif (!defined($curr)) { + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + "$filename:$line: unexpected undef current function data for inserted $name" + ) if $type eq 'insert'; + _cloneFunctionEntry($categorized, $base, + $type eq 'delete' ? 'DUB' : 'EUB', + $type eq 'delete' ? 'DCB' : 'ECB'); + } else { + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + "unexpected function coverage type $type at $filename:$line") + if $type ne 'equal'; + # if we know end lines for this function, then check if there + # have been any changes in the function body. If any changes, + # then mark GNC or UNC + my $changed; + my $end_line = $curr->end_line(); + + if (defined($end_line)) { + # we keep list of functions and branch coverpoints contained + # in the function - and can report per-function coverage + # in the function detail view. Maybe user will prioritize + # functions which are hit but whose coverage is low + $changed = 0; + for (my $line = $curr->line(); $line <= $end_line; ++$line) { + # data for this line + my $type = $diffMap->type($filename, $diffMap->NEW, $line); + + # claim a change if: + # - line is new and is source code, OR + # - line is unchanged and was code before and isn't code + # now, or line wasn't code before and is now + if ($type eq 'insert' && + defined($lineCovCurrent->value($line))) # line is code + { + $changed = 1; + last; + } else { + # line is same + my $bline = + $diffMap->lookup($filename, $diffMap->NEW, $line); + if (defined($lineCovBase->value($bline)) ^ + defined($lineCovCurrent->value($line))) { + $changed = 1; + last; + } + } + } # end for each current line in current function + $end_line = $base->end_line(); + if (defined($end_line) && !$changed) { + # check for baseline lines which were deleted + for (my $bline = $base->line(); + $bline <= $end_line; + ++$bline) { + # data for this line + my $type = + $diffMap->type($filename, $diffMap->OLD, $bline); + # claim a change if line is deleted and was code + # before. 
+ # note that we already checked unchanged lines, above + if ($type ne 'equal' && # line is in old but not in new + defined($lineCovBase->value($bline)) + ) # line is code + { + $changed = 1; + last; + } + } # end for each current line in current function + } + lcovutil::info(1, "$name body at $filename:$line is changed\n") + if ($changed); + } + my $tla = _categorize($base->hit(), $curr->hit()); + if (defined($changed) && + $changed) { + if ($tla eq 'UBC') { + $tla = 'UNC'; + } elsif ($tla eq 'GBC' || + $tla eq 'CBC') { + $tla = 'GNC'; + } # else is LBC - leave it that way + lcovutil::info(2, + "$name recategorized to $tla at $filename:$line\n"); + } + + $categorized->setCountDifferential([$curr->hit(), $tla]); + # particular alias may be in both versions + my $base_aliases = $base->aliases(); + my $curr_aliases = $curr->aliases(); + foreach my $alias (keys %$base_aliases) { + my $hit = $base_aliases->{$alias}; + + my $tla; + if (exists($curr_aliases->{$alias})) { + my $hitCurr = $curr_aliases->{$alias}; + $tla = _categorize($hit, $hitCurr); + $hit = $hitCurr; + # adjust TLA if function range known.. + if (defined($changed) && + $changed) { + if ($tla eq 'UBC') { + $tla = 'UNC'; + } elsif ($tla eq 'GBC' || + $tla eq 'CBC') { + $tla = 'GNC'; + } # else is LBC - leave it that way + lcovutil::info(2, + "$name alias $alias recategorized to $tla at $filename:$line\n" + ); + } + } else { + $tla = (0 == $hit) ? 'EUB' : 'ECB'; + } + $categorized->addAliasDifferential($alias, [$hit, $tla]); + } + # now look for aliases that are in current but not in baseline + foreach my $alias (keys %$curr_aliases) { + next if exists($base_aliases->{$alias}); + + my $hit = $curr_aliases->{$alias}; + my $tla = (0 == $hit) ? "UIC" : "GIC"; + $categorized->addAliasDifferential($alias, [$hit, $tla]); + } + } + + if ('UNK' eq $data->tla()) { + # there is a function here - but no line - manufacture some data + my $d = $categorized->hit(); + my ($hit, $funcTla) = @$d; + $data->tla($funcTla); + if (defined($base) && + $data->in_base()) { + $data->base_count($base->hit()); + } + if (defined($curr) && + $data->in_curr()) { + $data->curr_count($hit); + } + } + } +} + +package DiffMap; + +# @todo Could convert this use use a callback API - similar to annotate, etc. +# that would be more consistent and might execute faster. + +use constant { + LINEMAP => 0, + FILEMAP => 1, + BASELINE => 2, + # keep track of line number where file entry is found in diff file + # - use line number in error messages. 
+ DIFF_FILENAME => 3, + LOCATION => 4, + UNCHANGED => 5, + ALIASES => 6, + DIFF_ROOT => 7, + + OLD => 0, + NEW => 1, + TYPE => 2, + + _START => 0, + _END => 1 +}; + +sub new +{ + my $class = shift; + my $self = [{}, # linemap + {}, # filemap: new_filename->old_filename + {}, # baseline: filename -> baseline lineno -> text + undef, # diff filename + [{}, {}], # def location + # element 0: old filename -> line number where this + # entry starts + # element 1: new filename -> line numbern + {} # unchanged + ]; + bless $self, $class; + return $self; +} + +sub load +{ + my ($self, $path, $info, $buildDirs) = @_; + $self->_read_udiff($path); + + # find list of soft links in [buildDirs] which may point to files in $info + # we need to keep all the aliases as there may be files in baseline + # which are not in current + if (@$buildDirs) { + my @stack = @$buildDirs; + while (@stack) { + my $dir = pop(@stack); + die("unexpected non-directory '$dir'") unless -d $dir; + $dir = File::Spec->catdir($main::cwd, $dir) + unless File::Spec->file_name_is_absolute($dir); + + opendir(my $dh, $dir) or die("can't open directory $dir: $!"); + while (my $entry = readdir($dh)) { + next if $entry eq '.' || $entry eq '..'; + my $path = File::Spec->catfile($dir, $entry); + if (-d $path) { + push(@stack, $path); + } elsif (-l $path) { + my $l = Cwd::realpath($path); + next if (!-e $l || TraceFile::skipCurrentFile($l)); + # may need to turn $l into relative path?? + $self->[ALIASES]->{$path} = $l if (-f $l); + #lcovutil::info("add alias '$path' -> '$l'\n"); + # really, this should be a file... + die("unexpected soft link $path to directory") + unless -f $l; + } + # else just ignore file entry + } + closedir($dh); + } + } + return $self; +} + +sub empty +{ + my $self = shift; + return !scalar(%{$self->[LINEMAP]}); +} + +sub findName +{ + my ($self, $file) = @_; + my $f = $lcovutil::case_insensitive ? lc($file) : $file; + $f = $self->[ALIASES]->{$f} if exists($self->[ALIASES]->{$f}); + + if (File::Spec->file_name_is_absolute($f) && + !exists($self->[LINEMAP]->{$f})) { + my $p = + $lcovutil::case_insensitive ? + lc($self->[DIFF_ROOT]) : + $self->[DIFF_ROOT]; + $p .= $lcovutil::dirseparator; + my $l = length($p); + if (length($f) > $l && + $p eq substr($f, 0, $l)) { + my $s = substr($f, $l); + if (exists($self->[LINEMAP]->{$s})) { + $f = $s; + } elsif (exists($self->[ALIASES]->{$s})) { + $f = $self->[ALIASES]->{$s}; + } + } + } + return $f; +} + +sub containsFile +{ + my ($self, $file) = @_; + $file = $self->findName($file); + return exists($self->[LINEMAP]->{$file}); +} + +sub recreateBaseline +{ + my ($self, $filename, $currentSrcLines) = @_; + + my $diffs = $self->[LINEMAP]->{$self->findName($filename)}; + die("no diff data for $filename") unless defined $diffs; + + my $deleted = $self->[BASELINE]->{$self->findName($filename)}; + my @lines; + foreach my $chunk (@$diffs) { + if ($chunk->[TYPE] eq 'equal') { + my ($from, $to) = @{$chunk->[NEW]}; + push(@lines, @{$currentSrcLines}[($from - 1) .. 
($to - 1)]); + } elsif ($chunk->[TYPE] eq 'delete') { + my $r = $chunk->[OLD]; + for (my $i = $r->[_START]; $i <= $r->[_END]; ++$i) { + die("missing baseline line $i") + unless defined($deleted) && exists($deleted->{$i}); + push(@lines, $deleted->{$i}); + } + } + # else 'insert': nothing to do/those lines are not in baseline + } + return \@lines; +} + +sub lookup +{ + my ($self, $file, $vers, $line) = @_; + + $file = $self->findName($file); + + if (!exists($self->[LINEMAP]->{$file})) { + #mapping is identity when no diff was read + return $line; + } + + my @candidates = + grep { $_->[$vers]->[_START] < $line } @{$self->[LINEMAP]->{$file}}; + # candidates is empty if $line==1 - which is unusual, as there is typically + # a comment, copyright notice, #include, or whatever on the first line + return $line unless @candidates; + + my $chunk = pop @candidates; + + my $alt = ($vers == OLD) ? NEW : OLD; + + if ($line > $chunk->[$vers]->[_END]) { + return ($chunk->[$alt]->[_END] + ($line - $chunk->[$vers]->[_END])); + } + return ($chunk->[$alt]->[_START] + ($line - $chunk->[$vers]->[_START])); +} + +sub type +{ + my ($self, $file, $vers, $line) = @_; + + $file = $self->findName($file); + + if (!defined($self->[LINEMAP]->{$file})) { + #mapping is identity when no diff was read + if (defined($main::show_tla) && + (@main::base_filenames || + $main::diff_filename) + ) { + return "equal"; # categories will be "GIC", "UIC" + } else { + return "insert"; # categories will be "GNC", "UNC" + } + } + + if (!defined($self->[FILEMAP]->{$file})) { + #mapping with no filemap when baseline file was deleted + return "delete"; + } + + # ->{start} equal $line only if beginning of range or omitted in ->{type} + my @candidates = + grep { $_->[$vers]->[_START] <= $line } @{$self->[LINEMAP]->{$file}}; + my $chunk = pop @candidates; + my $prev = pop @candidates; + while (defined($prev) && + $line >= $prev->[$vers]->[_START] && + $line <= $prev->[$vers]->[_END]) { + $chunk = $prev; + $prev = pop @candidates; + } + if (!defined($chunk)) { + warn "DiffMap::type(): got undef chunk at $file, $vers, $line\n"; + return "undef chunk"; + } + if (!defined($chunk->[TYPE])) { + warn "DiffMap::type(): got undef type at $file, $vers, $line\n"; + return "undef type"; + } + return $chunk->[TYPE]; +} + +sub baseline_file_name +{ + # file may have been moved between baseline and current... + my ($self, $current_name) = @_; + + my $key = $self->findName($current_name); + if (exists($self->[FILEMAP]->{$key})) { + return $self->[FILEMAP]->{$key}; + } + return $current_name; +} + +sub files +{ + my $self = shift; + return keys(%{$self->[FILEMAP]}); +} + +sub dump_map +{ + my $self = shift; + + foreach my $file (keys %{$self->[FILEMAP]}) { + my $currfile = + defined($self->[FILEMAP]->{$file}) ? $self->[FILEMAP]->{$file} : + "[deleted]"; + printf("In $file (was: $currfile):\n"); + foreach my $chunk (@{$self->[LINEMAP]->{$file}}) { + _printChunk($chunk); + } + } + return $self; +} + +sub check_version_match +{ + my ($self, $baseline, $current) = @_; + return unless $lcovutil::versionCallback; + + # skip files which were dropped (no longer in project) or were + # just added to the project (was not in the baseline - so we aren't + # looking for differences. + foreach my $curr ($current->files()) { + next unless $baseline->file_exists($curr); + my $currData = $current->data($curr); + my $curr_version = $currData->version(); + my $baseData = $baseline->data($curr); + my $base_version = $baseData->version(); + + # silently compare version... 
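+        # Expected relationship checked below (sketch):
+        #     file listed in the 'diff' data      -> baseline/current versions should differ
+        #     file not listed in the 'diff' data  -> baseline/current versions should match
+        # any other combination is reported via ERROR_INCONSISTENT_DATA.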
+ my $versionMatch = + lcovutil::checkVersionMatch($curr, $base_version, $curr_version, + "diff entry compare", 1); + if ($self->containsFile($curr)) { + # file is in 'diff' data: we expect the version to be different + # between baseline/current + if ($versionMatch) { + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + "File \"$curr\" appears in 'diff' data file '" . + $self->[DIFF_FILENAME] . + "' but 'baseline' and 'current' versions '" . + ($curr_version ? $curr_version : '') . + "' match"); + } + } else { + # not in 'diff': we expect the versions to be identical + if (!$versionMatch) { + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + "File \"$curr\" version changed from '" . + ($base_version ? $base_version : '') . + "' but file not found in 'diff' data file '" . + $self->[DIFF_FILENAME] . "'."); + } + } + } +} + +sub check_path_consistency +{ + # check that paths which appear in diff also appear in baseline or current + # .info files - if not, then there is likely a path consistency issue + # $baseline and $current are both TraceFile structs - + # return 0 if inconsistency found + + my ($self, $baseline, $current) = @_; + (ref($baseline) eq 'TraceFile' && ref($current) eq 'TraceFile') or + die("wrong arg types"); + + # check that files which are in both baseline and current and are NOT + # in the 'diff' data have the same version. + # That is: files whose version differs should appear in the diff data + $self->check_version_match($baseline, $current); + + my %diffMap; # current_filename -> where_used + my %diffBaseMap; # current_directory -> [] + # for every 'current' filename in the udiff file + foreach my $f ($self->files()) { + $diffMap{$f} = 0; # this file not used in baseline or current (yet) + my $b = File::Basename::basename($f); + $diffBaseMap{$b} = [[], {}] + unless exists($diffBaseMap{$b}); + push(@{$diffBaseMap{$b}->[0]}, $f); + } + # foreach unchanged file in udiff data + foreach my $f (keys %{$self->[UNCHANGED]}) { + # unchanged in baseline and current + $diffMap{$f} = 3; + } + my %missed; + # for each file in 'current' info: + foreach my $curr ($current->files()) { + my $b = File::Basename::basename($curr); + if ($self->containsFile($curr)) { + my $alias = + exists($self->[ALIASES]->{$curr}) ? $self->[ALIASES]->{$curr} : + $curr; + $diffMap{$alias} |= 1; # used in current + $diffBaseMap{$b}->[1]->{$alias} = 0 + unless exists($diffBaseMap{$b}->[1]->{$alias}); + ++$diffBaseMap{$b}->[1]->{$alias}; + } elsif (!exists($self->[UNCHANGED]->{$self->findName($curr)})) { + $missed{$curr} = 1; # in current but not in diff + } + } + # for each file in 'baseline' info: + foreach my $base ($baseline->files()) { + my $b = File::Basename::basename($base); + if ($self->containsFile($base)) { + my $alias = + exists($self->[ALIASES]->{$base}) ? 
$self->[ALIASES]->{$base} : + $base; + $diffMap{$alias} |= 2; # used in baseline + $diffBaseMap{$b}->[1]->{$alias} = 0 + unless exists($diffBaseMap{$b}->[1]->{$alias}); + ++$diffBaseMap{$b}->[1]->{$alias}; + } elsif (!exists($self->[UNCHANGED]->{$self->findName($base)})) { + # in baseline but not in diff + if (exists($missed{$base})) { + $missed{$base} |= 2; + } else { + $missed{$base} = 2; + } + } + } + my $ok = 1; + foreach my $f (sort keys(%missed)) { + my $b = File::Basename::basename($f); + next unless exists($diffBaseMap{$b}); + + # this basename is in the diff file and didn't match any other + # trace filename entry (i.e., same filename in more than one + # source code directory) - then warn about possible pathname issue + my ($diffFiles, $sourceFiles) = @{$diffBaseMap{$b}}; + # find the files which appear in the 'diff' list which have the + # same basename and were not matched - those might be candidates + my @unused; + for my $d (@$diffFiles) { + # my $location = $self->[DIFF_FILENAME] . ':' . $self->[LOCATION]->[NEW]->{$d}; + push(@unused, $d) + unless exists($sourceFiles->{$d}); + } + next unless @unused; + + # my $baseData = $baseline->data($f); + # my $baseLocation = join(":", ${$baseData->location()}); + # my $currData = $current->data($f); + # my $currLocation = join(":", ${$currData->location()}); + + my $type; + if (2 == $missed{$f}) { + $type = "baseline"; + } elsif (1 == $missed{$f}) { + $type = "current"; + } else { + $type = "both baseline and current"; + } + my $single = 1 == scalar(@unused); + # @todo could print line numbers in baseline, current .info files and + # in diff file .. + if (lcovutil::warn_once($lcovutil::ERROR_MISMATCH, $f)) { + my $suffix = + $main::elide_path_mismatch ? ' (elided)' : + lcovutil::explain_once( + 'elide-path-mismatch', + ' Perhaps see "--elide-path-mismatch", "--substitute" and "--build-directory" options in \'man ' + . $lcovutil::tool_name . '\''); + lcovutil::ignorable_warning( + $lcovutil::ERROR_MISMATCH, + "source file '$f' (in $type .info file" . + ($missed{$f} == 3 ? "s" : "") . + ") has same basename as 'diff' " + . + ($single ? 'entry ' : "entries:\n\t" + ) . + "'" . + join("'\n\t", @unused) . + "' - but a different path." . + ($single ? " " : "\n\t") . + 'Possible pathname mismatch?' . + $suffix); + } + if ($main::elide_path_mismatch && + $missed{$f} == 3 && + $single) { + $self->[FILEMAP]->{$f} = $f; + $self->[LINEMAP]->{$f} = $self->[LINEMAP]->{$unused[0]}; + } else { + $ok = 0; + } + } + return $ok; +} + +sub _printChunk +{ + my $chunk = shift; + printf(" %6s\t[%d:%d]\t[%d:%d]\n", + $chunk->[TYPE], $chunk->[OLD]->[_START], + $chunk->[OLD]->[_END], $chunk->[NEW]->[_START], + $chunk->[NEW]->[_END]); +} + +sub _newChunk +{ + my ($type, $baseline_start, $current_start) = @_; + # new chunk starts and ends on the same line - until we see more lines and + # extend either the old or new range + return [[$baseline_start, $baseline_start], + [$current_start, $current_start], + $type + ]; +} + +sub show_map +{ + my ($self, $file) = @_; + + $file = $self->findName($file); + return $self unless exists($self->[FILEMAP]->{$file}); + my $currfile = + defined($self->[FILEMAP]->{$file}) ? $self->[FILEMAP]->{$file} : + "[deleted]"; + print("In $file" . ($currfile ne $file ? " (was: $currfile)" : '') . 
":\n"); + foreach my $chunk (@{$self->[LINEMAP]->{$file}}) { + _printChunk($chunk); + } + return $self; +} + +sub _read_udiff +{ + my $self = shift; + my $diff_file = shift; # Name of diff file + my $line; # Contents of current line + my $file_old; # Name of old file in diff section + my $file_new; # Name of new file in diff section + my $filename; # Name of common filename of diff section + + # Check if file exists and is readable + stat($diff_file); + if (!(-r _)) { + die("cannot read udiff file $diff_file!\n"); + } + + $self->[DIFF_FILENAME] = $diff_file; + + my $diffFile = InOutFile->in($diff_file); + my $diffHdl = $diffFile->hdl(); + + my $chunk; + my $old_block = 0; + my $new_block = 0; + # would like to use Regexp::Common::time - but module not installed + #my $time = $RE{time}{iso}; + my $time = + '[1-9]{1}[0-9]{3}\-[0-9]{2}\-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]*)?( .[0-9]+)?'; + # Parse diff file line by line + my $verbose = 0; + while (<$diffHdl>) { + chomp($_); + s/\r//g; + $line = $_; + + # the 'diff' new/old file name line may be followed by a timestamp + # If so, remove it so our regexp matches more easily. + # p4 and git diff outputs do not have the timestamp + if ($line =~ /^[-+=]{3} \S.*(\s+$time)$/) { + $line =~ s/\Q$1\E$//; + } + foreach ($line) { + /^Git Root: (.+)$/ && do { + $self->[DIFF_ROOT] = $1; + last; + }; + + # Filename of unchanged file: + # === + /^=== (.+)$/ && do # note: filename may contain whitespace + { + if ($filename) { + die("not case insensitive") + unless !$lcovutil::case_insensitive || + ($filename eq lc($filename)); + push(@{$self->[LINEMAP]->{$filename}}, $chunk); + undef $filename; + } + my $file = $1; + $file = lcovutil::strip_directories($file, $main::strip); + my $key = ReadCurrentSource::resolve_path($file, 1); + $key = lc($key) if $lcovutil::case_insensitive; + if (exists($self->[UNCHANGED]->{$key})) { + # unchanged entry flag value should be 1 + die("$diff_file:$.: $key already in linemap - marked unchanged" + . ($file eq $key ? '' : " (substituted '$file')")) + unless $self->[UNCHANGED]->{$key}; + lcovutil::ignorable_error($lcovutil::ERROR_FORMAT, + "$diff_file:$.: duplicate 'unchanged' entry for $key\n" + ); + last; + } + $verbose = (defined($main::verboseScopeRegexp) && + $key =~ m/$main::verboseScopeRegexp/); + print(" $key: unchanged\n") if $verbose; + # LCOV_EXCL_START + if (exists($self->[LINEMAP]->{$key})) { + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + "$diff_file:$.: $key 'unchanged' entry found but diff records exist" + . ($file eq $key ? 
'' : " (substituted '$file')")); + # message ignored - delete the diff records + delete($self->[LINEMAP]->{$key}); + } + # LCOV_EXCL_STOP + $self->[UNCHANGED]->{$key} = 1; + last; + }; + + # Filename of old file: + # --- + /^--- (.+)$/ && do { + if ($filename) { + die("not case insensitive") + unless !$lcovutil::case_insensitive || + ($filename eq lc($filename)); + push(@{$self->[LINEMAP]->{$filename}}, $chunk); + undef $filename; + } + $file_old = $1; + $file_old = + lcovutil::strip_directories($file_old, $main::strip); + $file_old = ReadCurrentSource::resolve_path($file_old, 1); + $file_old = lc($file_old) if $lcovutil::case_insensitive; + $self->[LOCATION]->[OLD]->{$file_old} = $.; + last; + }; + # Filename of new file: + # +++ + /^\+\+\+ (.+)$/ && do { + # Add last file to resulting hash + $file_new = $1; + $file_new = + lcovutil::strip_directories($file_new, $main::strip); + my $key = ReadCurrentSource::resolve_path($file_new, 1); + $key = lc($key) + if $lcovutil::case_insensitive; + my $notNull = $file_new ne $lcovutil::devnull; + $filename = $notNull ? $key : undef; + + if ($filename) { + # LCOV_EXCL_START + if (exists($self->[UNCHANGED]->{$key})) { + lcovutil::ignorable_error( + $lcovutil::ERROR_INTERNAL, + "$diff_file:$.: $filename marked unchanged but diff record found" + . + ($file_new eq $key ? '' : + " (substituted '$file_new')")); + # OK - error ignored...remove the 'unchanged' marker + delete($self->[UNCHANGED]->{$key}); + } + # LCOV_EXCL_STOP + $self->[LINEMAP]->{$key} = []; + } + $verbose = (defined($main::verboseScopeRegexp) && + $key =~ m/$main::verboseScopeRegexp/); + print("file $key\n") if $verbose; + if ($notNull) { + $self->[FILEMAP]->{$key} = $file_old; + # keep track of location where this file was found + $self->[LOCATION]->[NEW]->{$key} = + $.; # record original name too.. 
$file_new + } + # new file - chunk starts here + $chunk = _newChunk('equal', 1, 1); + last; + }; + # Start of diff block: + # @@ -old_start,old_num, +new_start,new_num @@ + /^\@\@\s+-(\d+),(\d+)\s+\+(\d+),(\d+)\s+\@\@.*$/ && do { + if ($1 > ($chunk->[OLD]->[_END])) { + # old start skips "equal" lines + if ($chunk->[TYPE] ne "equal") { + if ($filename) { + push(@{$self->[LINEMAP]->{$filename}}, $chunk); + _printChunk($chunk) if ($verbose); + } + $chunk = + _newChunk('equal', + $chunk->[OLD]->[_END] + 1, + $chunk->[NEW]->[_END] + 1); + } + } + # "END" will be incremented on content lines + $chunk->[OLD]->[_END] = $1 - 1; + $chunk->[NEW]->[_END] = $3 - 1; + $old_block = $2; + $new_block = $4; + #printf "equal [%d:%d] [%d:%d]\n", $l[0], $l[1], $l[2], $l[3]; + last; + }; + # Unchanged line + # + /^ / && do { + if ($old_block == 0 && $new_block == 0) { + last; + } + if ($chunk->[TYPE] ne "equal") { + if ($filename) { + push(@{$self->[LINEMAP]->{$filename}}, $chunk); + _printChunk($chunk) if ($verbose); + } + # next chunk starts right after current one + $chunk = _newChunk('equal', + $chunk->[OLD]->[_END] + 1, + $chunk->[NEW]->[_END] + 1); + } else { + $chunk->[NEW]->[_END] += 1; + $chunk->[OLD]->[_END] += 1; + } + last; + }; + # Line as seen in old file + # + /^-(.*)$/ && do { + if ($old_block == 0 && $new_block == 0) { + last; + } + my $baseline_lineno = $chunk->[OLD]->[_END] + 1; + # really only need to keep track of baseline content if user + # is planning on doing any filtering + my $lines; + if (exists($self->[BASELINE]->{$file_old})) { + $lines = $self->[BASELINE]->{$file_old}; + } else { + $lines = {}; + $self->[BASELINE]->{$file_old} = $lines; + } + $lines->{$baseline_lineno} = $1; + + if ($chunk->[TYPE] ne "delete") { + if ($filename) { + push(@{$self->[LINEMAP]->{$filename}}, $chunk); + _printChunk($chunk) if ($verbose); + } + $chunk = _newChunk('delete', $baseline_lineno, + $chunk->[NEW]->[_END]); + } else { + $chunk->[OLD]->[_END] = $baseline_lineno; + } + last; + }; + # Line as seen in new file + # + /^\+/ && do { + if ($old_block == 0 && $new_block == 0) { + last; + } + if ($chunk->[TYPE] ne "insert") { + if ($filename) { + push(@{$self->[LINEMAP]->{$filename}}, $chunk); + _printChunk($chunk) if ($verbose); + } + $chunk = _newChunk('insert', + $chunk->[OLD]->[_END], + $chunk->[NEW]->[_END] + 1); + } else { + $chunk->[NEW]->[_END] += 1; + } + last; + }; + # Empty line + /^$/ && do { + if ($old_block == 0 && $new_block == 0) { + last; + } + if ($chunk->[TYPE] ne "equal") { + if ($filename) { + push(@{$self->[LINEMAP]->{$filename}}, $chunk); + _printChunk($chunk) if ($verbose); + } + $chunk = _newChunk('equal', + $chunk->[OLD]->[_END] + 1, + $chunk->[NEW]->[_END] + 1); + } else { + $chunk->[NEW]->[_END] += 1; + $chunk->[OLD]->[_END] += 1; + } + last; + }; + } + } + + # Add final diff file section to resulting hash + if ($filename) { + push(@{$self->[LINEMAP]->{$filename}}, $chunk); + _printChunk($chunk) if ($verbose); + } + + # default root + $self->[DIFF_ROOT] = $main::cwd unless defined($self->[DIFF_ROOT]); + + if ($self->empty()) { + # this is probably OK - there are no differences between 'baseline' and current. + lcovutil::ignorable_error($lcovutil::ERROR_EMPTY, + "'diff' data file $diff_file contains no differences (this may be OK, if there are no difference between 'baseline' and 'current').\n" + . 
"Make sure to use 'diff -u' when generating the diff file."); + } + return $self; +} + +package InInterestingRegion; + +use constant { + NEXT => 0, + PREV => 1, + STACK => 2, + ANNOTATE_OBJ => 3, + LINECOV_HASH => 4, + # non-code line in a contiguous region before or after the + # 'interesting' code lie - should also be included - + # keep track of the length of that region, above and below + LINES_BEFORE => 5, + LINES_AFTER => 6, + +}; + +our $num_context_lines = 5; + +sub new +{ + my ($class, $srcFileStruct, $lineCovHash) = @_; + + my $self = [undef, undef, + Storable::dclone($srcFileStruct->interesting_lines()), + $srcFileStruct, $lineCovHash + ]; + # we will call the $annotateObj->line(int) to collect annotate data + # for certain lines - to find out of a non-code line is in a + # contiguous region by the same author, in the same SHA or etc. + $self->[NEXT] = shift(@{$self->[STACK]}); + if (defined($self->[NEXT]) && $self->[NEXT] != 1) { + _computeContextBefore($self, 0); + } + return bless $self, $class; +} + +sub _computeContextBefore +{ + my ($self, $prev) = @_; + + my $count = 0; + my $data = $self->[LINECOV_HASH]; + my $annotate = $self->[ANNOTATE_OBJ]; + # find contiguous source region which matches the select criteria. + # That might include non-code lined (e.g., comments) + for (my $l = $self->[NEXT] - 1; $l > $prev; --$l) { + my $lineData = $data->{$l} if exists($data->{$l}); + my $annotateData = $annotate->line($l); + last + unless SummaryInfo::selected($lineData, $annotateData, + $self->[ANNOTATE_OBJ]->path(), $l); + ++$count; + } + # region of interest is the (possibly empty) contiguous region we found + # plus the number of context lines + $self->[LINES_BEFORE] = $num_context_lines + $count; +} + +sub interesting +{ + my ($self, $lineNum) = @_; + die("unexpected lineNum '$lineNum'") if $lineNum < 1; + if (defined($self->[NEXT])) { + if ($lineNum == $self->[NEXT]) { + $self->[PREV] = $self->[NEXT]; + $self->[NEXT] = shift(@{$self->[STACK]}); + # compute following context lines - starting from here + # and going to either end of file or next interesting line + my $max = defined($self->[NEXT]) ? $self->[NEXT] : + $self->[ANNOTATE_OBJ]->num_lines(); + my $count = 0; + my $data = $self->[LINECOV_HASH]; + my $annotate = $self->[ANNOTATE_OBJ]; + for (my $l = $self->[PREV] + 1; $l < $max; ++$l) { + my $lineData = $data->{$l} if exists($data->{$l}); + my $annotateData = $annotate->line($l); + last + if !SummaryInfo::selected($lineData, $annotateData, + $self->[ANNOTATE_OBJ]->path(), $l); + ++$count; + } + $self->[LINES_AFTER] = $num_context_lines + $count; + + # and how many lines before the next interesting one? 
+ if (defined($self->[NEXT])) { + $self->_computeContextBefore($self->[PREV]); + } else { + $self->[LINES_BEFORE] = 0; + } + return 1; + } elsif ($lineNum >= $self->[NEXT] - $self->[LINES_BEFORE]) { + return 1; + } + } + if (defined($self->[PREV])) { + if ($lineNum <= $self->[PREV] + $self->[LINES_AFTER]) { + return 1; + } + $self->[PREV] = undef; + } + return 0; +} + +package SourceLine; + +use constant { + LINE => 0, + TEXT => 1, + OWNER => 2, + FULL_NAME => 3, + DATE => 4, + AGE => 5, + COMMIT => 6, + LINE_TLA => 7, + BRANCH_TLA => 8, + MCDC_TLA => 9, + FUNCTION_TLA => 10, +}; + +sub new +{ + my $class = shift; + # [lineNo, text, abbrev_name, full_name, date, age, commitID, lineTLA, branchTLA, + # MCDC_TLA, functionTLA] + my @data = @_; + my $self = \@data; + bless $self, $class; + return $self; +} + +sub to_list +{ + # used by 'select' callback - if not a package + my $self = shift; + if ($#$self >= OWNER) { + my @rtn = @{$self}[OWNER, FUNCTION_TLA]; + return \@rtn; + } + return undef; +} + +sub owner +{ + my $self = shift; + return $#$self >= OWNER ? $self->[OWNER] : undef; +} + +sub full_name +{ + my $self = shift; + return $#$self >= FULL_NAME ? $self->[FULL_NAME] : undef; +} + +# line coverage TLA +sub tla +{ + my ($self, $tla) = @_; + $self->[LINE_TLA] = $tla + if (defined($tla)); + return $#$self >= LINE_TLA ? $self->[LINE_TLA] : undef; +} + +sub branchElem +{ + my ($self, $branchElem) = @_; + $self->[BRANCH_TLA] = $branchElem + if defined($branchElem); + return $#$self >= BRANCH_TLA ? $self->[BRANCH_TLA] : undef; +} + +sub mcdcElem +{ + my ($self, $mcdcElem) = @_; + $self->[MCDC_TLA] = $mcdcElem + if defined($mcdcElem); + return $#$self >= MCDC_TLA ? $self->[MCDC_TLA] : undef; +} + +sub functionElem +{ + my ($self, $funcElem) = @_; + $self->[FUNCTION_TLA] = $funcElem + if defined($funcElem); + return $#$self >= FUNCTION_TLA ? $self->[FUNCTION_TLA] : undef; +} + +sub commit +{ + my $self = shift; + return $#$self >= COMMIT ? $self->[COMMIT] : undef; +} + +sub date +{ + my $self = shift; + return $#$self >= DATE ? $self->[DATE] : undef; +} + +sub age +{ + my $self = shift; + return $#$self >= AGE ? 
$self->[AGE] : undef; +} + +sub line +{ + my $self = shift; + return $self->[LINE]; +} + +sub text +{ + my $self = shift; + return $self->[TEXT]; +} + +package SourceFile; +our @annotateScript; +our $annotateCallback; +our $annotateTooltip = 'Line %l: commit %C on %d by %F'; +our $annotatedFiles = 0; +our $totalFiles = 0; + +use constant { + PATH => 0, + LINES => 1, + INTERESTING_LINES => 2, + LINE_OWNERS => 3, + LINE_CATEGORIES => 4, + BRANCH_OWNERS => 5, + BRANCH_CATEGORIES => 6, + MCDC_OWNERS => 5, + MCDC_CATEGORIES => 6, +}; + +sub new +{ + my ($class, $filepath, $fileSummary, $fileCovInfo, $hasNoBaselineData) = @_; + + (ref($fileSummary) eq 'SummaryInfo' && + ref($fileCovInfo) eq "FileCoverageInfo") or + die("unexpected input args"); + + my $self = [$filepath, + [], # lines + undef, # list of interesting lines + {}, # owner -> hash of TLS->list of lines + {}, # TLA -> list of lines + {} # owner -> hash of TLA->list of lines + ]; + bless $self, $class; + + $fileSummary->fileDetails($self); + + # use the line coverage count to synthesize a fake file, if we can't + # find an actual file + $self->_load($fileCovInfo); + + if ($hasNoBaselineData) { + my $fileAge = $self->age(); + if (defined($fileAge) && + (!defined($main::age_basefile) || + $fileAge > $main::age_basefile) + ) { + # go through the fileCov data and change UIC->UBC, GIC->CBC + # - pretend that we already saw this file data - this is the first + # coverage report which contains this data. + $fileCovInfo->recategorizeTlaAsBaseline(); + } + } + + # sort lines in ascending numerical order - we want the 'owner' + # and 'tla' line lists to be sorted - and it is probably faster to + # sort the file list once than to sort each of the sub-lists + # individually afterward. + # DCB, DUB category keys have leading "<<<" characters - which we strip + # in order to compare + my $currentTla; + my $regionStartLine; + my $lineCovData = $fileCovInfo->lineMap(); + + my @lineArray; # interesting line numbers + my $inRegion; + if (defined($selectCallback)) { + # This implementation looks for all the 'line' coverpoints - then + # expands the selected region around those points - e.g., by + # adding noncode lines which are in the selected changelist and + # surrounding the selected region with 'num_context_lines' of + # context. + # A side effect of this criteria is that disjoint noncode regions - + # e.g., comments or unused #ifdef code - will not be selected. + # A different implementation would go through the annotated source + # and mark all selected lines, then go through again to add + # context. + # It isn't clear which approach is best - but the current approach + # seems to match user expectations - so I'm going with it, at + # least for now. + # Not too hard to do - but significant overkill - to make the + # approach configurable. Let's see if there is user demand. + while (my ($line, $lne) = each(%$lineCovData)) { + # ignore deleted lines as they don't appear in source listing + my $tla = $lne->tla(); + # no annotations for deleted line + my $annotateData = $self->line($line) + unless grep(/$tla/, ('DUB', 'DCB')); + # callback arguments are (LineData, SourceLine) + if (SummaryInfo::selected($lne, $annotateData, + $self->path(), $line)) { + push(@lineArray, $line); + } + } + @lineArray = sort({ $a <=> $b } @lineArray); + $self->[INTERESTING_LINES] = \@lineArray; + $inRegion = InInterestingRegion->new($self, $lineCovData); + } + foreach my $line (sort({ + my $ka = + ("<" ne substr($a, 0, 1)) ? 
$a : + substr($a, 3); + my $kb = + ("<" ne substr($b, 0, 1)) ? $b : + substr($b, 3); + $ka <=> $kb + } keys(%{$lineCovData})) + ) { + # is this line within N of an interesting one? + my $lne = $lineCovData->{$line}; + my $tla = $lne->tla(); + # deleted line associated with first 'current' line above - + # or line below, if there is no current line above + my $lineNum = + grep(/$tla/, ('DUB', 'DCB')) ? -$lne->lineNo('current') : $line; + + if (defined($inRegion) && + !$inRegion->interesting($lineNum)) { + lcovutil::info(1, + " drop $line" . + ($line eq $lineNum ? '' : " ($lineNum)") . "\n"); + $fileSummary->removeLine($lne); + delete($lineCovData->{$line}); + next; + } + $self->_countLineTlaData($line, $lne, $fileSummary); + + $self->_countBranchTlaData($line, $lne, $fileSummary) + if ($lcovutil::br_coverage && defined($lne->differential_branch())); + + $self->_countMcdcTlaData($line, $lne, $fileSummary) + if ($lcovutil::mcdc_coverage && defined($lne->differential_mcdc())); + + $self->_countFunctionTlaData($line, $lne, $fileSummary) + if ($lcovutil::func_coverage && + defined($lne->differential_function())); + } + if (defined($main::show_dateBins) && + %$lineCovData && # haven't filtered out everything + !$self->isProjectFile() + ) { + lcovutil::info("no owner/date info for '$filepath'\n"); + } + return $self; +} + +sub simplify +{ + my $self = shift; + # retain only the information required to populate the hyperlinks in + # the parent 'directory details' table + # This struct can be huge, for a large, complicated file + $self->[INTERESTING_LINES] = undef; + + my %keep; + # keep the first line in each date bin.. + for (my $bin = 0; $bin <= $#ageGroupHeader; ++$bin) { + foreach my $tla (keys %{$self->[LINE_CATEGORIES]}) { + # at least for the moment, skip deleted lines + next if grep(/$tla/, ('DUB', 'DCB')); + my $line = $self->nextInDateBin($bin, $tla); + $keep{$line} = 1 if defined($line); + } + if ($lcovutil::br_coverage) { + foreach my $tla (keys %{$self->[BRANCH_CATEGORIES]}) { + next if grep(/$tla/, ('DUB', 'DCB')); + my $line = $self->nextBranchInDateBin($bin, $tla); + $keep{$line} = 1 if defined($line); + } + } + if ($lcovutil::mcdc_coverage) { + foreach my $tla (keys %{$self->[MCDC_CATEGORIES]}) { + next if grep(/$tla/, ('DUB', 'DCB')); + my $line = $self->nextMcdcInDateBin($bin, $tla); + $keep{$line} = 1 if defined($line); + } + } + } + + # retain first location of each TLA (globally) and each TLA for each owner + foreach my $bin (LINE_OWNERS, LINE_CATEGORIES, + BRANCH_OWNERS, BRANCH_CATEGORIES, + MCDC_OWNERS, MCDC_CATEGORIES + ) { + while (my ($key, $list) = each(%{$self->[$bin]})) { + if ('ARRAY' eq ref($list)) { + next if grep(/$key/, ('DUB', 'DCB')); + # retain the first entry and any interesting entries + $keep{$list->[0]} = 1; + @$list = grep(exists($keep{$_}), @$list); + } else { + die("unexpected type") unless 'HASH' eq ref($list); + while (my ($tla, $l) = each(%$list)) { + next if grep(/$tla/, ('DUB', 'DCB')); + # retain the first entry + $keep{$l->[0]} = 1; + @$l = grep(exists($keep{$_}), @$l); + } + } + } + } + # throw away everything we aren't interested in + for (my $l = $self->num_lines(); $l > 0; --$l) { + if (!exists($keep{$l})) { + $self->[LINES]->[$l - 1] = undef; + } + } +} + +sub _countBranchTlaData +{ + my ($self, $line, $lineData, $fileSummary) = @_; + my $differentialData = $lineData->differential_branch(); + + my %foundBranchTlas; + my ($src_age, $developer, $srcLine); + my $lineTla = $lineData->tla(); + $srcLine = $self->line($line); + if 
(!defined($srcLine)) { + lcovutil::ignorable_error($lcovutil::ERROR_UNMAPPED_LINE, + "no data for 'branch' line:$line, file:" . $self->path()) + if (!$lcovutil::warn_once_per_file || + lcovutil::warn_once($lcovutil::ERROR_UNMAPPED_LINE, $self->path())); + } else { + $srcLine->branchElem($differentialData); + if (@SourceFile::annotateScript && + !grep(/^$lineTla$/, ('DUB', 'DCB'))) { + # deleted lines don't have owner data... + # if this line is not in the project (e.g., from some 3rd party + # library - then we might not have file history for it. + $src_age = $srcLine->age(); + $developer = $srcLine->owner(); + + if (defined($developer)) { + my $shash = $self->[BRANCH_OWNERS]; + if (!exists($shash->{$developer})) { + $shash->{$developer} = {}; + $shash->{$developer}->{lines} = []; + } + push(@{$shash->{$developer}->{lines}}, $line); + } + } + } + + my %recorded; + foreach my $branchId ($differentialData->blocks()) { + my $diff = $differentialData->getBlock($branchId); + foreach my $b (@$diff) { + my $tla = $b->[1]; + # LCOV_EXCL_START + unless (defined($tla)) { + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + "undef TLA for branch $branchId of " . $self->path() . + ":$line - lineTLA:$lineTla taken:" . $b->[0]); + next; + } + # LCOV_EXCL_STOP + $fileSummary->[SummaryInfo::BRANCH_DATA]->[SummaryInfo::DATA] + ->{$tla} += 1; + # keep track of all the branch TLAs found on this line... + if (!exists($foundBranchTlas{$tla})) { + $foundBranchTlas{$tla} = 1; + $self->[BRANCH_CATEGORIES]->{$tla} = [] + unless exists($self->[BRANCH_CATEGORIES]->{$tla}); + } + push(@{$self->[BRANCH_CATEGORIES]->{$tla}}, $line); + + next + if (0 == ($SummaryInfo::tlaLocation{$tla} & 0x1)); + # skip "DUB' and 'DCB' categories - which are not in current + # and thus have no line associated + + # and the age... + #lcovutil::info("$l: $tla" . $lineData->in_curr() . "\n"); + + unless (defined($srcLine) && defined($src_age)) { + # just count totals + $fileSummary->branchCovCount($tla, 'noGroup', undef, 1); + next; + } + + # increment count of branches of this age we found for this TLA + $fileSummary->branchCovCount($tla, "age", $src_age, 1); + + # HGC: could clean this up...no need to keep track + # of 'hit' as we can just compute from CBC + GNC + ... + # found another line... + my $hit = grep(/$tla/, ('GBC', 'GIC', 'GNC', 'CBC')); + $fileSummary->branchCovCount("found", "age", $src_age, 1); + $fileSummary->branchCovCount("hit", "age", $src_age, 1) + if $hit; + + next + unless defined($developer); + + # add this line to those that belong to this owner.. + + # first: increment line count in 'file summary' + my $ohash = + $fileSummary->[SummaryInfo::BRANCH_DATA]->[SummaryInfo::OWNERS]; + if (!exists($ohash->{$developer})) { + my %data = ('hit' => $hit ? 1 : 0, + 'found' => 1, + $tla => 1); + $ohash->{$developer} = \%data; + } else { + my $d = $ohash->{$developer}; + $d->{$tla} = 0 + unless exists($d->{$tla}); + $d->{$tla} += 1; + $d->{found} += 1; + $d->{hit} += 1 + if $hit; + } + + # now append this branchTLA to the owner... + my $ownerKey = $developer . 
$tla; + if (!exists($recorded{$ownerKey})) { + $recorded{$ownerKey} = 1; + my $dhash = $self->[BRANCH_OWNERS]->{$developer}; + $dhash->{$tla} = [] + unless exists($dhash->{$tla}); + push(@{$dhash->{$tla}}, $line); + } + } + } +} + +sub _countMcdcTlaData +{ + my ($self, $line, $lineData, $fileSummary) = @_; + my $differentialData = $lineData->differential_mcdc(); + + my %foundMcdcTlas; + my ($src_age, $developer, $srcLine); + my $lineTla = $lineData->tla(); + $srcLine = $self->line($line); + if (!defined($srcLine)) { + lcovutil::ignorable_error($lcovutil::ERROR_UNMAPPED_LINE, + "no data for 'MC/DC' line:$line, file:" . $self->path()) + if (!$lcovutil::warn_once_per_file || + lcovutil::warn_once($lcovutil::ERROR_UNMAPPED_LINE, $self->path())); + } else { + $srcLine->mcdcElem($differentialData); + if (@SourceFile::annotateScript && + !grep(/^$lineTla$/, ('DUB', 'DCB'))) { + # deleted lines don't have owner data... + # if this line is not in the project (e.g., from some 3rd party + # library - then we might not have file history for it. + $src_age = $srcLine->age(); + $developer = $srcLine->owner(); + + if (defined($developer)) { + my $shash = $self->[BRANCH_OWNERS]; + if (!exists($shash->{$developer})) { + $shash->{$developer} = {}; + $shash->{$developer}->{lines} = []; + } + push(@{$shash->{$developer}->{lines}}, $line); + } + } + } + + my %recorded; + while (my ($groupSize, $group) = each(%{$differentialData->groups()})) { + foreach my $expr (@$group) { + foreach my $sense (0, 1) { + my $count = $expr->count($sense); + my $tla = $count->[0]; + # LCOV_EXCL_START + unless (defined($tla)) { + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + "undef TLA for sense $sense of MC/DC " . + $expr->index() . + ' of ' . $self->path() . + ":$line - lineTLA:$lineTla taken:" . + $count->[1]); + next; + } + # LCOV_EXCL_STOP + $fileSummary->[SummaryInfo::MCDC_DATA]->[SummaryInfo::DATA] + ->{$tla} += 1; + # keep track of all the branch TLAs found on this line... + if (!exists($foundMcdcTlas{$tla})) { + $foundMcdcTlas{$tla} = 1; + $self->[MCDC_CATEGORIES]->{$tla} = [] + unless exists($self->[MCDC_CATEGORIES]->{$tla}); + } + push(@{$self->[MCDC_CATEGORIES]->{$tla}}, $line); + + next + if (0 == ($SummaryInfo::tlaLocation{$tla} & 0x1)); + # skip "DUB' and 'DCB' categories - which are not in current + # and thus have no line associated + + # and the age... + #lcovutil::info("$l: $tla" . $lineData->in_curr() . "\n"); + + unless (defined($srcLine) && defined($src_age)) { + # just count totals + $fileSummary->mcdcCovCount($tla, 'noGroup', undef, 1); + next; + } + + # increment count of branches of this age we found for this TLA + $fileSummary->mcdcCovCount($tla, "age", $src_age, 1); + + my $hit = grep(/$tla/, ('GBC', 'GIC', 'GNC', 'CBC')); + $fileSummary->mcdcCovCount("found", "age", $src_age, 1); + $fileSummary->mcdcCovCount("hit", "age", $src_age, 1) + if $hit; + + next + unless defined($developer); + + # add this line to those that belong to this owner.. + + # first: increment line count in 'file summary' + my $ohash = + $fileSummary->[SummaryInfo::MCDC_DATA] + ->[SummaryInfo::OWNERS]; + if (!exists($ohash->{$developer})) { + my %data = ('hit' => $hit ? 1 : 0, + 'found' => 1, + $tla => 1); + $ohash->{$developer} = \%data; + } else { + my $d = $ohash->{$developer}; + $d->{$tla} = 0 + unless exists($d->{$tla}); + $d->{$tla} += 1; + $d->{found} += 1; + $d->{hit} += 1 + if $hit; + } + + # now append this branchTLA to the owner... + my $ownerKey = $developer . 
$tla; + if (!exists($recorded{$ownerKey})) { + $recorded{$ownerKey} = 1; + my $dhash = $self->[MCDC_OWNERS]->{$developer}; + $dhash->{$tla} = [] + unless exists($dhash->{$tla}); + push(@{$dhash->{$tla}}, $line); + } + } + } + } +} + +sub _countLineTlaData +{ + my ($self, $line, $lineData, $fileSummary) = @_; + # there is differential line coverage data... + my $tla = $lineData->tla(); + + if (!exists($SummaryInfo::tlaLocation{$tla})) { + # this case can happen if the line number annotations are + # wrong in the .info file - so the first line of some function + # or some branch coverage line number turns out not to be an + # executable source code line + lcovutil::ignorable_error($lcovutil::ERROR_UNKNOWN_CATEGORY, + "unexpected category $tla for line " . $self->path() . ":$line"); + return; + } + # one more line in this bucket... + $fileSummary->[SummaryInfo::LINE_DATA]->[SummaryInfo::DATA]->{$tla} += 1; + # create the category list, if necessary + $self->[LINE_CATEGORIES]->{$tla} = [] + unless exists($self->[LINE_CATEGORIES]->{$tla}); + + push(@{$self->[LINE_CATEGORIES]->{$tla}}, $line); + + # and the age data... + + if ($SummaryInfo::tlaLocation{$tla} & 0x1) { + # skip "DUB' and 'DCB' categories - which are not in current + # and thus have no line associated + + #lcovutil::info("$l: $tla" . $lineData->in_curr() . "\n"); + + my $l = $self->line($line); + + if (!defined($l)) { + lcovutil::ignorable_error($lcovutil::ERROR_UNMAPPED_LINE, + "no data for line:$line, TLA:$tla, file:" . $self->path()) + if (!$lcovutil::warn_once_per_file || + lcovutil::warn_once($lcovutil::ERROR_UNMAPPED_LINE, + $self->path())); + return; + } + # set the TLA of this line... + $l->tla($tla); + + my $src_age = $l->age(); + # if this line is not in the project (e.g., from some 3rd party + # library - then we might not have file history for it. + return + unless defined($src_age); + + # increment count of lines of this age we found for this TLA + $fileSummary->lineCovCount($tla, "age", $src_age, 1); + + if ($lineData->in_curr()) { + # HGC: could clean this up...no need to keep track + # of 'hit' as we can just compute from CBC + GNC + ... + # found another line... + $fileSummary->lineCovCount("found", "age", $src_age, 1); + if ($lineData->curr_count() > 0) { + $fileSummary->lineCovCount("hit", "age", $src_age, 1); + } + } + + if (defined($l->owner())) { + # add this line to those that belong to this owner.. + my $developer = $l->owner(); + + # first: increment line count in 'file summary' + my $ohash = + $fileSummary->[SummaryInfo::LINE_DATA]->[SummaryInfo::OWNERS]; + $ohash->{$developer} = {} + unless exists($ohash->{$developer}); + my $d = $ohash->{$developer}; + $d->{$tla} = 0 + unless exists($d->{$tla}); + $d->{$tla} += 1; + + # now push this line onto the list of in this file, belonging + # to this owner + $self->[LINE_OWNERS]->{$developer} = {} + unless exists($self->[LINE_OWNERS]->{$developer}); + my $dhash = $self->[LINE_OWNERS]->{$developer}; + $dhash->{lines} = [] + unless exists($dhash->{lines}); + push(@{$dhash->{lines}}, $line); + $dhash->{$tla} = [] + unless exists($dhash->{$tla}); + # and the list of lines with this TLA, belonging to this user + push(@{$dhash->{$tla}}, $line); + } + } +} + +sub _accountFunction +{ + my ($fileSummary, $tla, $src_age) = @_; + + # LCOV_EXCL_START + unless (defined($tla)) { + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + "undef function TLA for age '$src_age' of " . 
+ $fileSummary->name()); + return 1; # error + } + # LCOV_EXCL_STOP + + $fileSummary->[SummaryInfo::FUNCTION_DATA]->[SummaryInfo::DATA]->{$tla} += + 1; + + if (defined($src_age)) { + $fileSummary->functionCovCount($tla, 'age', $src_age, 1); + + my $hit = grep(/$tla/, ('GBC', 'GIC', 'GNC', 'CBC')); + $fileSummary->functionCovCount("found", "age", $src_age, 1); + $fileSummary->functionCovCount("hit", "age", $src_age, 1) + if $hit; + } + return 0; +} + +sub _countFunctionTlaData +{ + my ($self, $line, $lineData, $fileSummary) = @_; + my $func = $lineData->differential_function(); + + my %foundFunctionTlas; + my ($src_age, $developer, $srcLine); + + my $h = $func->hit(); + my $mergedTla = $h->[1]; + # LCOV_EXCL_START + if (!defined($mergedTla)) { + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + "undef TLA for function '" . $func->name() . + "' hit " . $h->[0] . " at line " . + $line . " (" . $lineData->tla() . + ' ' . $lineData->curr_count() . ")"); + #die("this should not happen"); + # This is new code - somehow miscategorized. + $mergedTla = $h->[0] == 0 ? 'UNC' : 'GNC'; + $h->[1] = $mergedTla; + #return; + } + # LCOV_EXCL_STOP + + if (!grep(/^$mergedTla$/, ('DUB', 'DCB'))) { + # deleted lines don't have owner data... + $srcLine = $self->line($line); + # info might not be available, if no annotations + if (defined($srcLine)) { + # should probably look at the source code to find the open/close + # parens - then claim the age is the youngest line + $src_age = $srcLine->age(); + $srcLine->functionElem($func); + } + } + + if ($main::merge_function_aliases) { + _accountFunction($fileSummary, $mergedTla, $src_age); + } else { + my $aliases = $func->aliases(); + foreach my $alias (keys %$aliases) { + my $data = $aliases->{$alias}; + my $tla = $data->[1]; + # LCOV_EXCL_START + if (!defined($tla)) { + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + "undef TLA for alias:'$alias' hit:" . + $data->[0] . " of function '" . $func->name() . + "' hit " . $h->[0] . " at line " . $line . " (" . + $lineData->tla() . ' ' . $lineData->curr_count() . ")"); + # die("this should not happen either"); + $tla = $data->[0] == 0 ? 'UNC' : 'GNC'; + $data->[1] = $tla; + } + # LCOV_EXCL_STOP + _accountFunction($fileSummary, $tla, $src_age); + } + } +} + +sub path +{ + my $self = shift; + return $self->[PATH]; +} + +sub isProjectFile +{ + # return 'true' if no owner/date information for this file. + my $self = shift; + return scalar(%{$self->[LINE_OWNERS]}); +} + +sub line +{ + my $self = shift; + my $i = shift; + die("bad line index '$i'") + unless ($i =~ /^[0-9]+$/); + return $self->[LINES]->[$i - 1]; +} + +# how old is the oldest (or youngest) line in this file? +sub age +{ + my ($self, $youngest) = @_; + return undef unless $self->isProjectFile(); + + my $age = $self->line(1)->age(); + foreach my $line (@{$self->lines()}) { + my $a = $line->age(); + if (defined($youngest)) { + $age = $a + if ($a < $age); + } else { + $age = $a + if ($a > $age); + } + } + return $age; +} + +sub lines +{ + my $self = shift; + return $self->[LINES]; +} + +sub num_lines +{ + my $self = shift; + return scalar(@{$self->[LINES]}); +} + +sub is_empty +{ + my $self = shift; + # anything interesting here? 
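
The per-TLA accounting above follows one recurring pattern: every covered element is appended both to a per-category line list and to a per-owner, per-category line list, so that the report pages can later jump to the first occurrence in each bucket. A minimal sketch of that bookkeeping (invented names; the patch itself stores these maps in array-based objects and also feeds SummaryInfo counters):

    use strict;
    use warnings;

    my %by_tla;      # TLA   -> [ line numbers ]
    my %by_owner;    # owner -> { TLA -> [ line numbers ] }

    sub record_line {
        my ($tla, $owner, $line) = @_;
        push(@{$by_tla{$tla}}, $line);
        push(@{$by_owner{$owner}{$tla}}, $line) if defined($owner);
    }

    record_line('GNC', 'alice', 12);
    record_line('UNC', 'bob',   40);
    # first 'GNC' line attributed to alice:
    my $first = $by_owner{alice}{GNC}[0];    # 12

Because lines are processed in ascending order, each of these lists ends up sorted without any explicit sort step, which is what the later binary searches rely on.
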
+ return defined($self->interesting_lines()) && + 0 == @{$self->interesting_lines()}; +} + +sub interesting_lines +{ + my $self = shift; + # return list of lines containing coverpoints, which are marked by + # the $selectionCallback - see the --select-script parameter + return $self->[INTERESTING_LINES]; +} + +sub binarySearchLine +{ + my ($list, $after) = @_; + + defined($list) && 0 != scalar(@$list) or + die("invalid location list"); + + my $max = $#$list; + my $min = 0; + my $mid; + while (1) { + $mid = int(($max + $min) / 2); + my $v = $list->[$mid]; + if ($v > $after) { + $max = $mid; + } elsif ($v < $after) { + $min = $mid; + } else { + return $mid; + } + my $diff = $max - $min; + if ($diff <= 1) { + $mid = $min; + $mid = $max + if $list->[$min] < $after; + last; + } + } + return $list->[$mid] >= $after ? $mid : undef; +} + +sub nextTlaGroup +{ + # return number of first line of next group of specified linecov TLA - + # for example, if line [5:8] and [13:17] are 'CNC', then: + # 5 = nextTlaGroup('CBC') : "after == undef" + # 13 = nextTlaGroup('CBC', 5) : skip contiguous group of CBC lines + # undef = nexTlaGroup('CBC', 13) : no following group + my ($self, $tla, $after) = @_; + if (!exists($SummaryInfo::tlaLocation{$tla})) { + lcovutil::ignorable_error($lcovutil::ERROR_UNKNOWN_CATEGORY, + "unknown linecov TLA '$tla'"); + return undef; + } + + # note the the "$self->line(...)" argument is 1-based (not zero-base) + my $line; + if (defined($after) && + defined($self->line($after)->tla())) { + die("$after is not in $tla group") + unless ($self->line($after)->tla() eq $tla); + # skip to the end of the current section... + # if the TLA of this group is unset (non-code line: comment, + # blank, etc) - then look for next code line. If that line's + # TLA matches, then treat as a contiguous group. + # This way, we avoid visual clutter from having a lot of single-line + # TLA segments. + my $lastline = scalar(@{$self->[LINES]}); + for ($line = $after + 1; $line <= $lastline; ++$line) { + my $t = $self->line($line)->tla(); + last + if (defined($t) && + $t ne $tla); + } + } else { + $line = 1; + } + my $locations = $self->[LINE_CATEGORIES]->{$tla}; + my $idx = binarySearchLine($locations, $line); + return defined($idx) ? $locations->[$idx] : undef; +} + +sub nextCategoryTlaGroup +{ + # return number of first line of next group of specified branchcov TLA - + # note that all branch lines are independent - so we will + # report and go the next branch, even if it is on the adjacent line + my ($type, $category, $self, $tla, $after) = @_; + if (!exists($SummaryInfo::tlaLocation{$tla})) { + lcovutil::ignorable_error($lcovutil::ERROR_UNKNOWN_CATEGORY, + "unknown $type TLA '$tla'"); + return undef; + } + die("no $type data for TLA '$tla'") + unless exists($self->[$category]->{$tla}); + + my $locations = $self->[$category]->{$tla}; + + $after = defined($after) ? $after + 1 : 1; + my $idx = binarySearchLine($locations, $after); + return defined($idx) ? 
$locations->[$idx] : undef; +} + +sub nextBranchTlaGroup +{ + # ($self, $tla, $after_line) + return nextCategoryTlaGroup('branch', BRANCH_CATEGORIES, @_); +} + +sub nextMcdcTlaGroup +{ + return nextCategoryTlaGroup('MC/DC', MCDC_CATEGORIES, @_); +} + +sub nextInDateBin +{ + my ($self, $dateBin, $tla, $after) = @_; + + if (!exists($SummaryInfo::tlaLocation{$tla})) { + lcovutil::ignorable_error($lcovutil::ERROR_UNKNOWN_CATEGORY, + "unknown linecov TLA '$tla'"); + return undef; + } + $dateBin <= $#SummaryInfo::ageGroupHeader or + die("unexpected age group $dateBin"); + + # note the the "$self->line(...)" argument is 1-based (not zero-base) + my $line; + if (defined($after)) { + + ($self->line($after)->tla() eq $tla) or + die("$after is not in $tla group"); + + my $lastline = scalar(@{$self->[LINES]}); + # skip to the end of the current section... + for ($line = $after + 1; $line <= $lastline; ++$line) { + my $t = $self->line($line)->tla(); + my $a = $self->line($line)->age(); + if (!defined($a)) { + lcovutil::ignorable_error($lcovutil::ERROR_UNMAPPED_LINE, + "no age data for line:$line, file:" . $self->path()) + if (!$lcovutil::warn_once_per_file || + lcovutil::warn_once($lcovutil::ERROR_UNMAPPED_LINE, + $self->path())); + return undef; + } + last + if (defined($t) && + ($t ne $tla || + $dateBin != SummaryInfo::findAgeBin($a))); + } + } else { + $line = 1; + } + # the data isn't stored by date bin (at least for now) - so the + # only way to find it currently is by searching forward. + my $tlaLocations = $self->[LINE_CATEGORIES]->{$tla}; + my $idx = binarySearchLine($tlaLocations, $line); + return undef unless defined($idx); + my $max = scalar(@$tlaLocations); + for (; $idx < $max; ++$idx) { + $line = $tlaLocations->[$idx]; + my $l = $self->line($line); + if (!defined($l)) { + lcovutil::ignorable_error($lcovutil::ERROR_UNMAPPED_LINE, + "no data for line:$line, file:" . $self->path()) + if (!$lcovutil::warn_once_per_file || + lcovutil::warn_once($lcovutil::ERROR_UNMAPPED_LINE, + $self->path())); + return undef; + } + my $a = $l->age(); + if (!defined($a)) { + lcovutil::ignorable_error($lcovutil::ERROR_UNMAPPED_LINE, + "no age data for line:$line, file:" . $self->path()) + if (!$lcovutil::warn_once_per_file || + lcovutil::warn_once($lcovutil::ERROR_UNMAPPED_LINE, + $self->path())); + return undef; + } + my $bin = SummaryInfo::findAgeBin($a); + + if ($bin == $dateBin) { + my $t = $l->tla(); + return $line + if (defined($t) && + $t eq $tla); + } + } + return undef; +} + +sub nextInOwnerBin +{ + my ($self, $owner, $tla, $after) = @_; + + if (!exists($SummaryInfo::tlaLocation{$tla})) { + lcovutil::ignorable_error($lcovutil::ERROR_UNKNOWN_CATEGORY, + "unknown linecov TLA '$tla'"); + return undef; + } + exists($self->[LINE_OWNERS]->{$owner}) && + exists($self->[LINE_OWNERS]->{$owner}->{$tla}) or + die("$owner not responsible for any $tla lines in" . $self->path()); + + # note the the "$self->line(...)" argument is 1-based (not zero-base) + my $line; + if (defined($after)) { + + ($self->line($after)->tla() eq $tla) or + die("$after is not in $tla group"); + + my $lastline = scalar(@{$self->[LINES]}); + # skip to the end of the current section... + for ($line = $after + 1; $line <= $lastline; ++$line) { + my $l = $self->line($line); + if (!defined($l)) { + lcovutil::ignorable_error($lcovutil::ERROR_UNMAPPED_LINE, + "no data for line:$line, file:" . 
$self->path()) + if (!$lcovutil::warn_once_per_file || + lcovutil::warn_once($lcovutil::ERROR_UNMAPPED_LINE, + $self->path())); + return undef; + } + my $t = $l->tla(); + my $o = $l->owner(); + if (!defined($o)) { + lcovutil::ignorable_error($lcovutil::ERROR_UNMAPPED_LINE, + "no owber data for line:$line, file:" . $self->path()) + if (!$lcovutil::warn_once_per_file || + lcovutil::warn_once($lcovutil::ERROR_UNMAPPED_LINE, + $self->path())); + return undef; + } + last + if (defined($t) && + ($t ne $tla || + $o ne $owner)); + } + } else { + $line = 1; + } + + my $locations = $self->[LINE_OWNERS]->{$owner}->{$tla}; + my $idx = binarySearchLine($locations, $line); + return defined($idx) ? $locations->[$idx] : undef; +} + +sub nextCategoryInDateBin +{ + my ($type, $category, $self, $dateBin, $tla, $after) = @_; + + if (!exists($SummaryInfo::tlaLocation{$tla})) { + lcovutil::ignorable_error($lcovutil::ERROR_UNKNOWN_CATEGORY, + "unknown $type TLA '$tla'"); + return undef; + } + $dateBin <= $#SummaryInfo::ageGroupHeader or + die("unexpected age group $dateBin"); + + # note the the "$self->line(...)" argument is 1-based (not zero-base) + $after = defined($after) ? $after + 1 : 1; + + exists($self->[$category]->{$tla}) or + die("no $tla ${type}es in " . $self->path()); + + my $lines = $self->[$category]->{$tla}; + my $idx = binarySearchLine($lines, $after); + + return undef unless defined($idx); + my $max = scalar(@$lines); + for (; $idx < $max; ++$idx) { + my $line = $lines->[$idx]; + my $l = $self->line($line); + if (!defined($l)) { + lcovutil::ignorable_error($lcovutil::ERROR_UNMAPPED_LINE, + "no data for line:$line, file:" . $self->path()) + if (!$lcovutil::warn_once_per_file || + lcovutil::warn_once($lcovutil::ERROR_UNMAPPED_LINE, + $self->path())); + next; + } + my $a = $l->age(); + if (!defined($a)) { + lcovutil::ignorable_error($lcovutil::ERROR_UNMAPPED_LINE, + "no age data for line:$line, file:" . $self->path()) + if (!$lcovutil::warn_once_per_file || + lcovutil::warn_once($lcovutil::ERROR_UNMAPPED_LINE, + $self->path())); + return undef; + } + my $bin = SummaryInfo::findAgeBin($a); + if ($bin == $dateBin) { + return $line; + } + } + return undef; +} + +sub nextBranchInDateBin +{ + # ($self, $dateBin, $tla, $after) = @_; + return nextCategoryInDateBin('branch', BRANCH_CATEGORIES, @_); +} + +sub nextMcdcInDateBin +{ + return nextCategoryInDateBin('MC/DC', MCDC_CATEGORIES, @_); +} + +sub nextCategoryInOwnerBin +{ + my ($type, $category, $self, $owner, $tla, $after) = @_; + + if (!exists($SummaryInfo::tlaLocation{$tla})) { + lcovutil::ignorable_error($lcovutil::ERROR_UNKNOWN_CATEGORY, + "unknown $type TLA '$tla'"); + return undef; + } + + # note the the "$self->line(...)" argument is 1-based (not zero-base) + $after = defined($after) ? $after + 1 : 1; + + if (exists($self->[$category]->{$owner})) { + my $od = $self->[$category]->{$owner}; + if (exists($od->{$tla})) { + my $l = $od->{$tla}; + my $idx = binarySearchLine($l, $after); + return defined($idx) ? $l->[$idx] : undef; + } + } + return undef; +} + +sub nextBranchInOwnerBin +{ + # ($self, $owner, $tla, $after) = @_; + return nextCategoryInOwnerBin('branch', BRANCH_OWNERS, @_); +} + +sub nextMcdcInOwnerBin +{ + # ($self, $owner, $tla, $after) = @_; + return nextCategoryInOwnerBin('MC/DC', MCDC_OWNERS, @_); +} + +sub _computeAge +{ + my ($when, $path) = @_; + + # if SOURCE_DATE_EPOCH is set, then use that as 'now': age of this + # file is as of the epoch date. 
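
The various nextCategoryInDateBin/nextInOwnerBin helpers above all share one lookup shape: the per-TLA line lists are kept sorted, so the code binary-searches for the first candidate at or after a starting line and then scans forward until an entry lands in the requested bin. A simplified, self-contained sketch of that shape (invented names; the patch's binarySearchLine and bin helpers differ in detail):

    use strict;
    use warnings;

    sub next_in_bin {
        my ($lines, $age_of, $bin_of, $want_bin, $after) = @_;
        # $lines: sorted aref of line numbers
        # $age_of, $bin_of: code refs mapping line -> age and age -> bin
        return undef unless @$lines;
        my ($lo, $hi) = (0, $#$lines);
        while ($lo < $hi) {    # lower-bound binary search
            my $mid = int(($lo + $hi) / 2);
            if ($lines->[$mid] < $after) { $lo = $mid + 1; } else { $hi = $mid; }
        }
        return undef if $lines->[$lo] < $after;    # nothing at or after $after
        for (my $i = $lo; $i <= $#$lines; ++$i) {
            my $line = $lines->[$i];
            return $line if $bin_of->($age_of->($line)) == $want_bin;
        }
        return undef;
    }
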
+ # if this file is newer than that ($when is in the future of epoch) - then + # warn and put this file into the 'zero age' bin. + my $now = + exists($ENV{SOURCE_DATE_EPOCH}) ? + DateTime->from_epoch(epoch => $ENV{SOURCE_DATE_EPOCH}) : + DateTime->now(); + my $then = lcovutil::parse_w3cdtf($when); + if ($then > $now) { + if (lcovutil::warn_once($lcovutil::ERROR_INCONSISTENT_DATA, $path)) { + # issue annotation warning at most once per file + # also attempt to clarify where the date comes from + my $data = + exists($ENV{SOURCE_DATE_EPOCH}) ? + ( + lcovutil::warn_once($lcovutil::ERROR_INCONSISTENT_DATA, + 'SOURCE_DATE_EPOCH') ? + "computed from your 'SOURCE_DATE_EPOCH=$ENV{SOURCE_DATE_EPOCH}' environment variable - see 'man genhtml'" + : + "'SOURCE_DATE_EPOCH=$ENV{SOURCE_DATE_EPOCH}'") : + "'now'"; + + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + "File \"$path\": '$now' ($data) is older than annotate time '$when'" + ); + } + return 0; + } + return $then->delta_days($now)->in_units('days'); +} + +sub _load +{ + my ($self, $fileCovInfo) = @_; + + my $start = Time::HiRes::gettimeofday(); + ++$totalFiles; + + my $path = ReadCurrentSource::resolve_path($self->path()); + # try to simplify path - 'realpath' will fail if the file does + # not exist - but that might be fine if it exists in the repo + my $repo_path = Cwd::realpath($path); + if (!defined($repo_path)) { + if (!defined($annotateCallback) && + !$main::synthesizeMissingFile) { + my $suffix = + lcovutil::explain_once('synthesize', + ' (see --synthesize-missing option to work around)'); + lcovutil::ignorable_error($lcovutil::ERROR_SOURCE, + "\"" . $self->path() . "\" does not exist: $!" . $suffix); + } + $repo_path = $self->path(); + } + + # check for version mismatch... + if (@lcovutil::extractVersionScript) { + my $currentVersion = $fileCovInfo->version('current'); + my $version = lcovutil::extractFileVersion($repo_path); + if (defined($version) && + '' ne $version) { + if (defined($currentVersion)) { + lcovutil::checkVersionMatch($repo_path, $version, + $currentVersion, 'load'); + } else { + my $suffix = lcovutil::explain_once('missingVersionMsg', + "\n\tSee the 'compute_file_version' section in man lcovrc(5)." + ); + lcovutil::ignorable_error($lcovutil::ERROR_VERSION, + "'$repo_path': computed '$version' but version not defined in 'current' data file " + . $main::info_filenames[0] + . $suffix); + } + } elsif (defined($currentVersion)) { + lcovutil::ignorable_error($lcovutil::ERROR_VERSION, + "'$repo_path': version mismatch: your '--version-script' returned empty but 'current' data in " + . $main::info_filenames[0] + . " defines '$currentVersion'"); + } + my $end = Time::HiRes::gettimeofday(); + $lcovutil::profileData{check_version}{$self->path()} = $end - $start; + } + + if (defined($annotateCallback) && + # also skip if we already emitted 'missing file' error + (defined($lcovutil::versionCallback) || + !lcovutil::fileExistenceBeforeCallbackError($repo_path)) + ) { + my $begin = Time::HiRes::gettimeofday(); + my $lineNum = 0; + + my ($status, $lines); + eval { ($status, $lines) = $annotateCallback->annotate($repo_path); }; + if ($@) { + my $context = MessageContext::context(); + lcovutil::ignorable_error($lcovutil::ERROR_CALLBACK, + 'annotate(' . $self->path() . 
") failed$context: $@"); + $status = 1; + # set $lines so exit status error below prints something + $lines = [[$@]]; + } + if (!$status && defined($lines)) { + my $found; # check that either all lines are annotated or none are + foreach my $line (@$lines) { + + my ($text, $abbrev, $full, $when, $commit) = @$line; + ++$lineNum; + + my $age = _computeAge($when, $path); + if ($commit ne 'NONE') { + die("inconsistent 'annotate' data for '$repo_path': both 'commit' and 'no commit' lines" + ) if (defined($found) && !$found); + $found = 1; + + defined($abbrev) or + die("owner is undef for $repo_path:$lineNum"); + } else { + die("inconsistent 'annotate' data for '$repo_path': both 'no commit' and 'commit' lines" + ) if (defined($found) && $found); + $found = 0; + $abbrev = "no.body"; + } + $full = $abbrev unless defined($full); + push @{$self->[LINES]}, + SourceLine->new($lineNum, $text, $abbrev, $full, + $when, $age, $commit); + } + + my $end = Time::HiRes::gettimeofday(); + + $lcovutil::profileData{annotate}{$self->path()} = $end - $begin; + + ++$annotatedFiles if $found; + $self->_synthesize($fileCovInfo, 1); # fake annotations too + return $self; + } else { + + # non-zero exit status: something bad happened in annotation + # if we ignore the error - then fall through and just try to load the file + my $text = ''; + $text = ': ' . $lines->[0]->[0] . '...' + if $lines && @$lines; + # might be useful to provide more than one line of context - if there is more than one line? + + lcovutil::report_exit_status($lcovutil::ERROR_ANNOTATE_SCRIPT, + "annotate command failed", + $status, '', $text); + } # end if error + } # end if annotate script exists + + # Check if file exists and is readable + my $begin = Time::HiRes::gettimeofday(); + if (!-r $path) { + # perhaps this error should be ignorable: if the source is not + # found and the 'source' error is ignored, then synthesize + # Note that we can't extract version data from synthesized code. + if ($main::synthesizeMissingFile || + # don't complain if we weren't going to do anything with the file - + # not generating source view and not checking versions or annotating + ($main::no_sourceview && + 0 == scalar(@lcovutil::extractVersionScript) && + !lcovutil::is_filter_enabled()) + ) { + $self->_synthesize($fileCovInfo, + defined($SourceFile::annotateCallback)); + my $end = Time::HiRes::gettimeofday(); + $lcovutil::profileData{synth}{$self->path()} = $end - $begin; + return $self; + } + die($self->path() . + " is not readable or doesn't exist. See the '--substitute', '--filter missing' and '--synthesize-missing' $lcovutil::tool_name options for methods to fix the path or ignore the problem." 
+ ); + } + $self->_bare_load($path); + $self->_synthesize($fileCovInfo, defined($SourceFile::annotateCallback)); + my $end = Time::HiRes::gettimeofday(); + $lcovutil::profileData{load}{$self->path()} = $end - $begin; + return $self; +} + +sub _synthesize +{ + my ($self, $fileCovInfo, $annotate) = @_; + + my $lineData = $fileCovInfo->lineMap(); + my $currentLast = scalar(@{$self->[LINES]}); + my $last_line = 0; + while (my ($l, $d) = each(%$lineData)) { + $last_line = $l if ('<' ne substr($l, 0, 1) && $l > $last_line); + } + my %functionDecls; + if ($lcovutil::func_coverage) { + while (my ($fnName, $funcEntry) = each(%{$fileCovInfo->functionMap()})) + { + my $tla = $funcEntry->hit()->[1]; + # deleted function doesn't count + next if grep(/^$tla$/, ('DUB', 'DCB')); + my $line = $funcEntry->line(); + my $end = $funcEntry->end_line(); + $last_line = $line if $line > $last_line; + $functionDecls{$line} = "/* BEGIN: function \"$fnName\" */" + if ($line >= $currentLast); + if (defined($end)) { + $functionDecls{$end} = "/* END: function \"$fnName\" */" + if ($end >= $currentLast); + if ($end > $last_line) { + # function end is not an executable line, + # but is after last executable line + $last_line = $end; + } + } + } + } + return $self if ($last_line < 1 || + $currentLast >= $last_line); + my $why; + if (0 == $currentLast) { + # Synthesize source data from coverage trace to replace unreadable file + my $suffix = + $lcovutil::ignore[$lcovutil::ERROR_SOURCE] ? + ' - synthesizing fake content.' : + '.'; + lcovutil::ignorable_error($lcovutil::ERROR_SOURCE, + "cannot read '" . $self->path() . + "' - file is empty or unreadable$suffix") + unless $main::no_sourceview; + $why = 'empty or unreadable'; + } else { + my $suffix = ''; + if ($lcovutil::ignore[$lcovutil::ERROR_RANGE]) { + my $num = $last_line - $currentLast; + my $plural = $num == 1 ? 'line' : "$num lines"; + $suffix = ' ... synthesizing fake content for last ' . $plural; + } + lcovutil::ignorable_error($lcovutil::ERROR_RANGE, + $self->path() . ' contains only ' . $currentLast . + ' lines but coverage data refers to line ' . + $last_line . $suffix); + $why = 'not long enough'; + } + # Simulate gcov behavior + my $notFound = "/* " . $self->path() . " $why */"; + my $synth = "/* (content generated from coverage data) */"; + my $idx = 1; + my @fakeline = (undef, # line number + undef); # source text + if ($annotate) { + my $now = + exists($ENV{SOURCE_DATE_EPOCH}) ? + DateTime->from_epoch(epoch => $ENV{SOURCE_DATE_EPOCH}) : + DateTime->now(); + push(@fakeline, 's.n.thetic', # owner + 'faked synthetic user', # full name + $now, # when + _computeAge($now, $self->path()), # age + 'synthesized'); # commit ID + } + for (my $line = $currentLast + 1; $line <= $last_line; $line++) { + my $mod = $idx++ % 20; + my $text; + # if there is function decl here...mark it. + if (exists($functionDecls{$line})) { + $text = $functionDecls{$line}; + } else { + $text = (($mod == 1) ? $notFound : + ($mod == 2) ? $synth : + "/* ... */"); + } + splice(@fakeline, 0, 2, $line, $text); + push(@{$self->[LINES]}, SourceLine->new(@fakeline)); + } + return $self; +} + +sub _bare_load +{ + my ($self, $path) = @_; + + my $lineno = 0; + local *HANDLE; # File handle for reading the diff file + + open(HANDLE, "<", $path) or + die("unable to open '" . $self->path() . 
"': $!\n"); + while (my $line = ) { + chomp $line; + $line =~ s/\r//g; # Also remove CR from line-end + + $lineno++; + push @{$self->[LINES]}, SourceLine->new($lineno, $line); + } + close(HANDLE) or die("unable to close $path: $!\n"); + return $self; +} + +# a class to hold either line or branch counts for each testcase - +# used by the "--show-details" feature. +package TestcaseTlaCount; + +sub new +{ + # $testcaseCounts: either 'CountData' or 'BranchData' structure + # - the data for this testcase + # $fileDetails: SourceFile structure - data for the filename + # $covtype: 'line' or 'branch' + my ($class, $testcaseCounts, $fileDetails, $covtype) = @_; + + my %tlaData; + if ($covtype != SummaryInfo::FUNCTION_DATA) { + $tlaData{found} = $testcaseCounts->found(); + $tlaData{hit} = $testcaseCounts->hit(); + } else { + $tlaData{found} = + $testcaseCounts->numFunc($main::merge_function_aliases); + $tlaData{hit} = $testcaseCounts->numHit($main::merge_function_aliases); + } + if ($main::show_tla && + defined($fileDetails)) { + for my $tla (("CBC", "GBC", "GIC", "GNC")) { + $tlaData{$tla} = 0; + } + if (SummaryInfo::LINE_DATA == $covtype) { + foreach my $line ($testcaseCounts->keylist()) { + # skip uncovered lines... + next if $testcaseCounts->value($line) == 0; + my $lineData = + $fileDetails->line($line); # "SourceLine" structure + my $tla = $lineData->tla(); + die("unexpected TLA '$tla' in CounntData for line $line") + unless exists($tlaData{$tla}); + $tlaData{$tla} += 1; + } + } elsif (SummaryInfo::BRANCH_DATA == $covtype) { + foreach my $line ($testcaseCounts->keylist()) { + my $lineData = + $fileDetails->line($line); # "SourceLine" structure + my $branchEntry = $lineData->branchElem(); + foreach my $id ($branchEntry->blocks()) { + my $block = $branchEntry->getBlock($id); + foreach my $data (@$block) { + my ($br, $tla) = @$data; + my $count = $br->count(); + die("unexpected branch TLA $tla for count $count at " . + $fileDetails->path() . ":$line") + unless (($count != 0) == exists($tlaData{$tla})); + next if (0 == $count); + $tlaData{$tla} += 1; + } + } + } + } elsif (SummaryInfo::MCDC_DATA == $covtype) { + foreach my $line ($testcaseCounts->keylist()) { + my $lineData = + $fileDetails->line($line); # "SourceLine" structure + my $mcdcEntry = $lineData->mcdcElem(); + while (my ($groupSize, $group) = each(%{$mcdcEntry->groups()})) + { + foreach my $expr (@$group) { + foreach my $sense (0, 1) { + # use 'current' count... + my ($tla, $b_count, $c_count) = + @{$expr->count($sense)}; + die("unexpected branch TLA $tla for count $c_count at " + . $fileDetails->path() + . 
":$line") + unless ( + ($c_count != 0) == exists($tlaData{$tla})); + next if (0 == $c_count); + $tlaData{$tla} += 1; + } + } + } + } + } else { + die("$covtype not supported yet") + unless $covtype == SummaryInfo::FUNCTION_DATA; + foreach my $key ($testcaseCounts->keylist()) { + my $func = $testcaseCounts->findKey($key); + my $line = $func->line(); + # differential FunctionEntry + my $lineData = $fileDetails->line($line); + my $differential = $lineData->functionElem(); + my @data; + if ($main::merge_function_aliases) { + push(@data, $differential->hit()); + } else { + my $aliases = $differential->aliases(); + foreach my $alias (keys %$aliases) { + push(@data, $aliases->{$alias}); + } + } + foreach my $d (@data) { + my ($count, $tla) = @$d; + die("unexpected branch TLA $tla for count $count") + unless (($count != 0) == exists($tlaData{$tla})); + #next if (0 == $count); + $tlaData{$tla} += 1; + } + } + } + } + my $self = [$testcaseCounts, $fileDetails, \%tlaData, $covtype]; + bless $self, $class; + return $self; +} + +sub covtype +{ + my $self = shift; + return $self->[3]; +} + +sub count +{ + my ($self, $tla) = @_; + + my $tlaCounts = $self->[2]; + return exists($tlaCounts->{$tla}) ? $tlaCounts->{$tla} : 0; +} + +package GenHtml; + +use constant { + TOP => 0, + WORKLIST => 1, + PENDING => 2, + JOBS => 3, + CHILD_DATA => 4, + RETRY_COUNT => 5, + TMPDIR => 6, + DELAY_TIMER => 7, + CURRENT_PARALLEL => 8, +}; + +sub new +{ + my ($class, $current_data) = @_; + my $self = [ + SummaryInfo->new("top", ''), # top has empty name + [], # worklist: dependencies are complete - this can run immediately + {}, # %pending; # map of name->list of as-yet incomplete dependencies + # This task can be scheduled as soon as its last dependency + # is complete. + [], # jobs which are to be scheduled for execution + {}, # childData - for callback + {}, # jobID -> fail count for this job + File::Temp->newdir("genhtml_XXXX", + DIR => $lcovutil::tmp_dir, + CLEANUP => 1), + 0, # delay timer + 0 # current count of parallel jobs + ]; + bless $self, $class; + + lcovutil::info(1, "Writing temporary data to " . $self->[TMPDIR] . "\n"); + my $pending = $self->[PENDING]; + my $worklist = $self->[WORKLIST]; + my $top_level_summary = $self->[0]; + # no parent data for top-level + my $toplevelPerTestData = + ($main::hierarchical || $main::flat) ? [{}, {}, {}, {}] : undef; + $pending->{""} = [ + ['top', + [$top_level_summary, $toplevelPerTestData, + $top_level_summary->name() + ], + ['root'], + ['root'], + [undef, undef] + ], + {} + ]; + + # sort the worklist so segment will tend to contain files from the same directory + foreach my $f (sort $current_data->files()) { + my $traceInfo = $current_data->data($f); + my $filename = ReadCurrentSource::resolve_path($traceInfo->filename()); + my ($vol, $parentDir, $file) = File::Spec->splitpath($filename); + if (!File::Spec->file_name_is_absolute($filename)) { + if ($parentDir) { + $parentDir = File::Spec->catfile($main::cwd, $parentDir); + } else { + $parentDir = $main::cwd; + } + $filename = File::Spec->catfile($parentDir, $file); + } + my $short_name = + $main::no_prefix ? 
$filename : + main::apply_prefix($filename, @main::dir_prefix); + my $is_absolute = File::Spec->file_name_is_absolute($short_name); + $short_name =~ s|^$lcovutil::dirseparator||; + my @short_path = File::Spec->splitdir($short_name); + my @path = File::Spec->splitdir($parentDir); + + my @rel_dir_stack = + @short_path; # @rel_dir_stack: relative path to parent dir of $f + pop(@rel_dir_stack); # remove filename from the end + my $pendingParent; + if ($main::hierarchical) { + # excludes trailing '/' + my $base = substr($filename, 0, -(length($short_name) + 1)); + my $relative_path = ""; + my $path = $base; + $pendingParent = + $pending->{""}; # this is the top-level object dependency + my @sp; + my @p = split($lcovutil::dirseparator, substr($base, 0, -1)) + ; # remove trailing '/' + while (scalar(@rel_dir_stack)) { + my $element = shift(@rel_dir_stack); + $relative_path .= $element; + $path .= $lcovutil::dirseparator . $element; + push(@p, $element); + push(@sp, $element); + if (exists($pending->{$path})) { + $pendingParent = $pending->{$path}; + } else { + my $perTestData = [{}, {}, {}, {}]; + + my @dirData = (SummaryInfo->new("directory", $relative_path, + $is_absolute), + $perTestData, + $path); + add_dependency($pendingParent, $path); + my @spc = @sp; + my @pc = @p; + $pendingParent = [ + ['dir', \@dirData, + \@spc, \@pc, + $pendingParent + ], + {} + ]; + die("unexpected pending entry") + if exists($pending->{$dirData[2]}); + $pending->{$dirData[2]} = $pendingParent; + } + $relative_path .= $lcovutil::dirseparator; + } + } elsif (!$main::flat) { + # not hierarchical + my $relative_path = File::Spec->catdir(@rel_dir_stack); + if (!exists($pending->{$parentDir})) { + my $perTestData = [{}, {}, {}, {}]; + my @dirData = (SummaryInfo->new( + "directory", $relative_path, $is_absolute + ), + $perTestData, + $parentDir); + $pending->{$parentDir} = [ + ['dir', \@dirData, \@rel_dir_stack, \@path, + $pending->{$top_level_summary->name()} + ], + {} + ]; + } + $pendingParent = $pending->{$parentDir}; + + # my directory is dependency for the top-level + add_dependency($pending->{$top_level_summary->name()}, $parentDir); + } + + # current file is a dependency of the top-level for a flat (2-level) + # report - or of the parent directory otherwise + my $mergeInto = $main::flat ? $pending->{$top_level_summary->name()} : + $pendingParent; + add_dependency($mergeInto, $f); + # this file is ready to be processed + my @fileData = (SummaryInfo->new("file", $filename), [{}, {}, {}], $f); + push(@$worklist, + ['file', \@fileData, \@short_path, \@path, $mergeInto]); + } + + $self->compute(); + # remove empty directories produced when no data in the directory + # was selected. We want to clean both the directory and its parents. + foreach my $dir (@cleanDirectoryList) { + while ($dir) { + my $d = File::Spec->catfile($main::output_directory, $dir); + if (-d $d) { + print("removing empty $d\n"); + if (!rmdir($d)) { + # directory not empty - stop + last; + } + } + $d = File::Basename::dirname($dir); # look up + last if $d eq $dir; + $dir = $d; + } + } + $lcovutil::profileData{mergeDelayTimer} = $self->[DELAY_TIMER]; + printf("overall delay: %0.3fs\n", $self->[DELAY_TIMER]) + if $main::debugScheduler; + return $self; +} + +sub top() +{ + my $self = shift; + return $self->[0]; +} + +sub add_dependency +{ + my ($parent, $name) = @_; + lcovutil::debug(1, + "add depend $name' in -> " . $parent->[0]->[1]->[2] . "\n") + unless exists($parent->[1]->{$name}); + print("add depend $name -> " . $parent->[0]->[1]->[2] . 
"\n") + if $main::debugScheduler > 1; + $parent->[1]->{$name} = 1; +} + +sub completed_dependency +{ + # return 0 when last dependency is removed + my ($self, $parent, $name) = @_; + + my $pending = $self->[PENDING]; + die("missing pending '$parent'") + unless exists($pending->{$parent}); + my $pendingParent = $pending->{$parent}; + die("missing pending entry for $name (in $parent") + unless exists($pendingParent->[1]->{$name}); + delete($pendingParent->[1]->{$name}); + print("completed $name -> $parent " . scalar(%{$pendingParent->[1]}) . "\n") + if $main::debugScheduler > 1; + # LCOV_EXCL_START + if ($main::debugScheduler > 1) { + # print parent dependencies + while (my ($k, $d) = each(%{$pendingParent->[1]})) { + die("unexpected parent dependency $k ARRAY: " . $d->[0] . ' ' . + join('/', @{$d->[2]})) + if ('ARRAY' eq ref($d) || 1 != $d); + print(" $k\n"); + } + } + # LCOV_EXCL_STOP + if (!%{$pendingParent->[1]}) { + # no more dependencies - schedule this one + my $p = $pendingParent->[0]; + print("completed dependencies - schedule " . $p->[0] . ' ' . + join('/', @{$p->[2]}) . "\n") + if $main::debugScheduler > 1; + push(@{$self->[WORKLIST]}, $p); + delete($pending->{$parent}); + } +} + +sub merge_one +{ + my ($self, $parentPerTestData, $fullname, $perTest, $summary, + $parentSummary, $parentPath, $parallel) + = @_; + my $type = $summary->type(); + if ('file' eq $type) { + my $base_name = File::Basename::basename($fullname); + for (my $i = 0; $i < scalar(@$perTest); $i++) { + $parentPerTestData->[$i]->{$base_name} = $perTest->[$i]; + } + } elsif ('directory' eq $type) { + for (my $i = 0; $i < scalar(@$perTest); $i++) { + while (my ($basename, $data) = each(%{$perTest->[$i]})) { + $parentPerTestData->[$i]->{$basename} = $data; + } + } + } else { + die("unexpected type $type") + unless $type eq 'top'; + # set the top-level to this (restored) value... + $self->[0] = $summary if $parallel; + } + if ($type ne 'top') { + my $mergeInto = $main::flat ? $self->top() : $parentSummary; + $mergeInto->append($summary); + + my $parentName = $main::flat ? '' : $parentPath; + $self->completed_dependency($parentName, $fullname); + } +} + +sub compute_one +{ + my ($type, $name, $summary, $parentSummary, $perTestData, + $rel_dir, $base_dir, $trunc_dir) + = @_; + + my $start = Time::HiRes::gettimeofday(); + + if ('file' eq $type) { + my ($testdata, $testfncdata, $testbrdata, $testcase_mcdc) = + main::process_file($summary, $parentSummary, + $trunc_dir, $rel_dir, $name); + $perTestData = [$testdata, $testfncdata, $testbrdata, $testcase_mcdc]; + } elsif ('dir' eq $type) { + # process the directory... + main::write_summary_pages($name, 1, # this is a directory, + $summary, $main::show_details, + $rel_dir, $base_dir, $trunc_dir, + $perTestData); + } else { + die("unexpected task") + unless 'top' eq $type; + # Create sorted pages + main::write_summary_pages($name, 0, # 0 == list directories + $summary, + # generate top-level 'details' in flat or hierarchical modes + $main::show_details && + ($main::flat || $main::hierarchical), + ".", "", undef, $perTestData); + } + $summary->checkCoverageCriteria(); + if ('file' eq $type && + 1 < $lcovutil::maxParallelism && + !($main::buildSerializableDatabase || $main::show_details)) { + + if ($main::no_sourceview || + $main::frames || + !$main::show_tla) { + $summary->[SummaryInfo::FILE_DETAILS] = undef; + } elsif ($main::show_tla) { + # just store the location of the first coverpoint in each display + # group - rather than returning the whole detail structure? 
+ $summary->[SummaryInfo::FILE_DETAILS]->simplify(); + } + } + my $end = Time::HiRes::gettimeofday(); + $lcovutil::profileData{file}{$name} = $end - $start; + + return $perTestData; +} + +sub _waitChild +{ + my ($self, $waitForPending, $noHang) = @_; + + my $currentSize = 0; + my $consumption = 0; + if (0 != $lcovutil::maxMemory) { + $currentSize = lcovutil::current_process_size(); + $consumption = ($self->[CURRENT_PARALLEL] + 1) * $currentSize; + } + + my $reaped = 0; + if ($waitForPending || + $self->[CURRENT_PARALLEL] >= $lcovutil::maxParallelism || + ($self->[CURRENT_PARALLEL] > 1 && $consumption > $lcovutil::maxMemory) + ) { + + my $children = $self->[CHILD_DATA]; + + # wait here for child: + # - while we are oversubscribed + # - too many parallel jobs or + # - consuming too much memory), OR + # - if the worklist is empty but there are pending jobs + # - the pending jobs will be put onto the worklist when + # their dependency finishes + my $worklist = $self->[WORKLIST]; + my $pending = $self->[PENDING]; + lcovutil::info(1, + "memory constraint ($consumption > $lcovutil::maxMemory violated: waiting." + . (scalar(@$worklist) + scalar(keys(%$pending))) + . " remaining\n") + if ($consumption > $lcovutil::maxMemory); + my $start = Time::HiRes::gettimeofday(); + my $child = wait(); + my $end = Time::HiRes::gettimeofday(); + $self->[DELAY_TIMER] += $end - $start; + return 0 unless $child > 0; + my $childstatus = $?; + + unless (exists($children->{$child})) { + lcovutil::report_unknown_child($child); + return $reaped; + } + $self->merge_child($child, $childstatus); + + $reaped += 1; + } + if ($noHang) { + while (1) { + my $start = Time::HiRes::gettimeofday(); + my $child = waitpid(-1, POSIX::WNOHANG); + if (0 < $child) { + my $end = Time::HiRes::gettimeofday(); + $self->[DELAY_TIMER] += $end - $start; + my $childstatus = $?; + my $children = $self->[CHILD_DATA]; + unless (exists($children->{$child})) { + lcovutil::report_unknown_child($child); + next; + } + $self->merge_child($child, $childstatus); + $reaped += 1; + } else { + last; + } + } + } + print("reaped $reaped current:", $self->[CURRENT_PARALLEL], + " jobs:", scalar(@{$self->[JOBS]}), + " work:", scalar(@{$self->[WORKLIST]}), + " pending:", scalar(%{$self->[PENDING]}), + "\n") if ($main::debugScheduler && $reaped); + + return $reaped; +} + +sub _reschedule +{ + my ($self, $jobID, $jobs) = @_; + # clean up + foreach my $task (@$jobs) { + my $selfSummary = $task->[1]; + $selfSummary->unsetDirs(); # reset state + } + push(@{$self->[JOBS]}, [$jobs, $jobID]); # put job back + print(" reschedule $jobID current:", + $self->[CURRENT_PARALLEL], + " jobs: ", scalar(@{$self->[JOBS]}), "\n") + if $main::debugScheduler; +} + +sub _report_fail_and_reschedule +{ + my ($self, $jobId, $jobs, $childPid, $reason) = @_; + my $counts = $self->[RETRY_COUNT]; + if (exists($counts->{$jobId})) { + $counts->{$jobId} += 1; + } else { + $counts->{$jobId} = 1; + } + lcovutil::report_fork_failure("compute job $jobId (child $childPid)", + $reason, 0, $counts->{$jobId}); + $self->_reschedule($jobId, $jobs); +} + +sub _process_child +{ + my ($self, $jobs, $jobId, $startTime) = @_; + + # clear the profile data - we want just my contribution + my $childStart = Time::HiRes::gettimeofday(); + my $tmp = '' . 
$self->[TMPDIR]; + my $stdout_file = File::Spec->catfile($tmp, "genhtml_$$.log"); + my $stderr_file = File::Spec->catfile($tmp, "genhtml_$$.err"); + my $currentState = lcovutil::initial_state(); + # clear counters so we store count only what we saw in the child + $SourceFile::annotatedFiles = 0; + $SourceFile::totalFiles = 0; + + my $status = 0; + my @rtnData; + my ($stdout, $stderr, $code) = Capture::Tiny::capture { + + foreach my $t (@$jobs) { + my ($task, $sSummary, $rel_dir, $full_dir_path, $base_dir, + $trunc_dir) + = @$t; + my @taskData; + push(@rtnData, \@taskData); + + my ($type, $data, $short_path, $path, $parentData) = @$task; + my ($selfSummary, $perTestData, $name) = @$data; + die("inconsistent") + unless $selfSummary == $sSummary; + my ($parentSummary, $parentPerTestData, $parentPath); + ($parentSummary, $parentPerTestData, $parentPath) = + @{$parentData->[0]->[1]} + unless 'top' eq $type; + + eval { + my $thisTestData = + compute_one($type, $name, $selfSummary, $parentSummary, + $perTestData, $rel_dir, $base_dir, $trunc_dir); + + # clear the parent pointer that we hacked into place. Don't want that + # extra data returned by dumper. + $selfSummary->[SummaryInfo::PARENT] = undef; + $selfSummary->[SummaryInfo::SOURCES] = {} + if $selfSummary->type() ne 'file'; + my $objname = + $selfSummary->type() eq 'top' ? "" : $selfSummary->name(); + my $criteria = $CoverageCriteria::coverageCriteria{$objname} + if exists($CoverageCriteria::coverageCriteria{$objname}); + + push(@taskData, + $thisTestData, $parentPerTestData, + $selfSummary, $criteria); + }; # eval + if ($@) { + $status = 1; + print(STDERR $@); + # @todo maybe this should be an ignorable error + next; + } + + } # foreach + }; # end capture + # print stdout and stderr ... + foreach my $d ([$stdout_file, $stdout], [$stderr_file, $stderr]) { + next + unless ($d->[1]); # only print if there is something to print + my $f = InOutFile->out($d->[0]); + my $h = $f->hdl(); + print($h $d->[1]); + } + + my $file = File::Spec->catfile($tmp, "dumper_$$"); + my $childEnd = Time::HiRes::gettimeofday(); + $lcovutil::profileData{child}{$jobId} = $childEnd - $childStart; + my $data; + eval { + $data = Storable::store([\@rtnData, + [$SourceFile::annotatedFiles, + $SourceFile::totalFiles + ], + lcovutil::compute_update($currentState), + [$childStart, $childEnd] + ], + $file); + my $done = Time::HiRes::gettimeofday(); + printf(" %d: dump %d %d jobs %0.3fs %0.3fMb %s\n", + $jobId, $$, scalar(@$jobs), + $done - $childEnd, + (-f $file ? (-s $file) : 0) / (1024 * 1024), + DateTime->now()) + if ($main::debugScheduler); + }; + if ($@ || !defined($data)) { + lcovutil::ignorable_error($lcovutil::ERROR_PARALLEL, + "Job $jobId: child $$ serialize failed" . ($@ ? ": $@" : '')); + $status = 1; + } + return $status; +} + +sub _segment_worklist +{ + my ($self, $segmentID) = @_; + my $worklist = $self->[WORKLIST]; + + my $nTasks = scalar(@$worklist); + # how many cores are available? + my $nCores = $lcovutil::maxParallelism - $self->[CURRENT_PARALLEL]; + # let's distribute them evenly + # could be more sophisticated and look at the compute time used + # by each of those tasks previously - then balance the expected load + my $tasksPerCore = + $lcovutil::maxParallelism > 1 ? 
($nTasks + $nCores - 1) / $nCores : + 1; + $tasksPerCore = $lcovutil::max_tasks_per_core + if ($tasksPerCore > $lcovutil::max_tasks_per_core); + while (@$worklist && + scalar(@{$self->[JOBS]}) <= $nCores) { + my @jobs; + push(@{$self->[JOBS]}, [\@jobs, $segmentID++]); + foreach my $task (splice(@$worklist, 0, $tasksPerCore)) { + + my ($type, $data, $short_path, $path, $parentData) = @$task; + my ($selfSummary, $perTestData, $name) = @$data; + #main::info("$type: $name\n"); + # break initialization into 2 statements - see + # https://stackoverflow.com/questions/26676488/why-is-the-variable-not-available + my ($parentSummary, $parentPerTestData, $parentPath); + ($parentSummary, $parentPerTestData, $parentPath) = + @{$parentData->[0]->[1]} + unless 'top' eq $type; + + my $rel_dir; + my $full_dir_path; + if ($type eq 'file') { + $rel_dir = + File::Spec->catdir(@{$short_path}[0 .. $#$short_path - 1]); + die("file error for " . File::Spec->catdir(@$short_path)) + if '' eq $rel_dir; + $full_dir_path = File::Spec->catdir(@{$path}[0 .. $#$path - 1]); + } elsif ($type eq 'dir') { + $rel_dir = File::Spec->catdir(@$short_path); + $full_dir_path = File::Spec->catdir(@$path); + } else { + $rel_dir = '.'; + $full_dir_path = '.'; + } + $rel_dir = lc($rel_dir) if ($lcovutil::case_insensitive); + my $p = File::Spec->catdir($main::output_directory, $rel_dir); + File::Path::make_path($p) unless -d $p || $main::no_html; + my $base_dir = main::get_relative_base_path($rel_dir); + my $trunc_dir = ($rel_dir eq '') ? 'root' : $rel_dir; + + push(@jobs, + [$task, $selfSummary, $rel_dir, + $full_dir_path, $base_dir, $trunc_dir + ]); + } # foreach task assigned to this core + } + return $segmentID; +} + +sub compute +{ + my $self = shift; + + my $worklist = $self->[WORKLIST]; + my $pending = $self->[PENDING]; + my $joblist = $self->[JOBS]; + my $children = $self->[CHILD_DATA]; + my $failedAttempts = 0; + my $segmentID = 0; + + WORK: + while ($self->[CURRENT_PARALLEL] || + @$joblist || + @$worklist || + %$pending) { + + if (1 < $lcovutil::maxParallelism) { + # wait here for child: + # - while we are oversubscribed + # - too many parallel jobs or + # - consuming too much memory), OR + # - if the worklist is empty and there are no jobs ready to + # go (these are jobs which failed and were reschedule - e.g., + # due to out-of-memory). + # There are two possibilities: + # - there are pending jobs: + # they will be put on the worklist when their dependency + # finishes (at least one of those dependencies must be + # running now - we could go check that...) + # - there are no pending jobs: so there must be just + # one remaining top-level job currently running - + # when it finishes, we are done. 
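+            #   (in either case it is safe to block here until some child
+            #   process exits)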
+ # Also do a nonblocking wait here to collect any children that + # have finished - so we have a better chance to parallelize + # more/have more tasks that we can dispatch + print(" currentParallel:", + $self->[CURRENT_PARALLEL], + " pending:", scalar(%$pending), "\n") + if ($main::debugScheduler > 1 || + ($main::debugScheduler && + 0 == scalar(@$worklist) && + 0 == scalar(@$joblist))); + if ($self->_waitChild( + 0 == scalar(@$worklist) && 0 == scalar(@$joblist), 1 + )) { + # reaped at least one child process + next WORK; + } + } + #LCOV_EXCL_START + unless (@$worklist || @$joblist) { + foreach my $e (keys %$pending) { + # something went wrong - print some debug data + lcovutil::info("me: $e: depends\n"); + my $p = $pending->{$e}; + while (my ($k, $d) = each(%{$p->[1]})) { + lcovutil::info(" $k\n"); + die("unexpected data for depend $k: $d") unless $d == 1; + } + lcovutil::info(" parent: " . $p->[0]->[0] . ' ' . + join('/', @{$p->[0]->[2]}) . "\n"); + } + # can get here if child process hit a 'die' (as opposed to an error) + # and "--keep-going" was specified. + die("unexpected empty worklist??"); + } + #LCOV_EXCL_STOP + + # schedule a few items from the worklist if we can.. + $segmentID = $self->_segment_worklist($segmentID); + + while (@$joblist) { + if (1 < $lcovutil::maxParallelism) { + # need to wait here if insufficient resources + # (only wait until resource available - pending and + # worklist are managed above) + my $reaped = $self->_waitChild(0); + die("unexpected reap count $reaped") if $reaped > 1; + } + my $d = pop(@$joblist); + my ($jobs, $jobId) = @$d; + + my $start = Time::HiRes::gettimeofday(); + foreach my $task (@$jobs) { + # keep track of where this data is stored - need it later for + # top-level 'flat' view + my $selfSummary = $task->[1]; + my $relDir = $task->[2]; + my $fullDir = $task->[3]; + $selfSummary->relativeDir($relDir); + $selfSummary->fullDir($fullDir); + } + + # distribute this job to parallel execution unless there is + # nothing else running. In that case, we won't continue + # until this job is finished - so might as well do it + # locally and not incur parallelism overhead. 
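+            # Parallel case: fork a child that runs _process_child() and
+            # writes its results to a Storable dump file; the parent reads
+            # that file back in merge_child() when the child is reaped.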
+ if (1 < $lcovutil::maxParallelism && + ($self->[CURRENT_PARALLEL] > 0 || + 0 != scalar(@$joblist) || + scalar(@$jobs) > 1) + ) { + $lcovutil::profileData{nJobs}{$jobId} = scalar(@$jobs); + + $lcovutil::deferWarnings = 1; + my $pid = fork(); + if (!defined($pid)) { + # fork failed + ++$failedAttempts; + # report_fork_failure sleeps a bit if it doesn't error out + lcovutil::report_fork_failure("process segment $jobId", + $!, $failedAttempts); + # put job back into schedule + $self->_reschedule($jobId, $jobs); + next; + } + # fork succeeded - so reset 'consecutive fails' counter + $failedAttempts = 0; + if (0 == $pid) { + # I'm the child + my $status = $self->_process_child($jobs, $jobId, $start); + exit($status); + } else { + $children->{$pid} = [$jobs, $jobId, $start]; + ++$self->[CURRENT_PARALLEL]; + print("forked $jobId current: ", + $self->[CURRENT_PARALLEL], + " jobs:", scalar(@$joblist), "\n") + if $main::debugScheduler; + } + } else { + #not parallel + lcovutil::info(1, "serial execution of task $jobId\n") + if ($lcovutil::maxParallelism > 1); + die("unexpected distribution") unless 1 == scalar(@$jobs); + my ($task, $sSummary, $rel_dir, $full_dir_path, + $base_dir, $trunc_dir) + = @{$jobs->[0]}; + + my ($type, $data, $short_path, $path, $parentData) = @$task; + my ($selfSummary, $perTestData, $name) = @$data; + die("inconsistent") unless $selfSummary == $sSummary; + my ($parentSummary, $parentPerTestData, $parentPath); + ($parentSummary, $parentPerTestData, $parentPath) = + @{$parentData->[0]->[1]} + unless 'top' eq $type; + + my $thisTestData = + compute_one($type, $name, $selfSummary, $parentSummary, + $perTestData, $rel_dir, $base_dir, $trunc_dir); + + $self->merge_one($parentPerTestData, $name, + $thisTestData, $selfSummary, + $parentSummary, $parentPath); + my $end = Time::HiRes::gettimeofday(); + $lcovutil::profileData{$type}{$name} = $end - $start; + } + } # while (job list not empty) + } # while (work remains) +} - # Store sum in %result - $result{$line} = $data1_count; +sub merge_child($$$) +{ + my $mergeStart = Time::HiRes::gettimeofday(); + my ($self, $childPid, $childstatus) = @_; + my $children = $self->[CHILD_DATA]; + + --$self->[CURRENT_PARALLEL]; + my ($jobs, $jobId, $start) = @{$children->{$childPid}}; + delete($children->{$childPid}); + + my $tmp = '' . $self->[TMPDIR]; + my $dumpfile = File::Spec->catfile($tmp, "dumper_$childPid"); + my $childLog = File::Spec->catfile($tmp, "genhtml_$childPid.log"); + my $childErr = File::Spec->catfile($tmp, "genhtml_$childPid.err"); + foreach my $f ($childLog, $childErr) { + if (!-f $f) { + $f = ''; # there was no output + next; + } + if (open(RESTORE, "<", $f)) { + # slurp into a string and eval.. + my $str = do { local $/; }; # slurp whole thing + close(RESTORE) or die("unable to close $f: $!\n"); + unlink $f; + $f = $str; + } else { + $f = "unable to open $f: $!"; + if (0 == $childstatus) { + lcovutil::report_parallel_error('genhtml', + $lcovutil::ERROR_PARALLEL, $childPid, 0, $f, + keys(%$children)); + } + } + } + my $signal = $childstatus & 0xFF; + # non-parallel execution prints stuff to stdout - so we should too + print(STDOUT $childLog) + if ((0 != $childstatus && + $signal != POSIX::SIGKILL && + $lcovutil::max_fork_fails != 0) || + $lcovutil::verbose >= 0); + print(STDERR $childErr); + + # now undump the data ... 
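+    # _process_child() stored a 4-element array:
+    #   [per-task results, [annotated/total source file counts],
+    #    global state update, [child start/end timestamps]]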
+ my $dStart = Time::HiRes::gettimeofday(); + my $dumped = Storable::retrieve($dumpfile) + if (-f $dumpfile && 0 == $childstatus); + my $dEnd = Time::HiRes::gettimeofday(); + printf(" %d restore %d: %0.3fs %s (parallel %d)\n", + $jobId, $childPid, $dEnd - $dStart, + DateTime->now(), $self->[CURRENT_PARALLEL]) + if $main::debugScheduler; + + if (defined($dumped)) { + eval { + my ($jobData, $countData, $update, $otherData) = @$dumped; + + die("error processing child") + if (scalar(@$jobData) != scalar(@$jobs)); + + $SourceFile::annotatedFiles += $countData->[0]; + $SourceFile::totalFiles += $countData->[1]; + lcovutil::update_state(@$update); + + $lcovutil::profileData{startDelay}{$jobId} = + $otherData->[0] - $start; + $lcovutil::profileData{mergeDelay}{$jobId} = + $mergeStart - $otherData->[1]; + + foreach my $t (@$jobs) { + my $task = $t->[0]; + my ($type, $data, $short_path, $fullname, $parentData) = @$task; + my ($childSummary, $perTestData, $name) = @$data; + my ($parentSummary, $parentPerTestData, $parentPath); + ($parentSummary, $parentPerTestData, $parentPath) = + @{$parentData->[0]->[1]} + unless 'top' eq $type; + + my $taskData = shift(@$jobData); + # there was an error while processing the child + die("error processing child " . scalar(@$taskData)) + if (scalar(@$taskData) != 4); + + my ($thisTestData, $pPerTestData, $summary, $criteria) = + @$taskData; + $childSummary->copyGuts($summary); + $self->merge_one($parentPerTestData, $name, $thisTestData, + $childSummary, $parentSummary, $parentPath, 1); + + # and save the deserialized criteria data + if (defined($criteria)) { + my $name = + $childSummary->type() eq 'top' ? "" : + $childSummary->name(); + $CoverageCriteria::coverageCriteria{$name} = $criteria; + $criteria->[1] = 0 if $criteria->[1] eq ''; + my $v = $criteria->[1]; + die('unexpected criteria data \'' . + join(' ', @$criteria) . '\'') + unless (Scalar::Util::looks_like_number($v)); + $CoverageCriteria::coverageCriteriaStatus = + ($v != 0 || 0 != scalar(@{$criteria->[2]})) ? 
$v : 0; + } + } # foreach job + }; + if ($@) { + $childstatus = 1 << 8 unless $childstatus; + print(STDERR $@); + lcovutil::report_parallel_error('genhtml', + $lcovutil::ERROR_PARALLEL, $childPid, $childstatus, + "unable to deserialize $dumpfile: $@", + keys(%$children)); + } + } + if (!-f $dumpfile) { + $self->_report_fail_and_reschedule($jobId, $jobs, $childPid, + "serialized data $dumpfile not found"); + } elsif (!defined($dumped) || $childstatus != 0) { + my $signal = $childstatus & 0xFF; + if (POSIX::SIGKILL == $signal) { + $self->_report_fail_and_reschedule($jobId, $jobs, $childPid, + "killed by OS - possibly due to out-of-memory"); + } else { + #print("stdout: $childLog\nstderr: $childErr\n"); + my $msg = "compute job $jobId"; + $msg .= ": unable to deserialize $dumpfile" + unless defined($dumped); + lcovutil::report_parallel_error('genhtml', $lcovutil::ERROR_CHILD, + $childPid, $childstatus, $msg, keys(%$children)); + } + } + my $end = Time::HiRes::gettimeofday(); + $lcovutil::profileData{segment}{$jobId} = $end - $start; + $lcovutil::profileData{merge_segment}{$jobId} = $end - $mergeStart; + + unlink $dumpfile + if -f $dumpfile; +} - $found++; - if ($data1_count > 0) { $hit++; } - } +package main; - # Add lines unique to data2_ref - foreach $line (keys(%$data2_ref)) - { - # Skip lines already in data1_ref - if (defined($data1_ref->{$line})) { next; } +# Global variables & initialization - # Copy count from data2_ref - $result{$line} = $data2_ref->{$line}; +lcovutil::save_cmd_line(\@ARGV, "$FindBin::RealBin"); + +# TraceFile Instance containing all data from the 'current' .info file +# - constructed at start of GenHtml +our $current_data; +# TraceFile Instance containing all data from the baseline .info file (if any) +# - constructed in GenHtml (if needed) +our $base_data; +# Instance containing all data from diff file +our $diff_data = DiffMap->new(); +our @opt_dir_prefix; # Array of prefixes to remove from all sub directories +our @dir_prefix; +our %test_description; # Hash containing test descriptions if available +our $current_date = get_date_string(undef); + +our @info_filenames; # List of .info files to use as data source +our $header_title; # Title at top of HTML report page (above table) +our $footer; # String at bottom of HTML report page +our $test_title; # Title for output as written to each page header +our $output_directory; # Name of directory in which to store output +our @base_filenames; # Optional: names of files containing baseline data +our $age_basefile; # how old is the baseline data file? 
+our $baseline_title; # Optional name of baseline - written to page headers +our $baseline_date; # Optional date that baseline was created +our $diff_filename; # Optional name of file containing baseline data +our $strip; # If set, strip leading directories when applying diff +our $desc_filename; # Name of file containing test descriptions +our $css_filename; # Optional name of external stylesheet file to use +our $show_details; # If set, generate detailed directory view +our $no_prefix; # If set, do not remove filename prefix +our $show_tla; # categorize coverage data (or not) +our $show_functionProportions = 0 + ; # show proportion of lines/branches contained in the function which are hit +our $show_hitTotalCol; # include the 'hit' or 'missed' total count in tables + # - this is part of the 'legacy' view + # - also enabled when full differential categories are used + # (i.e., but not when there is no baseline data - so no + # categories apart from 'GNC' and 'UNC' +our $use_legacyLabels; +our $show_dateBins; # show last modified and last author info +our $show_ownerBins; # show list of people who have edited code + # (in this file/this directory/etc) +our $show_nonCodeOwners; # show last modified and last author info for + # non-code lines (e.g., comments) +our $show_zeroTlaColumns; # unless set, improve readability by removing + # summary table columns which contain + # only zero (blank) entries +our $show_simplifiedColors; +our $treatNewFileAsBaseline; +our $elide_path_mismatch = + 0; # handle case that file in 'baseline' and 'current' .info + # data matches some name in the 'diff' file such that + # the basename is the same but the pathname is different + # - then pretend that the names DID match +our $synthesizeMissingFile = + 0; # create file content if not found for annotation +our $hierarchical = 0; # if true: show directory hierarchy + # default: legacy two-level report +our $flat = 0; # if true: single table of all files in top level + +our $sort_tables = 1; # If set, provide directory listings with sorted entries +our $no_sort; # Disable sort +our $frames; # If set, use frames for source code view +our $keep_descriptions; # If set, do not remove unused test case descriptions +our $suppress_function_aliases; # if set, don't show list of collapsed + # function aliases +our $merge_function_aliases; +our $no_sourceview; # If set, do not create a source code view for each file +our $no_html; # don't generate HTML if set +our $legend; # If set, include legend in output +our $tab_size = 8; # Number of spaces to use in place of tab +our $html_prolog_file; # Custom HTML prolog file (up to and including ) +our $html_epilog_file; # Custom HTML epilog file (from onwards) +our $html_prolog; # Actual HTML prolog +our $html_epilog; # Actual HTML epilog +our $html_ext = "html"; # Extension for generated HTML files +our $html_gzip = 0; # Compress with gzip +our $opt_missed; # List/sort lines by missed counts +our $dark_mode; # Use dark mode palette or normal +our $charset = "UTF-8"; # Default charset for HTML pages +our @fileview_sortlist; +our @fileview_sortname = ("", "-sort-l", "-sort-f", "-sort-b", '-sort-m'); +our @fileview_prefixes = (""); +our @funcview_sortlist; +our @rate_name = ("Lo", "Med", "Hi"); +our @rate_png = ("ruby.png", "amber.png", "emerald.png"); +our $rc_desc_html = 0; # lcovrc: genhtml_desc_html +our $deprecated_highlight; # ignored former option - $found++; - if ($result{$line} > 0) { $hit++; } - } +# simplify/shorten names in 'function detail table' +our 
@simplifyFunctionScript; # the arg list +our $simplifyFunctionCallback; # the actual callback - return (\%result, $found, $hit); -} +our $cwd = cwd(); # Current working directory +# for debugging +our $verboseScopeRegexp; # dump categorization processing if match # -# merge_checksums(ref1, ref2, filename) -# -# REF1 and REF2 are references to hashes containing a mapping -# -# line number -> checksum -# -# Merge checksum lists defined in REF1 and REF2 and return reference to -# resulting hash. Die if a checksum for a line is defined in both hashes -# but does not match. +# Code entry point # -sub merge_checksums($$$) -{ - my $ref1 = $_[0]; - my $ref2 = $_[1]; - my $filename = $_[2]; - my %result; - my $line; - - foreach $line (keys(%{$ref1})) - { - if (defined($ref2->{$line}) && - ($ref1->{$line} ne $ref2->{$line})) - { - die("ERROR: checksum mismatch at $filename:$line\n"); - } - $result{$line} = $ref1->{$line}; - } +$SIG{__WARN__} = \&lcovutil::warn_handler; +$SIG{__DIE__} = \&lcovutil::die_handler; + +STDERR->autoflush; +STDOUT->autoflush; + +my @datebins; +my (@rc_date_bins, @rc_annotate_script, @rc_select_script, @rc_date_labels, + @rc_simplifyFunctionScript); + +my %genhtml_rc_opts = ( + "genhtml_css_file" => \$css_filename, + "genhtml_header" => \$header_title, + "genhtml_footer" => \$footer, + "genhtml_line_field_width" => \$line_field_width, + "genhtml_overview_width" => \$overview_width, + "genhtml_nav_resolution" => \$nav_resolution, + "genhtml_nav_offset" => \$nav_offset, + "genhtml_keep_descriptions" => \$keep_descriptions, + "genhtml_no_prefix" => \$no_prefix, + "genhtml_no_source" => \$no_sourceview, + "genhtml_num_spaces" => \$tab_size, + "genhtml_frames" => \$frames, + "genhtml_legend" => \$legend, + "genhtml_html_prolog" => \$html_prolog_file, + "genhtml_html_epilog" => \$html_epilog_file, + "genhtml_html_extension" => \$html_ext, + "genhtml_html_gzip" => \$html_gzip, + "genhtml_precision" => \$lcovutil::default_precision, + "genhtml_function_coverage" => \$lcovutil::func_coverage, + "genhtml_branch_coverage" => \$lcovutil::br_coverage, + "genhtml_hi_limit" => \$hi_limit, + "genhtml_med_limit" => \$med_limit, + "genhtml_line_hi_limit" => \$ln_hi_limit, + "genhtml_line_med_limit" => \$ln_med_limit, + "genhtml_function_hi_limit" => \$fn_hi_limit, + "genhtml_function_med_limit" => \$fn_med_limit, + "genhtml_branch_hi_limit" => \$br_hi_limit, + "genhtml_branch_med_limit" => \$br_med_limit, + "genhtml_mcdc_hi_limit" => \$mcdc_hi_limit, + "genhtml_mcdc_med_limit" => \$mcdc_med_limit, + "genhtml_branch_field_width" => \$br_field_width, + "genhtml_mcdc_field_width" => \$mcdc_field_width, + "genhtml_owner_field_width" => \$owner_field_width, + "genhtml_age_field_width" => \$age_field_width, + "genhtml_sort" => \$sort_tables, + "genhtml_charset" => \$charset, + "genhtml_desc_html" => \$rc_desc_html, + 'merge_function_aliases' => \$merge_function_aliases, + 'suppress_function_aliases' => \$suppress_function_aliases, + "genhtml_missed" => \$opt_missed, + "genhtml_dark_mode" => \$dark_mode, + "genhtml_hierarchical" => \$hierarchical, + "genhtml_flat_view" => \$flat, + "genhtml_show_havigation" => \$show_tla, + "genhtml_show_noncode_owners" => \$show_nonCodeOwners, + "genhtml_show_function_proportion" => \$show_functionProportions, + 'genhtml_show_owner_table' => \$show_ownerBins, + "genhtml_demangle_cpp" => \@lcovutil::cpp_demangle, + "genhtml_demangle_cpp_tool" => \$lcovutil::cpp_demangle_tool, + "genhtml_demangle_cpp_params" => \$lcovutil::cpp_demangle_params, + 
'genhtml_annotate_script' => \@rc_annotate_script, + 'genhtml_annotate_tooltip' => \$SourceFile::annotateTooltip, + "select_script" => \@rc_select_script, + "simplify_function" => \@rc_simplifyFunctionScript, + 'num_context_lines' => \$InInterestingRegion::num_context_lines, + 'genhtml_date_bins' => \@rc_date_bins, + 'genhtml_date_labels' => \@rc_date_labels, + 'truncate_owner_table' => \@truncateOwnerTableLevels, + 'owner_table_entries' => \$ownerTableElements, + 'compact_summary_tables' => \$compactSummaryTables, + 'genhtml_synthesize_missing' => \$synthesizeMissingFile, + 'scope_regexp' => \$verboseScopeRegexp,); + +my $save; +my $serialize; +my $validateHTML = exists($ENV{LCOV_VALIDATE}); + +my %genhtml_options = ( + "output-directory|o=s" => \$output_directory, + "header-title=s" => \$header_title, + "footer=s" => \$footer, + "title|t=s" => \$test_title, + "description-file|d=s" => \$desc_filename, + "keep-descriptions|k" => \$keep_descriptions, + "css-file|c=s" => \$css_filename, + "baseline-file|b=s" => \@base_filenames, + "baseline-title=s" => \$baseline_title, + "baseline-date=s" => \$baseline_date, + "current-date=s" => \$current_date, + "diff-file=s" => \$diff_filename, + "annotate-script=s" => \@SourceFile::annotateScript, + "select-script=s" => \@SummaryInfo::selectCallbackScript, + "simplify-script=s" => \@simplifyFunctionScript, + "new-file-as-baseline" => \$treatNewFileAsBaseline, + 'elide-path-mismatch' => \$elide_path_mismatch, + 'synthesize-missing' => \$synthesizeMissingFile, + # if 'show-owners' is set: generate the owner table + # if it is passed a value: show all the owners, + # regardless of whether they have uncovered code or not + 'show-owners:s' => \$show_ownerBins, + 'show-noncode' => \$show_nonCodeOwners, + 'show-zero-columns' => \$show_zeroTlaColumns, + 'simplified-colors' => \$show_simplifiedColors, + "date-bins=s" => \@datebins, + 'date-labels=s' => \@SummaryInfo::ageGroupHeader, + "prefix|p=s" => \@opt_dir_prefix, + "num-spaces=i" => \$tab_size, + "no-prefix" => \$no_prefix, + "no-sourceview" => \$no_sourceview, + 'no-html' => \$no_html, + "show-details|s" => \$show_details, + "frames|f" => \$frames, + "highlight" => \$deprecated_highlight, + "legend" => \$legend, + 'save' => \$save, + 'serialize=s' => \$serialize, + 'scheduler+' => \$debugScheduler, + "html-prolog=s" => \$html_prolog_file, + "html-epilog=s" => \$html_epilog_file, + "html-extension=s" => \$html_ext, + "html-gzip" => \$html_gzip, + "hierarchical" => \$hierarchical, + "flat" => \$flat, + "sort-tables" => \$sort_tables, + "no-sort" => \$no_sort, + "precision=i" => \$lcovutil::default_precision, + "missed" => \$opt_missed, + "dark-mode" => \$dark_mode, + "show-navigation" => \$show_tla, + "show-proportion" => \$show_functionProportions, + "merge-aliases" => \$merge_function_aliases, + "suppress-aliases" => \$suppress_function_aliases, + 'validate' => \$validateHTML,); + +# remove ambiguous entry from common table - +# (genhtml has '--sort-inputs' and '--sort-tables') +# handle the (no obsolete) '--sort' option +Getopt::Long::Configure("pass_through", "no_auto_abbrev"); +my $obsoleteSort; +Getopt::Long::GetOptions('sort' => \$obsoleteSort); +Getopt::Long::Configure('default'); - foreach $line (keys(%{$ref2})) - { - $result{$line} = $ref2->{$line}; - } +# Parse command line options +if ( + !lcovutil::parseOptions(\%genhtml_rc_opts, \%genhtml_options, + \$output_directory) +) { + print(STDERR "Use $tool_name --help to get usage information\n"); + exit(1); +} - return \%result; +if 
(defined($obsoleteSort)) { + $sort_tables = $obsoleteSort; + lcovutil::ignorable_warning($lcovutil::ERROR_DEPRECATED, + "option '--sort' is deprecated and will be removed in a future release; please use '--sort-tables' instead." + ); } +$merge_function_aliases = 1 + if ($suppress_function_aliases || + defined($lcovutil::cov_filter[$lcovutil::FILTER_FUNCTION_ALIAS])); -# -# merge_func_data(funcdata1, funcdata2, filename) -# +lcovutil::ignorable_error($lcovutil::ERROR_DEPRECATED, + "option '--highlight' has been removed.") + if ($deprecated_highlight); +$buildSerializableDatabase = 1 if $serialize; +$no_sourceview = 1 if $no_html; -sub merge_func_data($$$) -{ - my ($funcdata1, $funcdata2, $filename) = @_; - my %result; - my $func; +# Copy related values if not specified +$ln_hi_limit = $hi_limit if (!defined($ln_hi_limit)); +$ln_med_limit = $med_limit if (!defined($ln_med_limit)); +$fn_hi_limit = $hi_limit if (!defined($fn_hi_limit)); +$fn_med_limit = $med_limit if (!defined($fn_med_limit)); +$br_hi_limit = $hi_limit if (!defined($br_hi_limit)); +$br_med_limit = $med_limit if (!defined($br_med_limit)); +$mcdc_hi_limit = $hi_limit if (!defined($mcdc_hi_limit)); +$mcdc_med_limit = $med_limit if (!defined($mcdc_med_limit)); +$frames = undef unless (defined($frames) && $frames); + +foreach my $rc ([\@datebins, \@rc_date_bins], + [\@SummaryInfo::ageGroupHeader, \@rc_date_labels], + [\@SourceFile::annotateScript, \@rc_annotate_script], + [\@SummaryInfo::selectCallbackScript, \@rc_select_script], + [\@simplifyFunctionScript, \@rc_simplifyFunctionScript], + +) { + @{$rc->[0]} = @{$rc->[1]} unless (@{$rc->[0]}); +} - if (defined($funcdata1)) { - %result = %{$funcdata1}; - } +foreach my $cb ([\$SourceFile::annotateCallback, \@SourceFile::annotateScript], + [\$SummaryInfo::selectCallback, + \@SummaryInfo::selectCallbackScript + ], + [\$simplifyFunctionCallback, \@simplifyFunctionScript], +) { + lcovutil::configure_callback($cb->[0], @{$cb->[1]}) + if scalar(@{$cb->[1]}); +} - foreach $func (keys(%{$funcdata2})) { - my $line1 = $result{$func}; - my $line2 = $funcdata2->{$func}; +# we won't apply simplifications if we don't generate the table +# (we still check that the callback is valid - even though we don't use it) +$simplifyFunctionCallback = undef + if $no_sourceview; - if (defined($line1) && ($line1 != $line2)) { - warn("WARNING: function data mismatch at ". - "$filename:$line2\n"); - next; - } - $result{$func} = $line2; - } +if (defined($lcovutil::stop_on_error) && + !$lcovutil::stop_on_error) { + # in the spirit of "don't stop" - don't worry about missing files. + $synthesizeMissingFile = 1; +} - return \%result; +# Merge sort options +$sort_tables = 0 + if ($no_sort); +die( + "unsupported use of mutually exclusive '--flat' and '--hierachical' options") + if ($flat && $hierarchical); + +$show_tla = 1 + if (@base_filenames) || defined($diff_filename); +if ($show_tla && + (0 == scalar(@base_filenames) && + !defined($diff_filename)) +) { + # no baseline - so not a differential report. + # modify some settings to generate corresponding RTL code. 
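+    # (i.e., fall back to the legacy genhtml labels and colors rather
+    # than the differential coverage categories)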
+ $use_legacyLabels = 1; + SummaryInfo::noBaseline(); +} else { + $show_hitTotalCol = 1; } +if (@SourceFile::annotateScript) { + $show_dateBins = 1; + if (0 == scalar(@datebins)) { + # default: 7, 30, 180 days + @datebins = @SummaryInfo::defaultCutpoints; + } else { + my %uniqify = + map { $_, 1 } + split($lcovutil::split_char, + join($lcovutil::split_char, @datebins)); + @datebins = sort(keys %uniqify); + } + SummaryInfo::setAgeGroups(@datebins); + + if (defined($show_ownerBins)) { + @truncateOwnerTableLevels = + split($lcovutil::split_char, + join($lcovutil::split_char, @truncateOwnerTableLevels)); + foreach my $l (@truncateOwnerTableLevels) { + lcovutil::ignorable_error($lcovutil::ERROR_USAGE, + "Unknown 'truncate_owner_table' level '$l': should be 'top', 'directory', or 'file'." + ) unless grep(/^$l$/, ('top', 'directory', 'file')); + } + + lcovutil::ignorable_error($lcovutil::ERROR_USAGE, + "Unsupported value 'owner_table_entries = $ownerTableElements': expected positive integer." + ) + unless (!defined($ownerTableElements) || + (Scalar::Util::looks_like_number($ownerTableElements) && + 0 < $ownerTableElements)); + } +} else { + $treatNewFileAsBaseline = undef; + die("\"--show-owners\" option requires \"--annotate-script\" for revision control integration" + ) if defined($show_ownerBins); + die("\"--date-bins\" option requires \"--annotate-script\" for revision control integraion" + ) if (0 != scalar(@datebins)); +} +if (0 != (defined($diff_filename) ^ (0 != scalar(@base_filenames)))) { + if (@base_filenames) { + lcovutil::ignorable_warning($lcovutil::ERROR_USAGE, + "Specified --baseline-file without --diff-file: assuming no source differences. Hope that is OK.\n" + ); + } else { + lcovutil::ignorable_warning($lcovutil::ERROR_USAGE, + "Specified --diff-file without --baseline-file: assuming baseline code coverage was empty (nothing covered). Hope that is OK\n" + ); + # OK..just assume that the baseline is empty... + } + $show_tla = 1; +} -# -# add_fnccount(fnccount1, fnccount2) -# -# Add function call count data. Return list (fnccount_added, f_found, f_hit) -# +if (defined($header_title)) { + $title = $header_title; +} else { + # use the default title bar. + $title =~ s/ differential// # not a differential report, if no baseline... + unless defined($diff_filename) || 0 != scalar(@base_filenames); +} +push(@fileview_prefixes, "-date") + if ($show_dateBins); +push(@fileview_prefixes, "-owner") + if (defined($show_ownerBins)); + +# use LCOV original colors if no baseline file +# (so no differential coverage) +if ($use_legacyLabels || + !$show_tla || + (defined($show_simplifiedColors) && + $show_simplifiedColors) +) { + lcovutil::use_vanilla_color(); +} -sub add_fnccount($$) -{ - my ($fnccount1, $fnccount2) = @_; - my %result; - my $fn_found; - my $fn_hit; - my $function; +if ($dark_mode) { + # if 'dark_mode' is set, then update the color maps + # For the moment - just reverse the foreground and background + my %reverse; + while (my ($key, $value) = each(%lcovutil::normal_palette)) { + $reverse{lc($value)} = $key; + } + + foreach my $tla (@SummaryInfo::tlaPriorityOrder) { + # swap + my $bg = $lcovutil::tlaColor{$tla}; + + my $key = substr($bg, 1); + # if this color is in normal_palette, then swap to dark_palette + # version. Otherwise, just swap the 'tlaColor' and 'tlaTextColor' + # (foreground and background) + if (exists($reverse{lc($key)})) { + my $k = $reverse{lc($key)}; + $lcovutil::tlaColor{$tla} = '#' . 
uc($lcovutil::dark_palette{$k}); + } else { + $lcovutil::tlaColor{$tla} = $lcovutil::tlaTextColor{$tla}; + } + $lcovutil::tlaTextColor{$tla} = $bg; + } +} + +@info_filenames = AggregateTraces::find_from_glob(@ARGV); - if (defined($fnccount1)) { - %result = %{$fnccount1}; - } - foreach $function (keys(%{$fnccount2})) { - $result{$function} += $fnccount2->{$function}; - } - $fn_found = scalar(keys(%result)); - $fn_hit = 0; - foreach $function (keys(%result)) { - if ($result{$function} > 0) { - $fn_hit++; - } - } +# Split the list of prefixes if needed +parse_dir_prefix(@opt_dir_prefix); - return (\%result, $fn_found, $fn_hit); +# Check for info filename +if (!@info_filenames) { + die("No filename specified\n" . + "Use $tool_name --help to get usage information\n"); } -# -# add_testfncdata(testfncdata1, testfncdata2) -# -# Add function call count data for several tests. Return reference to -# added_testfncdata. -# +# Generate a title if none is specified +$test_title = compute_title(\@ARGV, \@info_filenames) + unless $test_title; + +if (@base_filenames) { + my @base_patterns = @base_filenames unless $baseline_title; + @base_filenames = AggregateTraces::find_from_glob(@base_filenames); + + $baseline_title = compute_title(\@base_patterns, \@base_filenames) + unless $baseline_title; + my $baseline_create; + + if ($baseline_date) { + eval { + my $epoch = Date::Parse::str2time($baseline_date); + $baseline_create = DateTime->from_epoch(epoch => $epoch); + }; + if ($@) { + #did not parse + lcovutil::info( + "failed to parse date '$baseline_date' - falling back to file creation time\n" + ); + } + } + if (!defined($baseline_create)) { + # if not specified, use 'last modified' of first baseline trace file + my $create = (stat($base_filenames[0]))[9]; + $baseline_create = DateTime->from_epoch(epoch => $create); + $baseline_date = get_date_string($create) + unless defined($baseline_date); + } + $age_basefile = + $baseline_create->delta_days(DateTime->now())->in_units('days'); +} -sub add_testfncdata($$) -{ - my ($testfncdata1, $testfncdata2) = @_; - my %result; - my $testname; +# Make sure css_filename is an absolute path (in case we're changing +# directories) +if ($css_filename) { + if (!File::Spec->file_name_is_absolute($css_filename)) { + $css_filename = File::Spec->catfile($cwd, $css_filename); + } +} - foreach $testname (keys(%{$testfncdata1})) { - if (defined($testfncdata2->{$testname})) { - my $fnccount; +# Make sure tab_size is within valid range +if ($tab_size < 1) { + print(STDERR "ERROR: invalid number of spaces specified: $tab_size!\n"); + exit(1); +} - # Function call count data for this testname exists - # in both data sets: add - ($fnccount) = add_fnccount( - $testfncdata1->{$testname}, - $testfncdata2->{$testname}); - $result{$testname} = $fnccount; - next; - } - # Function call count data for this testname is unique to - # data set 1: copy - $result{$testname} = $testfncdata1->{$testname}; - } +# Get HTML prolog and epilog +$html_prolog = get_html_prolog($html_prolog_file); +$html_epilog = get_html_epilog($html_epilog_file); - # Add count data for testnames unique to data set 2 - foreach $testname (keys(%{$testfncdata2})) { - if (!defined($result{$testname})) { - $result{$testname} = $testfncdata2->{$testname}; - } - } - return \%result; +# Issue a warning if --no-sourceview is enabled together with --frames +if (defined($frames)) { + if ($no_sourceview) { + lcovutil::ignorable_warning($lcovutil::ERROR_USAGE, + "option --frames disabled because --no-sourceview was specified."); + 
$frames = undef; + } elsif ($show_tla) { + lcovutil::info( + "Note: file table to source location navigation hyperlinks are disabled when --frames is enabled\n" + ); + } } +# Issue a warning if --no-prefix is enabled together with --prefix +if ($no_prefix && @dir_prefix) { + lcovutil::ignorable_warning($lcovutil::ERROR_USAGE, + "option --prefix disabled because --no-prefix was ignored"); + @dir_prefix = (); +} -# -# brcount_to_db(brcount) -# -# Convert brcount data to the following format: -# -# db: line number -> block hash -# block hash: block number -> branch hash -# branch hash: branch number -> taken value -# - -sub brcount_to_db($) -{ - my ($brcount) = @_; - my $line; - my $db; +@fileview_sortlist = ($SORT_FILE); +@funcview_sortlist = ($SORT_FILE); - # Add branches to database - foreach $line (keys(%{$brcount})) { - my $brdata = $brcount->{$line}; +if ($sort_tables) { + push(@fileview_sortlist, $SORT_LINE); + push(@fileview_sortlist, $SORT_FUNC) if ($lcovutil::func_coverage); + push(@fileview_sortlist, $SORT_BRANCH) if ($lcovutil::br_coverage); + push(@fileview_sortlist, $SORT_MCDC) if ($lcovutil::mcdc_coverage); + push(@funcview_sortlist, $SORT_LINE); + if ($show_functionProportions) { + push(@funcview_sortlist, $SORT_MISSING_LINE); + push(@funcview_sortlist, $SORT_MISSING_BRANCH) + if ($lcovutil::br_coverage); + push(@funcview_sortlist, $SORT_MISSING_MCDC) + if ($lcovutil::mcdc_coverage); + } +} - foreach my $entry (split(/:/, $brdata)) { - my ($block, $branch, $taken) = split(/,/, $entry); - my $old = $db->{$line}->{$block}->{$branch}; +if ($frames) { + # Include genpng code needed for overview image generation + do(File::Spec->catfile($tool_dir, 'genpng')); +} - if (!defined($old) || $old eq "-") { - $old = $taken; - } elsif ($taken ne "-") { - $old += $taken; - } +# Make sure precision is within valid range +check_precision(); - $db->{$line}->{$block}->{$branch} = $old; - } - } +# Make sure output_directory exists, create it if necessary +$output_directory = '.' + if !defined($output_directory); +if ($output_directory && !-d $output_directory) { + info(1, "make_path $output_directory\n"); + File::Path::make_path($output_directory); +} - return $db; +if ($save && $output_directory) +{ # save copy of .info and diff files: useful for debugging user cases later + foreach my $d (['baseline_', @base_filenames], + ['', $diff_filename], + ['current_', @info_filenames] + ) { + my $prefix = shift @$d; + $prefix = '' unless @base_filenames; + foreach my $from (@$d) { + next unless defined($from); + my $to = File::Spec->catfile($output_directory, + $prefix . File::Basename::basename($from)); + File::Copy::copy($from, $to) unless -f $to; + } + } } +# save command line in output directory - useful for later debugging: +my $f = File::Spec->catfile($output_directory, 'cmd_line'); +open(CMD, '>', $f) or die("unable to open $f: $!"); +print(CMD $lcovutil::profileData{config}{cmdLine} . "\n"); +close(CMD) or die("unable to close $f: $!\n"); -# -# db_to_brcount(db[, brcount]) -# -# Convert branch coverage data back to brcount format. If brcount is specified, -# the converted data is directly inserted in brcount. 
-# +my $exit_status = 0; +# Do something +my $now = Time::HiRes::gettimeofday(); +my $top; +eval { $top = gen_html(); }; +if ($@) { + $exit_status = 1; + print(STDERR $@); +} -sub db_to_brcount($;$) -{ - my ($db, $brcount) = @_; - my $line; - my $br_found = 0; - my $br_hit = 0; +my $then = Time::HiRes::gettimeofday(); +$lcovutil::profileData{overall} = $then - $now; - # Convert database back to brcount format - foreach $line (sort({$a <=> $b} keys(%{$db}))) { - my $ldata = $db->{$line}; - my $brdata; - my $block; +if (0 == $exit_status) { + # warn about unused include/exclude directives + lcovutil::warn_file_patterns(); + ReadCurrentSource::warn_sourcedir_patterns(); +} - foreach $block (sort({$a <=> $b} keys(%{$ldata}))) { - my $bdata = $ldata->{$block}; - my $branch; +# now check the coverage criteria (if any) +if (0 == $exit_status && + ($CoverageCriteria::coverageCriteriaStatus || + @CoverageCriteria::coverageCriteriaScript) +) { + CoverageCriteria::summarize(); + # fail for signal or status + $exit_status = (($CoverageCriteria::coverageCriteriaStatus & 0xFF) | + ($CoverageCriteria::coverageCriteriaStatus >> 8)); +} - foreach $branch (sort({$a <=> $b} keys(%{$bdata}))) { - my $taken = $bdata->{$branch}; +if ($serialize) { + #my $f = File::Spec->catfile($output_directory, 'coverage.dat'); + my $data; + eval { $data = Storable::store($top, $serialize); }; + if ($@ || !defined($data)) { + print("unable to serialize coverage data", $@ ? ": $@" : ''); + $exit_status = 1; + } +} - $br_found++; - $br_hit++ if ($taken ne "-" && $taken > 0); - $brdata .= "$block,$branch,$taken:"; - } - } - $brcount->{$line} = $brdata; - } +lcovutil::cleanup_callbacks(); +lcovutil::save_profile(File::Spec->catfile($output_directory, 'genhtml'), + File::Spec->catfile($output_directory, 'profile.html')); - return ($brcount, $br_found, $br_hit); +if (0 == $exit_status && $validateHTML) { + ValidateHTML->new($output_directory, '.' . $html_ext); } +# exit with non-zero status if --keep-going and some errors detected +$exit_status = 1 + if (0 == $exit_status && lcovutil::saw_error()); + +exit($exit_status); # -# brcount_db_combine(db1, db2, op) +# print_usage(handle) # -# db1 := db1 op db2, where -# db1, db2: brcount data as returned by brcount_to_db -# op: one of $BR_ADD and BR_SUB +# Print usage information. # -sub brcount_db_combine($$$) + +sub print_usage(*) { - my ($db1, $db2, $op) = @_; + local *HANDLE = $_[0]; - foreach my $line (keys(%{$db2})) { - my $ldata = $db2->{$line}; + print(HANDLE <{$block}; +Create HTML output for coverage data found in TRACEFILE. Note that TRACEFILE +may also be a list of filenames. - foreach my $branch (keys(%{$bdata})) { - my $taken = $bdata->{$branch}; - my $new = $db1->{$line}->{$block}->{$branch}; +COMMON OPTIONS + -h, --help Print this help, then exit + --version Print version number, then exit + -v, --verbose Increase verbosity level + -q, --quiet Decrease verbosity level (e.g. 
to turn off + progress messages) + --debug Increase debug verbosity level + --config-file FILENAME Specify configuration file location + --rc SETTING=VALUE Override configuration file setting + --ignore-errors ERRORS Continue after ERRORS (see man page for + full list of errors and their meaning) + --keep-going Do not stop if an error occurs + --tempdir DIRNAME Write temporary and intermediate data here + --preserve Keep intermediate files for debugging - if (!defined($new) || $new eq "-") { - $new = $taken; - } elsif ($taken ne "-") { - if ($op == $BR_ADD) { - $new += $taken; - } elsif ($op == $BR_SUB) { - $new -= $taken; - $new = 0 if ($new < 0); - } - } +OPERATION + -o, --output-directory OUTDIR Write HTML output to OUTDIR + -d, --description-file DESCFILE Read test case descriptions from DESCFILE + -k, --keep-descriptions Do not remove unused test descriptions + -b, --baseline-file BASEFILE Use BASEFILE as baseline file glob match pattern + --annotate-script SCRIPT Use SCRIPT to get revision control data + --criteria-script SCRIPT Use SCRIPT to check for acceptance criteria + --version-script SCRIPT Use SCRIPT to check for compatibility of + source code and coverage data + --resolve-script SCRIPT Call script to find source file frpm path + --(no-)checksum Compare (ignore) source line checksum + --diff-file UDIFF Unified diff file UDIFF describes source + code changes between baseline and current + --new-file-as-baseline Classify new files as baseline data + --elide-path-mismatch Identify matching files if their basename + matches even though dirname does not + --date-bins day[,day,...] Use DAY number of days as upper age limit + for the corresponding date bin + -p, --prefix PREFIX Remove PREFIX from all directory names + --no-prefix Do not remove prefix from directory names + --(no-)function-coverage Enable (disable) function coverage display + --(no-)branch-coverage Enable (disable) branch coverage display + --filter FILTERS Apply FILTERS to input data (see man page + for full list of filters and their effects) + --include PATTERN Only show output for files matching PATTERN + --exclude PATTERN Skip output for files matching PATTERN + --source-directory DIR Search DIR for source files + --substitute REGEXP Change source file names according to REGEXP + --erase-functions REGEXP Exclude data for functions matching REGEXP + --omit-lines REGEXP Ignore data in lines matching REGEXP + --forget-test-names Merge data for all tests names + --synthesize-missing Generate fake source for missing files + -j, --parallel [N] Use parallel processing with at most N jobs + --memory MB Use at most MB memory in parallel processing + --profile [FILENAME] Write performance statistics to FILENAME + (default: OUTDIR/genhtml.json) + --save Write copy of input files to OUTDIR + +HTML OUTPUT + -f, --frames Use HTML frames for source code view + -t, --title TITLE Use TITLE as label for current data + --baseline-title TITLE Use TITLE as label for baseline data + --current-date DATE Use DATE as date label for current data + --baseline-date DATE Use DATE as date label for baseline data + -c, --css-file CSSFILE Use external style sheet file CSSFILE + --header-title BANNER Banner at top of each HTML page + --footer FOOTER Footer at bottom of each HTML page + --no-sourceview Do not create source code view + --num-spaces NUM Replace tabs with NUM spaces in source view + --legend Include color legend in HTML output + --html-prolog FILE Use FILE as HTML prolog for generated pages + --html-epilog FILE Use FILE as HTML 
epilog for generated pages + --html-extension EXT Use EXT as filename extension for pages + --html-gzip Use gzip to compress HTML + --(no-)sort Enable (disable) sorted coverage views + --demangle-cpp [OPT] Demangle C++ function names + --precision NUM Set precision of coverage rate + --missed Show miss counts as negative numbers + --dark-mode Use the dark-mode CSS + --simplified-colors Use reduced color scheme for categories + --hierarchical Generate multilevel HTML report, + matching source code directory structure + --flat Generate flat HTML report, with all files + listed on top-level page + -s, --show-details Generate detailed directory view + --show-owners [all] Show owner summary table. If optional + value provided, show all the owners, + regardless of whether they have uncovered + code or not + --show-noncode Show author in summary table even if none + of their lines are recognized as code + --show-zero-columns Keep summary columns for categories with + no entries (default: remove) + --show-navigation Include 'goto first hit/not hit' and + 'goto next hit/not hit' hyperlinks in + non-differential source code detail page + --show-proportion Show function coverage rates + --suppress-aliases Merge data for function aliases + +For more information see the genhtml man page. +END_OF_USAGE - $db1->{$line}->{$block}->{$branch} = $new; - } - } - } } - # -# brcount_db_get_found_and_hit(db) +# print_overall_rate(trace, ln_do, fn_do, br_do, mcdc_do, summary) # -# Return (br_found, br_hit) for db. +# Print overall coverage rates for the specified coverage types. # -sub brcount_db_get_found_and_hit($) +sub print_overall_rate($$$$$$) { - my ($db) = @_; - my ($br_found , $br_hit) = (0, 0); - - foreach my $line (keys(%{$db})) { - my $ldata = $db->{$line}; - - foreach my $block (keys(%{$ldata})) { - my $bdata = $ldata->{$block}; - - foreach my $branch (keys(%{$bdata})) { - my $taken = $bdata->{$branch}; - - $br_found++; - $br_hit++ if ($taken ne "-" && $taken > 0); - } - } - } - - return ($br_found, $br_hit); + my ($currentTrace, $ln_do, $fn_do, $br_do, $mcdc_do, $summary) = @_; + + # use verbosity level -1: so print unless user says "-q -q"...really quiet + info(-1, "Overall coverage rate:\n"); + my @types; + push(@types, SummaryInfo::LINE_DATA) if $ln_do; + push(@types, SummaryInfo::FUNCTION_DATA) if $fn_do; + push(@types, SummaryInfo::BRANCH_DATA) if $br_do; + push(@types, SummaryInfo::MCDC_DATA) if $mcdc_do; + + # use source file count from current - we don;t care about files + # that were deleted and are in baseline + info(-1, " source files: %d\n", scalar($currentTrace->files())); + my $width = length("source files"); + my $indent = ' '; + for my $type (@types) { + my $name = SummaryInfo::type2str($type); + my $plural = "ch" eq substr($name, -2, 2) ? "es" : "s"; + my $label = "$name$plural"; + my $fill = '.' x ($width - length($label)); + info(-1, + " $name$plural$fill: %s\n", + get_overall_line($summary->get("found", $type), + $summary->get("hit", $type), + $name)); + if ($main::show_tla) { + for my $tla (@SummaryInfo::tlaPriorityOrder) { + my $v = $summary->get($tla, $type); + next if $v == 0; + my $label = + $main::use_legacyLabels ? + $SummaryInfo::tlaToLegacySrcLabel{$tla} : + $tla; + my $f = '.' x ($width - (length($indent) + length($label))); + info(-1, " $indent$label$f: $v\n"); + } + } + } + summarize_cov_filters(); + summarize_messages(); } - -# combine_brcount(brcount1, brcount2, type, inplace) # -# If add is BR_ADD, add branch coverage data and return list brcount_added. 
-# If add is BR_SUB, subtract the taken values of brcount2 from brcount1 and -# return brcount_sub. If inplace is set, the result is inserted into brcount1. +# gen_html() +# +# Generate a set of HTML pages from contents of .info file INFO_FILENAME. +# Files will be written to the current directory. If provided, test case +# descriptions will be read from .tests file TEST_FILENAME and included +# in output. +# +# Die on error. # -sub combine_brcount($$$;$) +sub gen_html() { - my ($brcount1, $brcount2, $type, $inplace) = @_; - my ($db1, $db2); - - $db1 = brcount_to_db($brcount1); - $db2 = brcount_to_db($brcount2); - brcount_db_combine($db1, $db2, $type); - - return db_to_brcount($db1, $inplace ? $brcount1 : undef); + # "Read + + # Read in all specified .info files + my $now = Time::HiRes::gettimeofday(); + my $readSourceFile = ReadCurrentSource->new(); + ($current_data) = AggregateTraces::merge($readSourceFile, @info_filenames); + my $then = Time::HiRes::gettimeofday(); + $lcovutil::profileData{parse_current} = $then - $now; + + info("Found %d entries.\n", scalar($current_data->files())); + + # Read and apply diff data if specified - need this before we + # try to read and process the baseline.. + if ($diff_filename) { + $now = Time::HiRes::gettimeofday(); + info("Reading diff file $diff_filename\n"); + $diff_data->load($diff_filename, $current_data, + \@lcovutil::build_directory); + $then = Time::HiRes::gettimeofday(); + $lcovutil::profileData{parse_diff} = $then - $now; + } + + # Read and apply baseline data if specified + if (@base_filenames) { + $now = Time::HiRes::gettimeofday(); + my $readBaseSource = ReadBaselineSource->new($diff_data); + $then = Time::HiRes::gettimeofday(); + $lcovutil::profileData{parse_source} = $then - $now; + # Read baseline file + + $now = Time::HiRes::gettimeofday(); + ($base_data) = AggregateTraces::merge($readBaseSource, @base_filenames); + info("Found %d baseline entries.\n", scalar($base_data->files())); + $then = Time::HiRes::gettimeofday(); + $lcovutil::profileData{parse_baseline} = $then - $now; + } elsif (defined($diff_filename)) { + # no data.. + $base_data = TraceFile->new(); + } + + if ($diff_filename) { + # check for files which appear in the udiff but which dont appear + # in either the current or baseline trace data. Those may be + # mapping issues - different pathname in .info file vs udiff + if (!$diff_data->check_path_consistency($base_data, $current_data)) { + lcovutil::ignorable_error($lcovutil::ERROR_PATH, + "possible path inconsistency in baseline/current/udiff data"); + } + } + + if ($no_prefix) { + # User requested that we leave filenames alone + info("User asked not to remove filename prefix\n"); + } elsif (!@dir_prefix) { + # Get prefix common to most directories in list + my $prefix = get_prefix(1, $current_data->files()); + + if ($prefix) { + info("Found common filename prefix \"$prefix\"\n"); + $dir_prefix[0] = $prefix; + } else { + info("No common filename prefix found!\n"); + $no_prefix = 1; + } + } else { + my $msg = "Using user-specified filename prefix "; + my $dirs = $current_data->directories(); + my $i = 0; + # somewhat of a hack: the layout code doesn't react well when + # the 'prefix' directory contains source files (as opposed to + # containing a directory which contains source files). + # Rather than trying to handle that special case, just munge the + # prefix to be something we like better. 
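+        # For each user-supplied prefix: strip trailing separators, then
+        # walk up with dirname() while the prefix still names one of the
+        # directories that directly holds source files; prefixes that
+        # reduce to an empty string are dropped.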
+ while ($i <= $#dir_prefix) { + my $p = $dir_prefix[$i]; + # remove redundant /'s + $p =~ s/$lcovutil::dirseparator+$//; + $p = substr($p, 0, -1) + if $lcovutil::dirseparator eq substr($p, -1); + while (exists($dirs->{$p}) && $p) { + $p = File::Basename::dirname($p); + } + unless ($p) { + lcovutil::info("skipping prefix $dir_prefix[$i]\n"); + splice(@dir_prefix, $i, 1); + next; + } + lcovutil::info( + "using prefix '$p' (rather than '$dir_prefix[$i]')\n") + if ($p ne $dir_prefix[$i]); + $dir_prefix[$i] = $p; + $msg .= ", " unless 0 == $i; + $msg .= "\"" . $p . "\""; + ++$i; + } + info($msg . "\n"); + } + + # Read in test description file if specified + if ($desc_filename) { + info("Reading test description file $desc_filename\n"); + %test_description = %{read_testfile($desc_filename)}; + + # Remove test descriptions which are not referenced + # from %current_data if user didn't tell us otherwise + if (!$keep_descriptions) { + remove_unused_descriptions(); + } + } + + # add quotes to args - so string concat works if there are embedded spaces, etc + foreach my $s (\@SourceFile::annotateScript, + \@CoverageCriteria::coverageCriteriaScript, + \@lcovutil::extractVersionScript) { + my $count = scalar(@$s); + next unless $count > 1; + foreach my $e (@$s) { + $e = "'$e'" if ($e =~ /\s/); + } + } + + unless ($no_html) { + info(1, "Writing .css and .png files.\n"); + write_css_file(); + write_png_files(); + } + if ($html_gzip) { + info(1, "Writing .htaccess file.\n"); + write_htaccess_file(); + } + + info("Generating output.\n"); + + my $genhtml = GenHtml->new($current_data); + + if (@SourceFile::annotateScript && + 0 == $SourceFile::annotatedFiles) { + lcovutil::ignorable_error($lcovutil::ERROR_ANNOTATE_SCRIPT, + "\"--annotate-script '" . + join(' ', @SourceFile::annotateScript) . + "'\" did not find any revision-controlled files in your sandbox" + ); + } + + # Check if there are any test case descriptions to write out + if (%test_description) { + info("Writing test case description file.\n"); + write_description_file(\%test_description, $genhtml->top()); + } + + print_overall_rate($current_data, 1, $lcovutil::func_coverage, + $lcovutil::br_coverage, $lcovutil::mcdc_coverage, + $genhtml->top()); + + return $genhtml->top(); } - # -# add_testbrdata(testbrdata1, testbrdata2) -# -# Add branch coverage data for several tests. Return reference to -# added_testbrdata. 
+# html_create(handle, filename) # -sub add_testbrdata($$) +sub html_create($$) { - my ($testbrdata1, $testbrdata2) = @_; - my %result; - my $testname; - - foreach $testname (keys(%{$testbrdata1})) { - if (defined($testbrdata2->{$testname})) { - my $brcount; + my $handle = $_[0]; + my $filename = File::Spec->catfile($output_directory, $_[1]); + $filename = lc($filename) if $lcovutil::case_insensitive; + + if ($html_gzip) { + open($handle, "|-", "gzip -c > $filename'") or + die("cannot open $filename for writing (gzip): $!\n"); + } else { + open($handle, ">", $filename) or + die("cannot open $filename for writing: $!\n"); + } +} - # Branch coverage data for this testname exists - # in both data sets: add - ($brcount) = combine_brcount($testbrdata1->{$testname}, - $testbrdata2->{$testname}, $BR_ADD); - $result{$testname} = $brcount; - next; - } - # Branch coverage data for this testname is unique to - # data set 1: copy - $result{$testname} = $testbrdata1->{$testname}; - } +# $ctrls = [$view_type, $sort_type, $bin_prefix] +# $perTestcaseResult = [\%line, \%func, \%branch] +#sub write_dir_page($$$$$$$;$) +sub write_dir_page +{ + my ($callback_type, $ctrls, $page_suffix, + $title, $rel_dir, $base_dir, + $trunc_dir, $summary, $perTestcaseResult) = @_; + + my $bin_prefix = $ctrls->[3]; + # Generate directory overview page including details + html_create(*HTML_HANDLE, + File::Spec->catfile( + $rel_dir, "index$bin_prefix$page_suffix.$html_ext" + )); + if (!defined($trunc_dir)) { + $trunc_dir = ""; + } + $title .= " - " if ($trunc_dir ne ""); + write_html_prolog(*HTML_HANDLE, $base_dir, "LCOV - $title$trunc_dir"); + my $activeTlaColsForType = + write_header(*HTML_HANDLE, $callback_type, $ctrls, $trunc_dir, + $rel_dir, $summary, undef, undef); + if (0 != $summary->sources()) { + write_file_table(*HTML_HANDLE, $callback_type, $base_dir, + $perTestcaseResult, + $summary, $ctrls, $activeTlaColsForType); + } else { + my $msg = + "Coverage data table is empty - no coverpoints exist in the selected subset."; + write_html(*HTML_HANDLE, < + + +
+            $msg
+
+END_OF_HTML + } + write_html_epilog(*HTML_HANDLE, $base_dir); + close(*HTML_HANDLE) or die("unable to close HTML handle: $!\n"); +} - # Add count data for testnames unique to data set 2 - foreach $testname (keys(%{$testbrdata2})) { - if (!defined($result{$testname})) { - $result{$testname} = $testbrdata2->{$testname}; - } - } - return \%result; +sub write_summary_pages($$$$$$$$) +{ + my ($name, $summaryType, $summary, $show_details, + $rel_dir, $base_dir, $trunc_dir, $testhashes) = @_; + + my $callback_type = $summaryType == 1 ? 'directory' : 'top'; + foreach my $c ($summary->sources()) { + my $child = $summary->get_source($c); + # filter this one out if no data + if ($child->is_empty()) { + $summary->remove_source($c); + } + } + return if $main::no_html; + if (0 == $summary->sources() && $summaryType == 1) { + # remove empty directory + push(@cleanDirectoryList, $rel_dir); + return; + } + + if ($main::show_tla) { + info('Processing ' + . + ($summaryType == 1 ? ('directory: ' . $summary->name()) : + 'top level:') . + "\n"); + my $sep = $summaryType == 1 ? $summary->name() : ''; + foreach my $t ('line', + $lcovutil::br_coverage ? 'branch' : undef, + $lcovutil::mcdc_coverage ? 'mcdc' : undef, + $lcovutil::func_coverage ? 'function' : undef + ) { + my $s = $summary->tlaSummary($t) if defined($t); + next unless $s; + info(" $t: $s\n"); + } + } + + my $start = Time::HiRes::gettimeofday(); + my @summaryBins; + if (defined($main::show_ownerBins)) { + if (0 != scalar($summary->owners( + $main::show_ownerBins, SummaryInfo::LINE_DATA + )) + || + ( $lcovutil::br_coverage && + 0 != scalar($summary->owners( + $main::show_ownerBins, SummaryInfo::BRANCH_DATA + ))) + || + ( $lcovutil::mcdc_coverage && + 0 != scalar($summary->owners( + $main::show_ownerBins, SummaryInfo::MCDC_DATA + ))) + ) { + # at least one owner will appear - so table will be referenced + push(@summaryBins, 'owner'); + } else { + lcovutil::info(1, + $summary->name() . " has no visible owners..eliding page\n"); + } + } + push(@summaryBins, 'date') if defined($main::show_dateBins); + + my $singleSource = 1 == scalar($summary->sources()); + my @dirPageCalls; + foreach my $sort_type (@main::fileview_sortlist) { + my @ctrls = ($summaryType, # 1 == 'list files' + "name", # primary key + $sort_type, "", $singleSource); + my $sort_str = $main::fileview_sortname[$sort_type]; + # 'fileview prefixes' is ('', '-date', '-owner) + foreach my $bin_prefix (@main::fileview_prefixes) { + # Generate directory overview page (without details) + # no per-testcase data in this page... + $ctrls[3] = $bin_prefix; + # need copy because we are calling multiple child processes + my @copy = @ctrls; + push(@dirPageCalls, + [\@copy, $sort_str, $test_title, $rel_dir, + $base_dir, $trunc_dir, $summary + ]); + + if ($show_details) { + # Generate directory overview page including details + push(@dirPageCalls, + [\@copy, "-detail" . $sort_str, + $test_title, $rel_dir, + $base_dir, $trunc_dir, + $summary, $testhashes + ]); + } + } + $ctrls[3] = ""; # no bin... + foreach my $primary_key (@summaryBins) { + # we don't associate function owner - so we elide 'function' columns in the + # 'owner detail' pages - and thus won't create a sort-by-function link. 
+ # Don't generate the unreferenced page + next if $sort_type == $SORT_FUNC && $primary_key eq 'owner'; + # we elide the 'line' sort links for date/owner page if there + # is only one file + next + if $sort_type == $SORT_LINE && + $primary_key ne 'name' && + scalar($summary->sources()) < 2; + $ctrls[1] = $primary_key; + my @copy = @ctrls; + push(@dirPageCalls, + [\@copy, '-bin_' . $primary_key . $sort_str, + $test_title, $rel_dir, $base_dir, $trunc_dir, $summary + ]); + } + last if $singleSource; + } + + foreach my $params (@dirPageCalls) { + write_dir_page($callback_type, @$params); + last # only write 'index.html' - not the sorted versions + if ($summary->is_empty()); + } + my $end = Time::HiRes::gettimeofday(); + $lcovutil::profileData{html}{$name} = $end - $start; } +sub write_function_page($$$$$$$$$$$$$$$) +{ + # Generate function table for this file + my ($fileCovInfo, $base_dir, $rel_dir, $trunc_dir, + $base_name, $title, $sumcount, $funcdata, + $testfncdata, $sumbrcount, $testbrdata, $mcdc, + $testcase_mcdc, $sort_type, $summary) = @_; + # $fileCovInfo is array [function hash, line hash, branch hash] + my $filename; + if ($sort_type == $main::SORT_FILE) { + $filename = File::Spec->catfile($rel_dir, "$base_name.func.$html_ext"); + } elsif ($sort_type == $main::SORT_LINE) { + # by declaration line number + $filename = + File::Spec->catfile($rel_dir, "$base_name.func-c.$html_ext"); + } elsif ($sort_type == $main::SORT_MISSING_LINE) { + # by number of un-exercised lines + $filename = + File::Spec->catfile($rel_dir, "$base_name.func-l.$html_ext"); + } elsif ($sort_type == $main::SORT_MISSING_MCDC) { + # by number of un-exercised MC/DC expressions + return unless %{$fileCovInfo->[3]}; + $filename = + File::Spec->catfile($rel_dir, "$base_name.func-m.$html_ext"); + } else { + die("Unexpected sort $sort_type") + unless ($sort_type == $main::SORT_MISSING_BRANCH); + # don't emit page if there are no branches in the file + return unless %{$fileCovInfo->[2]}; + # by declaration line number + $filename = + File::Spec->catfile($rel_dir, "$base_name.func-b.$html_ext"); + } + html_create(*HTML_HANDLE, $filename); + my $pagetitle = "LCOV - $title - " . + File::Spec->catfile($trunc_dir, $base_name) . " - functions"; + write_html_prolog(*HTML_HANDLE, $base_dir, $pagetitle); + write_header(*HTML_HANDLE, + 'file', # function table always written from 'file' level + [4, 'name', $sort_type,], + File::Spec->catfile($trunc_dir, $base_name), + File::Spec->catfile($rel_dir, $base_name), + $summary, undef, $funcdata->[0]); + write_function_table(*HTML_HANDLE, $fileCovInfo, + "$base_name.gcov.$html_ext", $sumcount, + $funcdata, $testfncdata, + $sumbrcount, $testbrdata, + $mcdc, $testcase_mcdc, + $base_name, $base_dir, + $sort_type); + write_html_epilog(*HTML_HANDLE, $base_dir, 1); + close(*HTML_HANDLE) or die("unable to close HTML handle: $!\n"); +} # -# combine_info_entries(entry_ref1, entry_ref2, filename) -# -# Combine .info data entry hashes referenced by ENTRY_REF1 and ENTRY_REF2. -# Return reference to resulting hash. 
+# process_file(parent_dir_summary, trunc_dir, rel_dir, filename) # -sub combine_info_entries($$$) +sub process_file($$$$$) { - my $entry1 = $_[0]; # Reference to hash containing first entry - my $testdata1; - my $sumcount1; - my $funcdata1; - my $checkdata1; - my $testfncdata1; - my $sumfnccount1; - my $testbrdata1; - my $sumbrcount1; - - my $entry2 = $_[1]; # Reference to hash containing second entry - my $testdata2; - my $sumcount2; - my $funcdata2; - my $checkdata2; - my $testfncdata2; - my $sumfnccount2; - my $testbrdata2; - my $sumbrcount2; - - my %result; # Hash containing combined entry - my %result_testdata; - my $result_sumcount = {}; - my $result_funcdata; - my $result_testfncdata; - my $result_sumfnccount; - my $result_testbrdata; - my $result_sumbrcount; - my $lines_found; - my $lines_hit; - my $fn_found; - my $fn_hit; - my $br_found; - my $br_hit; - - my $testname; - my $filename = $_[2]; - - # Retrieve data - ($testdata1, $sumcount1, $funcdata1, $checkdata1, $testfncdata1, - $sumfnccount1, $testbrdata1, $sumbrcount1) = get_info_entry($entry1); - ($testdata2, $sumcount2, $funcdata2, $checkdata2, $testfncdata2, - $sumfnccount2, $testbrdata2, $sumbrcount2) = get_info_entry($entry2); - - # Merge checksums - $checkdata1 = merge_checksums($checkdata1, $checkdata2, $filename); - - # Combine funcdata - $result_funcdata = merge_func_data($funcdata1, $funcdata2, $filename); - - # Combine function call count data - $result_testfncdata = add_testfncdata($testfncdata1, $testfncdata2); - ($result_sumfnccount, $fn_found, $fn_hit) = - add_fnccount($sumfnccount1, $sumfnccount2); - - # Combine branch coverage data - $result_testbrdata = add_testbrdata($testbrdata1, $testbrdata2); - ($result_sumbrcount, $br_found, $br_hit) = - combine_brcount($sumbrcount1, $sumbrcount2, $BR_ADD); - - # Combine testdata - foreach $testname (keys(%{$testdata1})) - { - if (defined($testdata2->{$testname})) - { - # testname is present in both entries, requires - # combination - ($result_testdata{$testname}) = - add_counts($testdata1->{$testname}, - $testdata2->{$testname}); - } - else - { - # testname only present in entry1, add to result - $result_testdata{$testname} = $testdata1->{$testname}; - } - - # update sum count hash - ($result_sumcount, $lines_found, $lines_hit) = - add_counts($result_sumcount, - $result_testdata{$testname}); - } - - foreach $testname (keys(%{$testdata2})) - { - # Skip testnames already covered by previous iteration - if (defined($testdata1->{$testname})) { next; } - - # testname only present in entry2, add to result hash - $result_testdata{$testname} = $testdata2->{$testname}; - - # update sum count hash - ($result_sumcount, $lines_found, $lines_hit) = - add_counts($result_sumcount, - $result_testdata{$testname}); - } - - # Calculate resulting sumcount - - # Store result - set_info_entry(\%result, \%result_testdata, $result_sumcount, - $result_funcdata, $checkdata1, $result_testfncdata, - $result_sumfnccount, $result_testbrdata, - $result_sumbrcount, $lines_found, $lines_hit, - $fn_found, $fn_hit, $br_found, $br_hit); - - return(\%result); -} - - -# -# combine_info_files(info_ref1, info_ref2) -# -# Combine .info data in hashes referenced by INFO_REF1 and INFO_REF2. Return -# reference to resulting hash. 
-# - -sub combine_info_files($$) -{ - my %hash1 = %{$_[0]}; - my %hash2 = %{$_[1]}; - my $filename; - - foreach $filename (keys(%hash2)) - { - if ($hash1{$filename}) - { - # Entry already exists in hash1, combine them - $hash1{$filename} = - combine_info_entries($hash1{$filename}, - $hash2{$filename}, - $filename); - } - else - { - # Entry is unique in both hashes, simply add to - # resulting hash - $hash1{$filename} = $hash2{$filename}; - } - } - - return(\%hash1); + my ($fileSummary, $parent_dir_summary, $trunc_dir, $rel_dir, $filename) = + @_; + my $trunc_name = apply_prefix($filename, @dir_prefix); + info("Processing file $trunc_name" + . + (($main::diff_filename && $diff_data->containsFile($filename)) ? + ' (source code changed)' : + '') . + "\n"); + + my $base_name = basename($filename); + my $base_dir = get_relative_base_path($rel_dir); + my @source; + my $pagetitle; + local *HTML_HANDLE; + + my $fileData = $current_data->data($filename); # TraceInfo struct + my ($testdata, $sumcount, $funcdata, + $checkdata, $testfncdata, $testbrdata, + $sumbrcount, $mcdc_summary, $testcase_mcdc) = $fileData->get_info(); + + my ($lines_found, $lines_hit, $br_found, $br_hit, + $mcdc_found, $mcdc_hit, $fn_found, $fn_hit) + = ($fileData->found(), $fileData->hit(), + $fileData->branch_found(), $fileData->branch_hit(), + $fileData->mcdc_found(), $fileData->mcdc_hit(), + $fileData->function_found(), $fileData->function_hit()); + + $fileSummary->lines_found($lines_found); + $fileSummary->lines_hit($lines_hit); + $fileSummary->function_found($fn_found); + $fileSummary->function_hit($fn_hit); + $fileSummary->branch_found($br_found); + $fileSummary->branch_hit($br_hit); + $fileSummary->mcdc_found($mcdc_found); + $fileSummary->mcdc_hit($mcdc_hit); + + # handle case that file was moved between baseline and current + my $baseline_filename = $diff_data->baseline_file_name($filename); + # when looking up the baseline file, handle the case that the + # pathname does not match exactly - see comment in TraceFile::data + my $fileBase = $base_data->data($baseline_filename, 1) + if defined($base_data); + my $fileCurrent = $current_data->data($filename); + # build coverage differential categories + my $now = Time::HiRes::gettimeofday(); + my $fileCovInfo = + FileCoverageInfo->new($filename, + $fileBase, + $fileCurrent, + $diff_data, + defined($main::verboseScopeRegexp) && + $filename =~ m/$main::verboseScopeRegexp/); + my $then = Time::HiRes::gettimeofday(); + $lcovutil::profileData{categorize}{$filename} = $then - $now; + + my $r = " lines=$lines_found hit=$lines_hit"; + $r .= " functions=$fn_found hit=$fn_hit" + if $lcovutil::func_coverage && $fn_found != 0; + $r .= " branches=$br_found hit=$br_hit" + if $lcovutil::br_coverage && $br_found != 0; + $r .= " MC/DC=$mcdc_found hit=$mcdc_hit" + if $lcovutil::mcdc_coverage && $mcdc_found != 0; + info($r . 
"\n"); + + my $fileHasNoBaselineInfo = $main::treatNewFileAsBaseline && + (!defined($fileBase) || + ($fileBase->is_empty() && @main::base_filenames)); + # if this file is older than the baseline and there is no associated + # baseline data - then it appears to have been added to the build + # recently + # We want to treat the code as "CBC" or "UBC" (not "GIC" and "UIC") + # because we only just turned section "on" - and we don't want the + # coverage ratchet to fail the build if UIC is nonzero + + # NOTE: SourceFile constructor modifies some input data: + # - $fileSummary struct is also modified: update total counts in + # each bucket, counts in each date range + # - $fileCovInfo: change GIC->CBC, UIC->UBC if $fineNotInBaseline and + # source code is older than baseline file + my $srcfile = SourceFile->new($filename, $fileSummary, $fileCovInfo, + $fileHasNoBaselineInfo); + my $endSrc = Time::HiRes::gettimeofday(); + $lcovutil::profileData{source}{$filename} = $endSrc - $then; + + if (@main::base_filenames) { + # summarize the bin data if we had baseline data for comparison + foreach my $t (['line', $lines_found], + ['branch', $lcovutil::br_coverage ? $br_found : 0], + ['mcdc', $lcovutil::mcdc_coverage ? $mcdc_found : 0], + ['function', $lcovutil::func_coverage ? $fn_found : 0] + ) { + next if 0 == $t->[1]; + info(' ' . + $t->[0] . ': ' . $fileSummary->tlaSummary($t->[0]) . "\n"); + } + } + + if ($srcfile->is_empty()) { + return; + } + # somewhat of a hack: we are ultimately going to merge $fileSummary + # (the data for this particular file) into $parent_dir (the data + # for the parent directory) - but we need to do that in the caller + # (because we are building $fileSummary in a child process that we are + # going to pass back. But we also use the parent and its name + # in HTML generation... + # we clear this setting before we return the generated summary + $fileSummary->setParent($parent_dir_summary); + + my $from = Time::HiRes::gettimeofday(); + # Return after this point in case user asked us not to generate + # source code view + if (!$no_sourceview) { + # Generate source code view for this file + my $differentialFunctionMap; + if ($lcovutil::func_coverage) { + $differentialFunctionMap = $fileCovInfo->functionMap(); + if ($SummaryInfo::selectCallback) { + # prune hash to remove functions which are outside the + # selected region + my $lineCovMap = $fileCovInfo->lineMap(); + my %tempMap; + while (my ($f, $fn) = each(%$differentialFunctionMap)) { + my $line = $fn->line(); + $tempMap{$f} = $fn if exists($lineCovMap->{$line}); + } + $differentialFunctionMap = \%tempMap; + } + } + html_create(*HTML_HANDLE, + File::Spec->catfile($rel_dir, "$base_name.gcov.$html_ext")); + $pagetitle = "LCOV - $test_title - " . 
+ File::Spec->catfile($trunc_dir, $base_name); + write_html_prolog(*HTML_HANDLE, $base_dir, $pagetitle); + write_header(*HTML_HANDLE, + 'file', + [2, 'name', 0], + File::Spec->catfile($trunc_dir, $base_name), + File::Spec->catfile($rel_dir, $base_name), + $fileSummary, + $srcfile, + $differentialFunctionMap); + + @source = write_source(*HTML_HANDLE, $srcfile, $sumcount, + $checkdata, $fileCovInfo, $funcdata, + $sumbrcount, $mcdc_summary); + + write_html_epilog(*HTML_HANDLE, $base_dir, 1); + close(*HTML_HANDLE) or die("unable to close HTML handle: $!\n"); + + if ($lcovutil::func_coverage) { + # Create function tables + my $lineCovMap = $fileCovInfo->lineMap(); + # simply map between function leader name and differential data + # compute number of hit/missed coverpoints in each function + my %lineCov; + my %branchCov; + my %mcdcCov; + if ($main::show_functionProportions) { + while (my ($name, $funcEntry) = each(%$differentialFunctionMap)) + { + my $lineData = $funcEntry->findMyLines($sumcount); + if (defined($lineData)) { + my $found = 0; + my $hit = 0; + foreach my $d (@$lineData) { + ++$found; + ++$hit if (0 != $d->[1]); + } + $lineCov{$name} = [$found, $hit]; + } + if ($lcovutil::br_coverage) { + my $branchData = + $funcEntry->findMyBranches($sumbrcount); + if (defined($branchData) && + 0 != scalar(@$branchData)) { + # there are branches here.. + my $found = 0; + my $hit = 0; + foreach my $branch (@$branchData) { + my ($f, $h) = $branch->totals(); + $found += $f; + $hit += $h; + } + $branchCov{$name} = [$found, $hit]; + } + } + if ($lcovutil::mcdc_coverage) { + my $mcdcData = $funcEntry->findMyMcdc($mcdc_summary); + if (defined($mcdcData) && + 0 != scalar(@$mcdcData)) { + # there are MC/DC expressions here.. + my $found = 0; + my $hit = 0; + foreach my $mcdc (@$mcdcData) { + my ($f, $h) = $mcdc->totals(); + $found += $f; + $hit += $h; + } + $mcdcCov{$name} = [$found, $hit]; + } + } + } + } + if (%$differentialFunctionMap) { + foreach my $sort_type (@funcview_sortlist) { + write_function_page([$differentialFunctionMap, + \%lineCov, \%branchCov, \%mcdcCov, + ], + $base_dir, + $rel_dir, + $trunc_dir, + $base_name, + $test_title, + $sumcount, + $funcdata, + $testfncdata, + $sumbrcount, + $testbrdata, + $mcdc_summary, + $testcase_mcdc, + $sort_type, + $fileSummary); + } + } + } + + # Additional files are needed in case of frame output + if ($frames) { + # Create overview png file + my $simplified = defined($main::show_simplifiedColors) && + $main::show_simplifiedColors; + my $png = File::Spec->catfile($rel_dir, "$base_name.gcov.png"); + $png = lc($png) if $lcovutil::case_insensitive; + gen_png(File::Spec->catfile($output_directory, $png), + $main::show_tla && !$simplified, + $main::dark_mode, + $overview_width, + $tab_size, + @source); + + # Create frameset page + html_create(*HTML_HANDLE, + File::Spec->catfile( + $rel_dir, "$base_name.gcov.frameset.$html_ext" + )); + write_frameset(*HTML_HANDLE, $base_dir, $base_name, $pagetitle); + close(*HTML_HANDLE) or die("unable to close HTML handle: $!\n"); + + # Write overview frame + html_create(*HTML_HANDLE, + File::Spec->catfile( + $rel_dir, "$base_name.gcov.overview.$html_ext" + )); + write_overview(*HTML_HANDLE, $base_dir, $base_name, + $pagetitle, scalar(@source)); + close(*HTML_HANDLE) or die("unable to close HTML handle: $!\n"); + } + } + my $to = Time::HiRes::gettimeofday(); + $lcovutil::profileData{html}{$filename} = $to - $from; + return ($testdata, $testfncdata, $testbrdata, $testcase_mcdc); } +sub compute_title($$) +{ + my ($patterns, 
$info_files) = @_; + + my $title; + if (1 == scalar(@$info_files)) { + # just one coverage DB file + $title = basename($info_files->[0]); + } elsif (1 == scalar(@$patterns)) { + # just one pattern... + $title = $patterns->[0]; + $title = substr($title, length($main::cwd) + 1) + if (File::Spec->file_name_is_absolute($title) && + length($main::cwd) < length($title) && + $main::cwd eq substr($title, 0, length($main::cwd))); + } else { + $title = scalar(@$info_files) . ' coverage DB files'; + } + return $title; +} # # get_prefix(min_dir, filename_list) @@ -2381,78 +8272,76 @@ sub combine_info_files($$) sub get_prefix($@) { - my ($min_dir, @filename_list) = @_; - my %prefix; # mapping: prefix -> sum of lengths - my $current; # Temporary iteration variable - - # Find list of prefixes - foreach (@filename_list) - { - # Need explicit assignment to get a copy of $_ so that - # shortening the contained prefix does not affect the list - $current = $_; - while ($current = shorten_prefix($current)) - { - $current .= "/"; - - # Skip rest if the remaining prefix has already been - # added to hash - if (exists($prefix{$current})) { last; } - - # Initialize with 0 - $prefix{$current}="0"; - } - - } - - # Remove all prefixes that would cause filenames to have less than - # the minimum number of parent directories - foreach my $filename (@filename_list) { - my $dir = dirname($filename); - - for (my $i = 0; $i < $min_dir; $i++) { - delete($prefix{$dir."/"}); - $dir = shorten_prefix($dir); - } - } - - # Check if any prefix remains - return undef if (!%prefix); - - # Calculate sum of lengths for all prefixes - foreach $current (keys(%prefix)) - { - foreach (@filename_list) - { - # Add original length - $prefix{$current} += length($_); - - # Check whether prefix matches - if (substr($_, 0, length($current)) eq $current) - { - # Subtract prefix length for this filename - $prefix{$current} -= length($current); - } - } - } - - # Find and return prefix with minimal sum - $current = (keys(%prefix))[0]; - - foreach (keys(%prefix)) - { - if ($prefix{$_} < $prefix{$current}) - { - $current = $_; - } - } - - $current =~ s/\/$//; - - return($current); + my ($min_dir, @filename_list) = @_; + my %prefix; # mapping: prefix -> sum of lengths + my $current; # Temporary iteration variable + + # Find list of prefixes + my @munged; + foreach (@filename_list) { + # Need explicit assignment to get a copy of $_ so that + # shortening the contained prefix does not affect the list + my $current = ReadCurrentSource::resolve_path($_); + my ($vol, $parentDir, $file) = File::Spec->splitpath($current); + if (!File::Spec->file_name_is_absolute($current)) { + if ($parentDir) { + $parentDir = File::Spec->catfile($main::cwd, $parentDir); + } else { + $parentDir = $main::cwd; + } + $current = File::Spec->catfile($parentDir, $file); + } + push(@munged, $current); + while ($current = shorten_prefix($current)) { + # Skip rest if the remaining prefix has already been + # added to hash + if (exists($prefix{$current})) { last; } + + # Initialize with 0 + $prefix{$current} = "0"; + } + + } + + # Remove all prefixes that would cause filenames to have less than + # the minimum number of parent directories + foreach my $filename (@munged) { + my $dir = dirname($filename); + + for (my $i = 0; $i < $min_dir; $i++) { + delete($prefix{$dir}); + $dir = shorten_prefix($dir); + } + } + + # Check if any prefix remains + return undef if (!%prefix); + + # Calculate sum of lengths for all prefixes + foreach $current (keys(%prefix)) { + foreach (@munged) { + # Add 
original length + $prefix{$current} += length($_); + + # Check whether prefix matches + if (substr($_, 0, length($current)) eq $current) { + # Subtract prefix length for this filename + $prefix{$current} -= length($current); + } + } + } + + # Find and return prefix with minimal sum + $current = (keys(%prefix))[0]; + + foreach (keys(%prefix)) { + if ($prefix{$_} < $prefix{$current}) { + $current = $_; + } + } + return ($current); } - # # shorten_prefix(prefix) # @@ -2461,33 +8350,11 @@ sub get_prefix($@) sub shorten_prefix($) { - my @list = split("/", $_[0]); - - pop(@list); - return join("/", @list); -} - - - -# -# get_dir_list(filename_list) -# -# Return sorted list of directories for each entry in given FILENAME_LIST. -# - -sub get_dir_list(@) -{ - my %result; + my ($vol, $dir, $name) = File::Spec->splitpath($_[0]); - foreach (@_) - { - $result{shorten_prefix($_)} = ""; - } - - return(sort(keys(%result))); + return File::Spec->catdir($vol, $dir); } - # # get_relative_base_path(subdirectory) # @@ -2499,25 +8366,20 @@ sub get_dir_list(@) sub get_relative_base_path($) { - my $result = ""; - my $index; - - # Make an empty directory path a special case - if (!$_[0]) { return(""); } - - # Count number of /s in path - $index = ($_[0] =~ s/\//\//g); - - # Add a ../ to $result for each / in the directory path + 1 - for (; $index>=0; $index--) - { - $result .= "../"; - } - - return $result; + # Make an empty directory path a special case + if (!$_[0]) { return (""); } + + # Count number of /s in path + my $index = ($_[0] =~ s/$lcovutil::dirseparator/$lcovutil::dirseparator/g); + + # Add a ../ to $result for each / in the directory path + 1 + my $result = ""; + for (; $index >= 0; $index--) { + $result .= "..$lcovutil::dirseparator"; + } + return $result; } - # # read_testfile(test_filename) # @@ -2535,61 +8397,62 @@ sub get_relative_base_path($) sub read_testfile($) { - my %result; - my $test_name; - my $changed_testname; - local *TEST_HANDLE; - - open(TEST_HANDLE, "<", $_[0]) - or die("ERROR: cannot open $_[0]!\n"); - - while () - { - chomp($_); - - # Match lines beginning with TN: - if (/^TN:\s+(.*?)\s*$/) - { - # Store name for later use - $test_name = $1; - if ($test_name =~ s/\W/_/g) - { - $changed_testname = 1; - } - } - - # Match lines beginning with TD: - if (/^TD:\s+(.*?)\s*$/) - { - if (!defined($test_name)) { - die("ERROR: Found test description without prior test name in $_[0]:$.\n"); - } - # Check for empty line - if ($1) - { - # Add description to hash - $result{$test_name} .= " $1"; - } - else - { - # Add empty line - $result{$test_name} .= "\n\n"; - } - } - } - - close(TEST_HANDLE); - - if ($changed_testname) - { - warn("WARNING: invalid characters removed from testname in ". - "descriptions file $_[0]\n"); - } - - return \%result; + my $file = shift; + my %result; + my $test_name; + my $changed_testname; + local *TEST_HANDLE; + + open(TEST_HANDLE, "<", $file) or + die("cannot open description file '$file': $!\n"); + + while () { + chomp($_); + s/\r//g; + # Match lines beginning with TN: + next if /^#/; # skip comment + if (/^TN:\s*(.*?)\s*$/) { + # Store name for later use + $test_name = $1; + if ($test_name =~ s/\W/_/g) { + $changed_testname = 1; + } + } + + # Match lines beginning with TD: + if (/^TD:\s*(.*?)\s*$/) { + if (!defined($test_name)) { + lcovutil::ignorable_error($lcovutil::ERROR_FORMAT, + "\"$file\":$.: Found test description without prior test name." 
+ ); + next; + } + # Check for empty line + if ($1) { + # Add description to hash + $result{$test_name} .= " $1"; + } else { + # Add empty line + $result{$test_name} .= "\n\n"; + } + } + } + + close(TEST_HANDLE) or die("unable to close HTML file: $!\n"); + if (!%result) { + lcovutil::ignorable_error($lcovutil::ERROR_EMPTY, + "no test descriptions found in '$file'."); + } + + if ($changed_testname) { + lcovutil::ignorable_error($lcovutil::ERROR_FORMAT, + "invalid characters removed from testname in descriptions file '$file'." + ); + } + + return \%result; } - # # escape_html(STRING) # @@ -2599,77 +8462,53 @@ sub read_testfile($) sub escape_html($) { - my $string = $_[0]; + my $string = $_[0]; - if (!$string) { return ""; } + if (!$string) { return ""; } - $string =~ s/&/&/g; # & -> & - $string =~ s/ < - $string =~ s/>/>/g; # > -> > - $string =~ s/\"/"/g; # " -> " + $string =~ s/&/&/g; # & -> & + $string =~ s/ < + $string =~ s/>/>/g; # > -> > + $string =~ s/\"/"/g; # " -> " - while ($string =~ /^([^\t]*)(\t)/) - { - my $replacement = " "x($tab_size - (length($1) % $tab_size)); - $string =~ s/^([^\t]*)(\t)/$1$replacement/; - } + while ($string =~ /^([^\t]*)(\t)/) { + my $replacement = " " x ($tab_size - (length($1) % $tab_size)); + $string =~ s/^([^\t]*)(\t)/$1$replacement/; + } - $string =~ s/\n/
<br>/g;    # \n -> <br>
+    $string =~ s/\n/<br>/g;             # \n -> <br>
- return $string; + return $string; } - # # get_date_string() # # Return the current date in the form: yyyy-mm-dd # -sub get_date_string() -{ - my $year; - my $month; - my $day; - my $hour; - my $min; - my $sec; - my @timeresult; - - if (defined $ENV{'SOURCE_DATE_EPOCH'}) - { - @timeresult = gmtime($ENV{'SOURCE_DATE_EPOCH'}); - } - else - { - @timeresult = localtime(); - } - ($year, $month, $day, $hour, $min, $sec) = - @timeresult[5, 4, 3, 2, 1, 0]; - - return sprintf("%d-%02d-%02d %02d:%02d:%02d", $year+1900, $month+1, - $day, $hour, $min, $sec); -} - - -# -# create_sub_dir(dir_name) -# -# Create subdirectory DIR_NAME if it does not already exist, including all its -# parent directories. -# -# Die on error. -# - -sub create_sub_dir($) +sub get_date_string($) { - my ($dir) = @_; - - system("mkdir", "-p" ,$dir) - and die("ERROR: cannot create directory $dir!\n"); + my $time = $_[0]; + my @timeresult; + + if (!$time) { + if (defined $ENV{'SOURCE_DATE_EPOCH'}) { + @timeresult = gmtime($ENV{'SOURCE_DATE_EPOCH'}); + } else { + @timeresult = localtime(); + } + } else { + @timeresult = localtime($time); + } + my ($year, $month, $day, $hour, $min, $sec) = @timeresult[5, 4, 3, 2, 1, 0]; + + return + sprintf("%d-%02d-%02d %02d:%02d:%02d", + $year + 1900, + $month + 1, $day, $hour, $min, $sec); } - # # write_description_file(descriptions, overall_found, overall_hit, # total_fn_found, total_fn_hit, total_br_found, @@ -2683,41 +8522,34 @@ sub create_sub_dir($) # Die on error. # -sub write_description_file($$$$$$$) +sub write_description_file($$) { - my %description = %{$_[0]}; - my $found = $_[1]; - my $hit = $_[2]; - my $fn_found = $_[3]; - my $fn_hit = $_[4]; - my $br_found = $_[5]; - my $br_hit = $_[6]; - my $test_name; - local *HTML_HANDLE; - - html_create(*HTML_HANDLE,"descriptions.$html_ext"); - write_html_prolog(*HTML_HANDLE, "", "LCOV - test case descriptions"); - write_header(*HTML_HANDLE, 3, "", "", $found, $hit, $fn_found, - $fn_hit, $br_found, $br_hit, 0); + my %description = %{$_[0]}; + my $summary = $_[1]; + my $test_name; + local *HTML_HANDLE; - write_test_table_prolog(*HTML_HANDLE, - "Test case descriptions - alphabetical list"); + html_create(*HTML_HANDLE, "descriptions.$html_ext"); - foreach $test_name (sort(keys(%description))) - { - my $desc = $description{$test_name}; + write_html_prolog(*HTML_HANDLE, "", "LCOV - test case descriptions"); + write_header(*HTML_HANDLE, 'top', [3, 'name', 0], + "", "", $summary, undef, undef); - $desc = escape_html($desc) if (!$rc_desc_html); - write_test_table_entry(*HTML_HANDLE, $test_name, $desc); - } + write_test_table_prolog(*HTML_HANDLE, + "Test case descriptions - alphabetical list"); - write_test_table_epilog(*HTML_HANDLE); - write_html_epilog(*HTML_HANDLE, ""); + foreach $test_name (sort(keys(%description))) { + my $desc = $description{$test_name}; - close(*HTML_HANDLE); -} + $desc = escape_html($desc) if (!$rc_desc_html); + write_test_table_entry(*HTML_HANDLE, $test_name, $desc); + } + write_test_table_epilog(*HTML_HANDLE); + write_html_epilog(*HTML_HANDLE, ""); + close(*HTML_HANDLE) or die("unable to close HTML handle: $!\n"); +} # # write_png_files() @@ -2730,137 +8562,210 @@ sub write_description_file($$$$$$$) sub write_png_files() { - my %data; - local *PNG_HANDLE; - - $data{"ruby.png"} = - [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, - 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, - 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d, - 
0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x18, 0x10, 0x5d, 0x57, - 0x34, 0x6e, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73, - 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2, - 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d, - 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00, - 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0x35, 0x2f, - 0x00, 0x00, 0x00, 0xd0, 0x33, 0x9a, 0x9d, 0x00, 0x00, 0x00, - 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00, - 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00, - 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, - 0x82]; - $data{"amber.png"} = - [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, - 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, - 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d, - 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x28, 0x04, 0x98, 0xcb, - 0xd6, 0xe0, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73, - 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2, - 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d, - 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00, - 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0xe0, 0x50, - 0x00, 0x00, 0x00, 0xa2, 0x7a, 0xda, 0x7e, 0x00, 0x00, 0x00, - 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00, - 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00, - 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, - 0x82]; - $data{"emerald.png"} = - [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, - 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, - 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d, - 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x22, 0x2b, 0xc9, 0xf5, - 0x03, 0x33, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73, - 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2, - 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d, - 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00, - 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0x1b, 0xea, 0x59, - 0x0a, 0x0a, 0x0a, 0x0f, 0xba, 0x50, 0x83, 0x00, 0x00, 0x00, - 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00, - 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00, - 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, - 0x82]; - $data{"snow.png"} = - [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, - 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, - 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d, - 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x1e, 0x1d, 0x75, 0xbc, - 0xef, 0x55, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73, - 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2, - 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d, - 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00, - 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x55, 0xc2, 0xd3, 0x7e, 0x00, 0x00, 0x00, - 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00, - 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00, - 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, - 0x82]; - $data{"glass.png"} = - [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, - 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, - 0xdb, 0x56, 
0xca, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d, - 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00, - 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x55, 0xc2, 0xd3, 0x7e, 0x00, 0x00, 0x00, - 0x01, 0x74, 0x52, 0x4e, 0x53, 0x00, 0x40, 0xe6, 0xd8, 0x66, - 0x00, 0x00, 0x00, 0x01, 0x62, 0x4b, 0x47, 0x44, 0x00, 0x88, - 0x05, 0x1d, 0x48, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, - 0x73, 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, - 0xd2, 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, - 0x4d, 0x45, 0x07, 0xd2, 0x07, 0x13, 0x0f, 0x08, 0x19, 0xc4, - 0x40, 0x56, 0x10, 0x00, 0x00, 0x00, 0x0a, 0x49, 0x44, 0x41, - 0x54, 0x78, 0x9c, 0x63, 0x60, 0x00, 0x00, 0x00, 0x02, 0x00, - 0x01, 0x48, 0xaf, 0xa4, 0x71, 0x00, 0x00, 0x00, 0x00, 0x49, - 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82]; - $data{"updown.png"} = - [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, - 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x0a, - 0x00, 0x00, 0x00, 0x0e, 0x08, 0x06, 0x00, 0x00, 0x00, 0x16, - 0xa3, 0x8d, 0xab, 0x00, 0x00, 0x00, 0x3c, 0x49, 0x44, 0x41, - 0x54, 0x28, 0xcf, 0x63, 0x60, 0x40, 0x03, 0xff, 0xa1, 0x00, - 0x5d, 0x9c, 0x11, 0x5d, 0x11, 0x8a, 0x24, 0x23, 0x23, 0x23, - 0x86, 0x42, 0x6c, 0xa6, 0x20, 0x2b, 0x66, 0xc4, 0xa7, 0x08, - 0x59, 0x31, 0x23, 0x21, 0x45, 0x30, 0xc0, 0xc4, 0x30, 0x60, - 0x80, 0xfa, 0x6e, 0x24, 0x3e, 0x78, 0x48, 0x0a, 0x70, 0x62, - 0xa2, 0x90, 0x81, 0xd8, 0x44, 0x01, 0x00, 0xe9, 0x5c, 0x2f, - 0xf5, 0xe2, 0x9d, 0x0f, 0xf9, 0x00, 0x00, 0x00, 0x00, 0x49, - 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82] if ($sort); - foreach (keys(%data)) - { - open(PNG_HANDLE, ">", $_) - or die("ERROR: cannot create $_!\n"); - binmode(PNG_HANDLE); - print(PNG_HANDLE map(chr,@{$data{$_}})); - close(PNG_HANDLE); - } + my %data; + local *PNG_HANDLE; + + $data{"ruby.png"} = + $dark_mode ? + [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, + 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, + 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, + 0x45, 0x80, 0x1b, 0x18, 0x00, 0x00, 0x00, 0x39, 0x4a, 0x74, + 0xf4, 0x00, 0x00, 0x00, 0x0a, 0x49, 0x44, 0x41, 0x54, 0x08, + 0xd7, 0x63, 0x60, 0x00, 0x00, 0x00, 0x02, 0x00, 0x01, 0xe2, + 0x21, 0xbc, 0x33, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, + 0x44, 0xae, 0x42, 0x60, 0x82 + ] : + [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, + 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, + 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d, + 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x18, 0x10, 0x5d, 0x57, + 0x34, 0x6e, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73, + 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2, + 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d, + 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00, + 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0x35, 0x2f, + 0x00, 0x00, 0x00, 0xd0, 0x33, 0x9a, 0x9d, 0x00, 0x00, 0x00, + 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00, + 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00, + 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, + 0x82 + ]; + + $data{"amber.png"} = + $dark_mode ? 
+ [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, + 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, + 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, + 0x45, 0x99, 0x86, 0x30, 0x00, 0x00, 0x00, 0x51, 0x83, 0x43, + 0xd7, 0x00, 0x00, 0x00, 0x0a, 0x49, 0x44, 0x41, 0x54, 0x08, + 0xd7, 0x63, 0x60, 0x00, 0x00, 0x00, 0x02, 0x00, 0x01, 0xe2, + 0x21, 0xbc, 0x33, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, + 0x44, 0xae, 0x42, 0x60, 0x82 + ] : + [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, + 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, + 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d, + 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x28, 0x04, 0x98, 0xcb, + 0xd6, 0xe0, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73, + 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2, + 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d, + 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00, + 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0xe0, 0x50, + 0x00, 0x00, 0x00, 0xa2, 0x7a, 0xda, 0x7e, 0x00, 0x00, 0x00, + 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00, + 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00, + 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, + 0x82 + ]; + $data{"emerald.png"} = + $dark_mode ? + [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, + 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, + 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, + 0x45, 0x00, 0x66, 0x00, 0x0a, 0x0a, 0x0a, 0xa4, 0xb8, 0xbf, + 0x60, 0x00, 0x00, 0x00, 0x0a, 0x49, 0x44, 0x41, 0x54, 0x08, + 0xd7, 0x63, 0x60, 0x00, 0x00, 0x00, 0x02, 0x00, 0x01, 0xe2, + 0x21, 0xbc, 0x33, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, + 0x44, 0xae, 0x42, 0x60, 0x82 + ] : + [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, + 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, + 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d, + 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x22, 0x2b, 0xc9, 0xf5, + 0x03, 0x33, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73, + 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2, + 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d, + 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00, + 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0x1b, 0xea, 0x59, + 0x0a, 0x0a, 0x0a, 0x0f, 0xba, 0x50, 0x83, 0x00, 0x00, 0x00, + 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00, + 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00, + 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, + 0x82 + ]; + $data{"snow.png"} = + $dark_mode ? 
+ [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, + 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, + 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, + 0x45, 0xdd, 0xdd, 0xdd, 0x00, 0x00, 0x00, 0xae, 0x9c, 0x6c, + 0x92, 0x00, 0x00, 0x00, 0x0a, 0x49, 0x44, 0x41, 0x54, 0x08, + 0xd7, 0x63, 0x60, 0x00, 0x00, 0x00, 0x02, 0x00, 0x01, 0xe2, + 0x21, 0xbc, 0x33, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, + 0x44, 0xae, 0x42, 0x60, 0x82 + ] : + [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, + 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, + 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d, + 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x1e, 0x1d, 0x75, 0xbc, + 0xef, 0x55, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73, + 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2, + 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d, + 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00, + 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x55, 0xc2, 0xd3, 0x7e, 0x00, 0x00, 0x00, + 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00, + 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00, + 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, + 0x82 + ]; + + $data{"glass.png"} = [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, + 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, + 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, 0xdb, 0x56, + 0xca, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d, + 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, + 0x05, 0x00, 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, + 0x45, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x55, + 0xc2, 0xd3, 0x7e, 0x00, 0x00, 0x00, 0x01, 0x74, + 0x52, 0x4e, 0x53, 0x00, 0x40, 0xe6, 0xd8, 0x66, + 0x00, 0x00, 0x00, 0x01, 0x62, 0x4b, 0x47, 0x44, + 0x00, 0x88, 0x05, 0x1d, 0x48, 0x00, 0x00, 0x00, + 0x09, 0x70, 0x48, 0x59, 0x73, 0x00, 0x00, 0x0b, + 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2, 0xdd, + 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, + 0x4d, 0x45, 0x07, 0xd2, 0x07, 0x13, 0x0f, 0x08, + 0x19, 0xc4, 0x40, 0x56, 0x10, 0x00, 0x00, 0x00, + 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0x9c, 0x63, + 0x60, 0x00, 0x00, 0x00, 0x02, 0x00, 0x01, 0x48, + 0xaf, 0xa4, 0x71, 0x00, 0x00, 0x00, 0x00, 0x49, + 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82 + ]; + + if ($sort_tables) { + $data{"updown.png"} = + $dark_mode ? 
+ [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, + 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x0a, + 0x00, 0x00, 0x00, 0x0e, 0x08, 0x06, 0x00, 0x00, 0x00, 0x16, + 0xa3, 0x8d, 0xab, 0x00, 0x00, 0x00, 0x43, 0x49, 0x44, 0x41, + 0x54, 0x28, 0xcf, 0x63, 0x60, 0x40, 0x03, 0x77, 0xef, 0xde, + 0xfd, 0x7f, 0xf7, 0xee, 0xdd, 0xff, 0xe8, 0xe2, 0x8c, 0xe8, + 0x8a, 0x90, 0xf9, 0xca, 0xca, 0xca, 0x8c, 0x18, 0x0a, 0xb1, + 0x99, 0x82, 0xac, 0x98, 0x11, 0x9f, 0x22, 0x64, 0xc5, 0x8c, + 0x84, 0x14, 0xc1, 0x00, 0x13, 0xc3, 0x80, 0x01, 0xea, 0xbb, + 0x91, 0xf8, 0xe0, 0x21, 0x29, 0xc0, 0x89, 0x89, 0x42, 0x06, + 0x62, 0x13, 0x05, 0x00, 0xe1, 0xd3, 0x2d, 0x91, 0x93, 0x15, + 0xa4, 0xb2, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, + 0xae, 0x42, 0x60, 0x82 + ] : + [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, + 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x0a, + 0x00, 0x00, 0x00, 0x0e, 0x08, 0x06, 0x00, 0x00, 0x00, 0x16, + 0xa3, 0x8d, 0xab, 0x00, 0x00, 0x00, 0x3c, 0x49, 0x44, 0x41, + 0x54, 0x28, 0xcf, 0x63, 0x60, 0x40, 0x03, 0xff, 0xa1, 0x00, + 0x5d, 0x9c, 0x11, 0x5d, 0x11, 0x8a, 0x24, 0x23, 0x23, 0x23, + 0x86, 0x42, 0x6c, 0xa6, 0x20, 0x2b, 0x66, 0xc4, 0xa7, 0x08, + 0x59, 0x31, 0x23, 0x21, 0x45, 0x30, 0xc0, 0xc4, 0x30, 0x60, + 0x80, 0xfa, 0x6e, 0x24, 0x3e, 0x78, 0x48, 0x0a, 0x70, 0x62, + 0xa2, 0x90, 0x81, 0xd8, 0x44, 0x01, 0x00, 0xe9, 0x5c, 0x2f, + 0xf5, 0xe2, 0x9d, 0x0f, 0xf9, 0x00, 0x00, 0x00, 0x00, 0x49, + 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82 + ]; + } + + foreach (keys(%data)) { + my $f = File::Spec->catfile($main::output_directory, $_); + open(PNG_HANDLE, ">", $f) or + die("cannot create $f: $!\n"); + binmode(PNG_HANDLE); + print(PNG_HANDLE map(chr, @{$data{$_}})); + close(PNG_HANDLE) or die("unable to close PNG handle: $!\n"); + } } - # # write_htaccess_file() # sub write_htaccess_file() { - local *HTACCESS_HANDLE; - my $htaccess_data; + local *HTACCESS_HANDLE; + my $htaccess_data; - open(*HTACCESS_HANDLE, ">", ".htaccess") - or die("ERROR: cannot open .htaccess for writing!\n"); + my $f = File::Spec->catdir($main::output_directory, '.htaccess'); + open(*HTACCESS_HANDLE, ">", $f) or + die("cannot open $f for writing: $!\n"); - $htaccess_data = (<<"END_OF_HTACCESS") + $htaccess_data = (<<"END_OF_HTACCESS") AddEncoding x-gzip .html END_OF_HTACCESS - ; + ; - print(HTACCESS_HANDLE $htaccess_data); - close(*HTACCESS_HANDLE); + print(HTACCESS_HANDLE $htaccess_data); + close(*HTACCESS_HANDLE) or die("unable to close .htaccess: $!\n"); } - # # write_css_file() # @@ -2870,558 +8775,789 @@ END_OF_HTACCESS sub write_css_file() { - local *CSS_HANDLE; - - # Check for a specified external style sheet file - if ($css_filename) - { - # Simply copy that file - system("cp", $css_filename, "gcov.css") - and die("ERROR: cannot copy file $css_filename!\n"); - return; - } - - open(CSS_HANDLE, ">", "gcov.css") - or die ("ERROR: cannot open gcov.css for writing!\n"); - - - # ************************************************************* - - my $css_data = ($_=<<"END_OF_CSS") - /* All views: initial background and text color */ - body - { - color: #000000; - background-color: #FFFFFF; - } - - /* All views: standard link format*/ - a:link - { - color: #284FA8; - text-decoration: underline; - } - - /* All views: standard link - visited format */ - a:visited - { - color: #00CB40; - text-decoration: underline; - } - - /* All views: standard link - activated format */ - a:active - { - color: #FF0040; - text-decoration: underline; - } - - /* All views: main title format */ - td.title 
- { - text-align: center; - padding-bottom: 10px; - font-family: sans-serif; - font-size: 20pt; - font-style: italic; - font-weight: bold; - } - - /* All views: header item format */ - td.headerItem - { - text-align: right; - padding-right: 6px; - font-family: sans-serif; - font-weight: bold; - vertical-align: top; - white-space: nowrap; - } - - /* All views: header item value format */ - td.headerValue - { - text-align: left; - color: #284FA8; - font-family: sans-serif; - font-weight: bold; - white-space: nowrap; - } - - /* All views: header item coverage table heading */ - td.headerCovTableHead - { - text-align: center; - padding-right: 6px; - padding-left: 6px; - padding-bottom: 0px; - font-family: sans-serif; - font-size: 80%; - white-space: nowrap; - } - - /* All views: header item coverage table entry */ - td.headerCovTableEntry - { - text-align: right; - color: #284FA8; - font-family: sans-serif; - font-weight: bold; - white-space: nowrap; - padding-left: 12px; - padding-right: 4px; - background-color: #DAE7FE; - } - - /* All views: header item coverage table entry for high coverage rate */ - td.headerCovTableEntryHi - { - text-align: right; - color: #000000; - font-family: sans-serif; - font-weight: bold; - white-space: nowrap; - padding-left: 12px; - padding-right: 4px; - background-color: #A7FC9D; - } - - /* All views: header item coverage table entry for medium coverage rate */ - td.headerCovTableEntryMed - { - text-align: right; - color: #000000; - font-family: sans-serif; - font-weight: bold; - white-space: nowrap; - padding-left: 12px; - padding-right: 4px; - background-color: #FFEA20; - } - - /* All views: header item coverage table entry for ow coverage rate */ - td.headerCovTableEntryLo - { - text-align: right; - color: #000000; - font-family: sans-serif; - font-weight: bold; - white-space: nowrap; - padding-left: 12px; - padding-right: 4px; - background-color: #FF0000; - } - - /* All views: header legend value for legend entry */ - td.headerValueLeg - { - text-align: left; - color: #000000; - font-family: sans-serif; - font-size: 80%; - white-space: nowrap; - padding-top: 4px; - } - - /* All views: color of horizontal ruler */ - td.ruler - { - background-color: #6688D4; - } - - /* All views: version string format */ - td.versionInfo - { - text-align: center; - padding-top: 2px; - font-family: sans-serif; - font-style: italic; - } - - /* Directory view/File view (all)/Test case descriptions: - table headline format */ - td.tableHead - { - text-align: center; - color: #FFFFFF; - background-color: #6688D4; - font-family: sans-serif; - font-size: 120%; - font-weight: bold; - white-space: nowrap; - padding-left: 4px; - padding-right: 4px; - } - - span.tableHeadSort - { - padding-right: 4px; - } - - /* Directory view/File view (all): filename entry format */ - td.coverFile - { - text-align: left; - padding-left: 10px; - padding-right: 20px; - color: #284FA8; - background-color: #DAE7FE; - font-family: monospace; - } - - /* Directory view/File view (all): bar-graph entry format*/ - td.coverBar - { - padding-left: 10px; - padding-right: 10px; - background-color: #DAE7FE; - } - - /* Directory view/File view (all): bar-graph outline color */ - td.coverBarOutline - { - background-color: #000000; - } - - /* Directory view/File view (all): percentage entry for files with - high coverage rate */ - td.coverPerHi - { - text-align: right; - padding-left: 10px; - padding-right: 10px; - background-color: #A7FC9D; - font-weight: bold; - font-family: sans-serif; - } - - /* Directory view/File 
view (all): line count entry for files with - high coverage rate */ - td.coverNumHi - { - text-align: right; - padding-left: 10px; - padding-right: 10px; - background-color: #A7FC9D; - white-space: nowrap; - font-family: sans-serif; - } - - /* Directory view/File view (all): percentage entry for files with - medium coverage rate */ - td.coverPerMed - { - text-align: right; - padding-left: 10px; - padding-right: 10px; - background-color: #FFEA20; - font-weight: bold; - font-family: sans-serif; - } - - /* Directory view/File view (all): line count entry for files with - medium coverage rate */ - td.coverNumMed - { - text-align: right; - padding-left: 10px; - padding-right: 10px; - background-color: #FFEA20; - white-space: nowrap; - font-family: sans-serif; - } - - /* Directory view/File view (all): percentage entry for files with - low coverage rate */ - td.coverPerLo - { - text-align: right; - padding-left: 10px; - padding-right: 10px; - background-color: #FF0000; - font-weight: bold; - font-family: sans-serif; - } - - /* Directory view/File view (all): line count entry for files with - low coverage rate */ - td.coverNumLo - { - text-align: right; - padding-left: 10px; - padding-right: 10px; - background-color: #FF0000; - white-space: nowrap; - font-family: sans-serif; - } - - /* File view (all): "show/hide details" link format */ - a.detail:link - { - color: #B8D0FF; - font-size:80%; - } - - /* File view (all): "show/hide details" link - visited format */ - a.detail:visited - { - color: #B8D0FF; - font-size:80%; - } - - /* File view (all): "show/hide details" link - activated format */ - a.detail:active - { - color: #FFFFFF; - font-size:80%; - } - - /* File view (detail): test name entry */ - td.testName - { - text-align: right; - padding-right: 10px; - background-color: #DAE7FE; - font-family: sans-serif; - } - - /* File view (detail): test percentage entry */ - td.testPer - { - text-align: right; - padding-left: 10px; - padding-right: 10px; - background-color: #DAE7FE; - font-family: sans-serif; - } - - /* File view (detail): test lines count entry */ - td.testNum - { - text-align: right; - padding-left: 10px; - padding-right: 10px; - background-color: #DAE7FE; - font-family: sans-serif; - } - - /* Test case descriptions: test name format*/ - dt - { - font-family: sans-serif; - font-weight: bold; - } - - /* Test case descriptions: description table body */ - td.testDescription - { - padding-top: 10px; - padding-left: 30px; - padding-bottom: 10px; - padding-right: 30px; - background-color: #DAE7FE; - } - - /* Source code view: function entry */ - td.coverFn - { - text-align: left; - padding-left: 10px; - padding-right: 20px; - color: #284FA8; - background-color: #DAE7FE; - font-family: monospace; - } - - /* Source code view: function entry zero count*/ - td.coverFnLo - { - text-align: right; - padding-left: 10px; - padding-right: 10px; - background-color: #FF0000; - font-weight: bold; - font-family: sans-serif; - } - - /* Source code view: function entry nonzero count*/ - td.coverFnHi - { - text-align: right; - padding-left: 10px; - padding-right: 10px; - background-color: #DAE7FE; - font-weight: bold; - font-family: sans-serif; - } - - /* Source code view: source code format */ - pre.source - { - font-family: monospace; - white-space: pre; - margin-top: 2px; - } - - /* Source code view: line number format */ - span.lineNum - { - background-color: #EFE383; - } - - /* Source code view: format for lines which were executed */ - td.lineCov, - span.lineCov - { - background-color: #CAD7FE; - } 
- - /* Source code view: format for Cov legend */ - span.coverLegendCov - { - padding-left: 10px; - padding-right: 10px; - padding-bottom: 2px; - background-color: #CAD7FE; - } - - /* Source code view: format for lines which were not executed */ - td.lineNoCov, - span.lineNoCov - { - background-color: #FF6230; - } - - /* Source code view: format for NoCov legend */ - span.coverLegendNoCov - { - padding-left: 10px; - padding-right: 10px; - padding-bottom: 2px; - background-color: #FF6230; - } - - /* Source code view (function table): standard link - visited format */ - td.lineNoCov > a:visited, - td.lineCov > a:visited - { - color: black; - text-decoration: underline; - } - - /* Source code view: format for lines which were executed only in a - previous version */ - span.lineDiffCov - { - background-color: #B5F7AF; - } - - /* Source code view: format for branches which were executed - * and taken */ - span.branchCov - { - background-color: #CAD7FE; - } - - /* Source code view: format for branches which were executed - * but not taken */ - span.branchNoCov - { - background-color: #FF6230; - } - - /* Source code view: format for branches which were not executed */ - span.branchNoExec - { - background-color: #FF6230; - } - - /* Source code view: format for the source code heading line */ - pre.sourceHeading - { - white-space: pre; - font-family: monospace; - font-weight: bold; - margin: 0px; - } - - /* All views: header legend value for low rate */ - td.headerValueLegL - { - font-family: sans-serif; - text-align: center; - white-space: nowrap; - padding-left: 4px; - padding-right: 2px; - background-color: #FF0000; - font-size: 80%; - } - - /* All views: header legend value for med rate */ - td.headerValueLegM - { - font-family: sans-serif; - text-align: center; - white-space: nowrap; - padding-left: 2px; - padding-right: 2px; - background-color: #FFEA20; - font-size: 80%; - } - - /* All views: header legend value for hi rate */ - td.headerValueLegH - { - font-family: sans-serif; - text-align: center; - white-space: nowrap; - padding-left: 2px; - padding-right: 4px; - background-color: #A7FC9D; - font-size: 80%; - } - - /* All views except source code view: legend format for low coverage */ - span.coverLegendCovLo - { - padding-left: 10px; - padding-right: 10px; - padding-top: 2px; - background-color: #FF0000; - } - - /* All views except source code view: legend format for med coverage */ - span.coverLegendCovMed - { - padding-left: 10px; - padding-right: 10px; - padding-top: 2px; - background-color: #FFEA20; - } - - /* All views except source code view: legend format for hi coverage */ - span.coverLegendCovHi - { - padding-left: 10px; - padding-right: 10px; - padding-top: 2px; - background-color: #A7FC9D; - } + local *CSS_HANDLE; + + my $f = File::Spec->catdir($main::output_directory, 'gcov.css'); + # Check for a specified external style sheet file + if ($css_filename) { + # Simply copy that file + system("cp", $css_filename, $f) and + die("cannot copy file $css_filename: $!\n"); + return; + } + + open(CSS_HANDLE, ">", $f) or + die("cannot open $f for writing: $!\n"); + + # ************************************************************* + + # ************************************************************* + my $ownerBackground = "#COLOR_17"; # very light pale grey/blue + my $ownerCovHi = "#COLOR_18"; # light green + my $ownerCovMed = "#COLOR_19"; # light yellow + my $ownerCovLo = "#COLOR_20"; # lighter red + # use same background color as file entry unless in hierarchical report + my 
$directoryBackground = $main::hierarchical ? '#COLOR_18' : '#COLOR_06'; + my $css_data = ($_ = <<"END_OF_CSS") + /* All views: initial background and text color */ + body + { + color: #COLOR_00; + background-color: #COLOR_14; + } + + /* All views: standard link format*/ + a:link + { + color: #COLOR_15; + text-decoration: underline; + } + + /* All views: standard link - visited format */ + a:visited + { + color: #COLOR_01; + text-decoration: underline; + } + + /* All views: standard link - activated format */ + a:active + { + color: #COLOR_11; + text-decoration: underline; + } + + /* All views: main title format */ + td.title + { + text-align: center; + padding-bottom: 10px; + font-family: sans-serif; + font-size: 20pt; + font-style: italic; + font-weight: bold; + } + /* table footnote */ + td.footnote + { + text-align: left; + padding-left: 100px; + padding-right: 10px; + background-color: #COLOR_08; /* light blue table background color */ + /* dark blue table header color + background-color: #COLOR_03; */ + white-space: nowrap; + font-family: sans-serif; + font-style: italic; + font-size:70%; + } + /* "Line coverage date bins" leader */ + td.subTableHeader + { + text-align: center; + padding-bottom: 6px; + font-family: sans-serif; + font-weight: bold; + vertical-align: center; + } + + /* All views: header item format */ + td.headerItem + { + text-align: right; + padding-right: 6px; + font-family: sans-serif; + font-weight: bold; + vertical-align: top; + white-space: nowrap; + } + + /* All views: header item value format */ + td.headerValue + { + text-align: left; + color: #COLOR_15; + font-family: sans-serif; + font-weight: bold; + white-space: nowrap; + } + + /* All views: header item coverage table heading */ + td.headerCovTableHead + { + text-align: center; + padding-right: 6px; + padding-left: 6px; + padding-bottom: 0px; + font-family: sans-serif; + white-space: nowrap; + } + + /* All views: header item coverage table entry */ + td.headerCovTableEntry + { + text-align: right; + color: #COLOR_15; + font-family: sans-serif; + font-weight: bold; + white-space: nowrap; + padding-left: 12px; + padding-right: 4px; + background-color: #COLOR_08; + } + + /* All views: header item coverage table entry for high coverage rate */ + td.headerCovTableEntryHi + { + text-align: right; + color: #COLOR_00; + font-family: sans-serif; + font-weight: bold; + white-space: nowrap; + padding-left: 12px; + padding-right: 4px; + background-color: #COLOR_04; + } + + /* All views: header item coverage table entry for medium coverage rate */ + td.headerCovTableEntryMed + { + text-align: right; + color: #COLOR_00; + font-family: sans-serif; + font-weight: bold; + white-space: nowrap; + padding-left: 12px; + padding-right: 4px; + background-color: #COLOR_13; + } + + /* All views: header item coverage table entry for ow coverage rate */ + td.headerCovTableEntryLo + { + text-align: right; + color: #COLOR_00; + font-family: sans-serif; + font-weight: bold; + white-space: nowrap; + padding-left: 12px; + padding-right: 4px; + background-color: #COLOR_10; + } + + /* All views: header legend value for legend entry */ + td.headerValueLeg + { + text-align: left; + color: #COLOR_00; + font-family: sans-serif; + font-size: 80%; + white-space: nowrap; + padding-top: 4px; + } + + /* All views: color of horizontal ruler */ + td.ruler + { + background-color: #COLOR_03; + } + + /* All views: version string format */ + td.versionInfo + { + text-align: center; + padding-top: 2px; + font-family: sans-serif; + font-style: italic; + } 
+ + /* Directory view/File view (all)/Test case descriptions: + table headline format */ + td.tableHead + { + text-align: center; + color: #COLOR_14; + background-color: #COLOR_03; + font-family: sans-serif; + font-size: 120%; + font-weight: bold; + white-space: nowrap; + padding-left: 4px; + padding-right: 4px; + } + + span.tableHeadSort + { + padding-right: 4px; + } + + /* Directory view/File view (all): filename entry format */ + td.coverFile + { + text-align: left; + padding-left: 10px; + padding-right: 20px; + color: #COLOR_15; + background-color: #COLOR_08; + font-family: monospace; + } + + /* Directory view/File view (all): directory name entry format */ + td.coverDirectory + { + text-align: left; + padding-left: 10px; + padding-right: 20px; + color: #COLOR_15; + background-color: $directoryBackground; + font-family: monospace; + } + + /* Directory view/File view (all): filename entry format */ + td.overallOwner + { + text-align: center; + font-weight: bold; + font-family: sans-serif; + background-color: #COLOR_08; + padding-right: 10px; + padding-left: 10px; + } + + /* Directory view/File view (all): filename entry format */ + td.ownerName + { + text-align: right; + font-style: italic; + font-family: sans-serif; + background-color: $ownerBackground; + padding-right: 10px; + padding-left: 20px; + } + + /* Directory view/File view (all): bar-graph entry format*/ + td.coverBar + { + padding-left: 10px; + padding-right: 10px; + background-color: #COLOR_08; + } + + /* Directory view/File view (all): bar-graph entry format*/ + td.owner_coverBar + { + padding-left: 10px; + padding-right: 10px; + background-color: $ownerBackground; + } + + /* Directory view/File view (all): bar-graph outline color */ + td.coverBarOutline + { + background-color: #COLOR_00; + } + + /* Directory view/File view (all): percentage entry for files with + high coverage rate */ + td.coverPerHi + { + text-align: right; + padding-left: 10px; + padding-right: 10px; + background-color: #COLOR_04; + font-weight: bold; + font-family: sans-serif; + } + + /* 'owner' entry: slightly lighter color than 'coverPerHi' */ + td.owner_coverPerHi + { + text-align: right; + padding-left: 10px; + padding-right: 10px; + background-color: $ownerCovHi; + font-weight: bold; + font-family: sans-serif; + } + + /* Directory view/File view (all): line count entry */ + td.coverNumDflt + { + text-align: right; + padding-left: 10px; + padding-right: 10px; + background-color: #COLOR_08; + white-space: nowrap; + font-family: sans-serif; + } + + /* td background color and font for the 'owner' section of the table */ + td.ownerTla + { + text-align: right; + padding-left: 10px; + padding-right: 10px; + background-color: $ownerBackground; + white-space: nowrap; + font-family: sans-serif; + font-style: italic; + } + + /* Directory view/File view (all): line count entry for files with + high coverage rate */ + td.coverNumHi + { + text-align: right; + padding-left: 10px; + padding-right: 10px; + background-color: #COLOR_04; + white-space: nowrap; + font-family: sans-serif; + } + + td.owner_coverNumHi + { + text-align: right; + padding-left: 10px; + padding-right: 10px; + background-color: $ownerCovHi; + white-space: nowrap; + font-family: sans-serif; + } + + /* Directory view/File view (all): percentage entry for files with + medium coverage rate */ + td.coverPerMed + { + text-align: right; + padding-left: 10px; + padding-right: 10px; + background-color: #COLOR_13; + font-weight: bold; + font-family: sans-serif; + } + + td.owner_coverPerMed + { + 
text-align: right; + padding-left: 10px; + padding-right: 10px; + background-color: $ownerCovMed; + font-weight: bold; + font-family: sans-serif; + } + + /* Directory view/File view (all): line count entry for files with + medium coverage rate */ + td.coverNumMed + { + text-align: right; + padding-left: 10px; + padding-right: 10px; + background-color: #COLOR_13; + white-space: nowrap; + font-family: sans-serif; + } + + td.owner_coverNumMed + { + text-align: right; + padding-left: 10px; + padding-right: 10px; + background-color: $ownerCovMed; + white-space: nowrap; + font-family: sans-serif; + } + + /* Directory view/File view (all): percentage entry for files with + low coverage rate */ + td.coverPerLo + { + text-align: right; + padding-left: 10px; + padding-right: 10px; + background-color: #COLOR_10; + font-weight: bold; + font-family: sans-serif; + } + + td.owner_coverPerLo + { + text-align: right; + padding-left: 10px; + padding-right: 10px; + background-color: $ownerCovLo; + font-weight: bold; + font-family: sans-serif; + } + + /* Directory view/File view (all): line count entry for files with + low coverage rate */ + td.coverNumLo + { + text-align: right; + padding-left: 10px; + padding-right: 10px; + background-color: #COLOR_10; + white-space: nowrap; + font-family: sans-serif; + } + + td.owner_coverNumLo + { + text-align: right; + padding-left: 10px; + padding-right: 10px; + background-color: $ownerCovLo; + white-space: nowrap; + font-family: sans-serif; + } + + /* File view (all): "show/hide details" link format */ + a.detail:link + { + color: #COLOR_06; + font-size:80%; + } + + /* File view (all): "show/hide details" link - visited format */ + a.detail:visited + { + color: #COLOR_06; + font-size:80%; + } + + /* File view (all): "show/hide details" link - activated format */ + a.detail:active + { + color: #COLOR_14; + font-size:80%; + } + + /* File view (detail): test name entry */ + td.testName + { + text-align: right; + padding-right: 10px; + background-color: #COLOR_08; + font-family: sans-serif; + } + + /* File view (detail): test percentage entry */ + td.testPer + { + text-align: right; + padding-left: 10px; + padding-right: 10px; + background-color: #COLOR_08; + font-family: sans-serif; + } + + /* File view (detail): test lines count entry */ + td.testNum + { + text-align: right; + padding-left: 10px; + padding-right: 10px; + background-color: #COLOR_08; + font-family: sans-serif; + } + + /* Test case descriptions: test name format*/ + dt + { + font-family: sans-serif; + font-weight: bold; + } + + /* Test case descriptions: description table body */ + td.testDescription + { + padding-top: 10px; + padding-left: 30px; + padding-bottom: 10px; + padding-right: 30px; + background-color: #COLOR_08; + } + + /* Source code view: function entry */ + td.coverFn + { + text-align: left; + padding-left: 10px; + padding-right: 20px; + color: #COLOR_15; + background-color: #COLOR_08; + font-family: monospace; + } + + /* Source code view: function entry zero count*/ + td.coverFnLo + { + text-align: right; + padding-left: 10px; + padding-right: 10px; + background-color: #COLOR_10; + font-weight: bold; + font-family: sans-serif; + } + + /* Source code view: function entry nonzero count*/ + td.coverFnHi + { + text-align: right; + padding-left: 10px; + padding-right: 10px; + background-color: #COLOR_08; + font-weight: bold; + font-family: sans-serif; + } + + td.coverFnAlias + { + text-align: right; + padding-left: 10px; + padding-right: 20px; + color: #COLOR_15; + /* make this a slightly 
different color than the leader - otherwise, + otherwise the alias is hard to distinguish in the table */ + background-color: #COLOR_17; /* very light pale grey/blue */ + font-family: monospace; + } + + /* Source code view: function entry zero count*/ + td.coverFnAliasLo + { + text-align: right; + padding-left: 10px; + padding-right: 10px; + background-color: $ownerCovLo; /* lighter red */ + font-family: sans-serif; + } + + /* Source code view: function entry nonzero count*/ + td.coverFnAliasHi + { + text-align: right; + padding-left: 10px; + padding-right: 10px; + background-color: #COLOR_08; + font-weight: bold; + font-family: sans-serif; + } + + /* Source code view: source code format */ + pre.source + { + font-family: monospace; + white-space: pre; + margin-top: 2px; + } + + /* elided/removed code */ + span.elidedSource + { + font-family: sans-serif; + /*font-size: 8pt; */ + font-style: italic; + background-color: lightgrey; + } + + /* Source code view: line number format */ + span.lineNum + { + background-color: #COLOR_09; + } + + /* Source code view: line number format when there are deleted + lines in the corresponding location */ + span.lineNumWithDelete + { + foreground-color: #COLOR_09; + background-color: lightgrey; + } + + /* Source code view: format for Cov legend */ + span.coverLegendCov + { + padding-left: 10px; + padding-right: 10px; + padding-bottom: 2px; + background-color: #COLOR_07; + } + + /* Source code view: format for NoCov legend */ + span.coverLegendNoCov + { + padding-left: 10px; + padding-right: 10px; + padding-bottom: 2px; + background-color: #COLOR_12; + } + + /* Source code view: format for the source code heading line */ + pre.sourceHeading + { + white-space: pre; + font-family: monospace; + font-weight: bold; + margin: 0px; + } + + /* All views: header legend value for low rate */ + td.headerValueLegL + { + font-family: sans-serif; + text-align: center; + white-space: nowrap; + padding-left: 4px; + padding-right: 2px; + background-color: #COLOR_10; + font-size: 80%; + } + + /* All views: header legend value for med rate */ + td.headerValueLegM + { + font-family: sans-serif; + text-align: center; + white-space: nowrap; + padding-left: 2px; + padding-right: 2px; + background-color: #COLOR_13; + font-size: 80%; + } + + /* All views: header legend value for hi rate */ + td.headerValueLegH + { + font-family: sans-serif; + text-align: center; + white-space: nowrap; + padding-left: 2px; + padding-right: 4px; + background-color: #COLOR_04; + font-size: 80%; + } + + /* All views except source code view: legend format for low coverage */ + span.coverLegendCovLo + { + padding-left: 10px; + padding-right: 10px; + padding-top: 2px; + background-color: #COLOR_10; + } + + /* All views except source code view: legend format for med coverage */ + span.coverLegendCovMed + { + padding-left: 10px; + padding-right: 10px; + padding-top: 2px; + background-color: #COLOR_13; + } + + /* All views except source code view: legend format for hi coverage */ + span.coverLegendCovHi + { + padding-left: 10px; + padding-right: 10px; + padding-top: 2px; + background-color: #COLOR_04; + } + + a.branchTla:link + { + color: #COLOR_00; + } + + a.branchTla:visited + { + color: #COLOR_00; + } + + a.mcdcTla:link + { + color: #COLOR_00; + } + + a.mcdcTla:visited + { + color: #COLOR_00; + } END_OF_CSS - ; - - # ************************************************************* - - - # Remove leading tab from all lines - $css_data =~ s/^\t//gm; - - print(CSS_HANDLE $css_data); - - close(CSS_HANDLE); + ; + 
+ foreach my $tla (@SummaryInfo::tlaPriorityOrder) { + my $title = $SummaryInfo::tlaToTitle{$tla}; + my $color = $lcovutil::tlaColor{$tla}; + foreach my $elem ("td", "span") { + my $align = $elem eq 'td' ? "right" : "left"; + $css_data .= ($_ = <<"END_OF_SPAN") + + /* Source code view/table entry background: format for lines classified as "$title" */ + $elem.tla$tla + { + text-align: $align; + background-color: $color; + } + $elem.tlaBg$tla { + background-color: $color; + } +END_OF_SPAN + ; + } + # the href anchor tag background + $css_data .= ($_ = <<"END_OF_SPAN"); + a.tlaBg$tla { + background-color: $color; + color: #COLOR_00; + } + + td.headerCovTableHead$tla { + text-align: center; + padding-right: 6px; + padding-left: 6px; + padding-bottom: 0px; + font-family: sans-serif; + white-space: nowrap; + background-color: $color; + } +END_OF_SPAN + } + + # 'span' tags for date bins... + # probably should have one for each bin... + $css_data .= ($_ = <<"END_OF_DATE_SPAN") + + /* Source code view: format for date/owner bin that is not hit */ + span.missBins + { + background-color: #COLOR_10 /* red */ + } +END_OF_DATE_SPAN + ; + + # ************************************************************* + + # Remove leading tab from all lines + $css_data =~ s/^\t//gm; + $css_data =~ s/^ //gm; # and 8 spaces... + + # Apply palette + my $palette = + $dark_mode ? \%lcovutil::dark_palette : \%lcovutil::normal_palette; + + while (my ($key, $color) = each(%$palette)) { + $css_data =~ s/$key/$color/gm; + } + + print(CSS_HANDLE $css_data); + + close(CSS_HANDLE) or die("unable to close CSS handle: $!\n"); } - # # get_bar_graph_code(base_dir, cover_found, cover_hit) # @@ -3431,55 +9567,42 @@ END_OF_CSS sub get_bar_graph_code($$$) { - my ($base_dir, $found, $hit) = @_; - my $rate; - my $alt; - my $width; - my $remainder; - my $png_name; - my $graph_code; - - # Check number of instrumented lines - if ($_[1] == 0) { return ""; } - - $alt = rate($hit, $found, "%"); - $width = rate($hit, $found, undef, 0); - $remainder = 100 - $width; - - # Decide which .png file to use - $png_name = $rate_png[classify_rate($found, $hit, $med_limit, - $hi_limit)]; - - if ($width == 0) - { - # Zero coverage - $graph_code = (<$alt + my ($base_dir, $found, $hit) = @_; + my $graph_code; + + # Check number of instrumented lines + if ($found == 0) { return ""; } + + my $alt = rate($hit, $found, "%"); + my $width = rate($hit, $found, undef, 0); + my $remainder = 100 - $width; + + # Decide which .png file to use + my $png_name = + $rate_png[classify_rate($found, $hit, $ln_med_limit, $ln_hi_limit)]; + + if ($width == 0) { + # Zero coverage + $graph_code = (<$alt END_OF_HTML - ; - } - elsif ($width == 100) - { - # Full coverage - $graph_code = (<$alt + } elsif ($width == 100) { + # Full coverage + $graph_code = (<$alt END_OF_HTML - ; - } - else - { - # Positive coverage - $graph_code = (<$alt$alt + } else { + # Positive coverage + $graph_code = (<$alt$alt END_OF_HTML - ; - } + } - # Remove leading tabs from all lines - $graph_code =~ s/^\t+//gm; - chomp($graph_code); + # Remove leading tabs from all lines + $graph_code =~ s/^\t+//gm; + chomp($graph_code); - return($graph_code); + return ($graph_code); } # @@ -3490,22 +9613,20 @@ END_OF_HTML sub classify_rate($$$$) { - my ($found, $hit, $med, $hi) = @_; - my $rate; - - if ($found == 0) { - return 2; - } - $rate = rate($hit, $found); - if ($rate < $med) { - return 0; - } elsif ($rate < $hi) { - return 1; - } - return 2; + my ($found, $hit, $med, $hi) = @_; + + if ($found == 0) { + return 2; + } + 
my $rate = rate($hit, $found); + if ($rate < $med) { + return 0; + } elsif ($rate < $hi) { + return 1; + } + return 2; } - # # write_html(filehandle, html_code) # @@ -3515,17 +9636,16 @@ sub classify_rate($$$$) sub write_html(*$) { - local *HTML_HANDLE = $_[0]; - my $html_code = $_[1]; + local *HTML_HANDLE = $_[0]; + my $html_code = $_[1]; - # Remove leading tab from all lines - $html_code =~ s/^\t//gm; + # Remove leading tab from all lines + $html_code =~ s/^\t//gm; - print(HTML_HANDLE $html_code) - or die("ERROR: cannot write HTML data ($!)\n"); + print(HTML_HANDLE $html_code) or + die("cannot write HTML data ($!)\n"); } - # # write_html_prolog(filehandle, base_dir, pagetitle) # @@ -3536,18 +9656,17 @@ sub write_html(*$) sub write_html_prolog(*$$) { - my $basedir = $_[1]; - my $pagetitle = $_[2]; - my $prolog; + my $basedir = $_[1]; + my $pagetitle = $_[2]; + my $prolog; - $prolog = $html_prolog; - $prolog =~ s/\@pagetitle\@/$pagetitle/g; - $prolog =~ s/\@basedir\@/$basedir/g; + $prolog = $html_prolog; + $prolog =~ s/\@pagetitle\@/$pagetitle/g; + $prolog =~ s/\@basedir\@/$basedir/g; - write_html($_[0], $prolog); + write_html($_[0], $prolog); } - # # write_header_prolog(filehandle, base_dir) # @@ -3556,23 +9675,21 @@ sub write_html_prolog(*$$) sub write_header_prolog(*$) { - # ************************************************************* + # ************************************************************* - write_html($_[0], < - $title - + write_html($_[0], < + $title + - - - + + - + write_html($_[0], < +
+ END_OF_HTML - ; - # ************************************************************* + # ************************************************************* } - # # write_header_line(handle, content) # @@ -3581,36 +9698,32 @@ END_OF_HTML sub write_header_line(*@) { - my ($handle, @content) = @_; - my $entry; - - write_html($handle, " \n"); - foreach $entry (@content) { - my ($width, $class, $text, $colspan) = @{$entry}; - - if (defined($width)) { - $width = " width=\"$width\""; - } else { - $width = ""; - } - if (defined($class)) { - $class = " class=\"$class\""; - } else { - $class = ""; - } - if (defined($colspan)) { - $colspan = " colspan=\"$colspan\""; - } else { - $colspan = ""; - } - $text = "" if (!defined($text)); - write_html($handle, - " $text\n"); - } - write_html($handle, " \n"); + my ($handle, @content) = @_; + + write_html($handle, " \n"); + # label order has to match data that is passed + my @labels = qw/width class colspan title/; + foreach my $entry (@content) { + my @d = @$entry; + my $text = splice(@d, 2, 1) if (scalar(@d) > 1); + # entry may not contain some data - e.g., does not have colspan or title + die("unexpected entry format") unless scalar(@labels) >= scalar(@d); + my $str = " whatever' + write_html($handle, $str); + } + write_html($handle, " \n"); # then end the row } - # # write_header_epilog(filehandle, base_dir) # @@ -3619,198 +9732,651 @@ sub write_header_line(*@) sub write_header_epilog(*$) { - # ************************************************************* + # ************************************************************* - write_html($_[0], < -
-
+ + - - + + END_OF_HTML - ; - - # ************************************************************* + # ************************************************************* } - # -# write_file_table_prolog(handle, file_heading, ([heading, num_cols], ...)) +# write_file_table_prolog(handle, file_heading, binHeading, primary_key, ([heading, num_cols], ...)) # # Write heading for file table. # -sub write_file_table_prolog(*$@) +sub write_file_table_prolog(*$$$@) { - my ($handle, $file_heading, @columns) = @_; - my $num_columns = 0; - my $file_width; - my $col; - my $width; - - $width = 20 if (scalar(@columns) == 1); - $width = 10 if (scalar(@columns) == 2); - $width = 8 if (scalar(@columns) > 2); - - foreach $col (@columns) { - my ($heading, $cols) = @{$col}; - - $num_columns += $cols; - } - $file_width = 100 - $num_columns * $width; - - # Table definition - write_html($handle, < - + my ($handle, $file_heading, $bin_heading, $primary_key, @columns) = @_; + my $num_columns = 0; + my $file_width = 40; + my $col; + my $width; + + foreach $col (@columns) { + my ($heading, $cols, $titles) = @{$col}; + + $num_columns += $cols; + } + $num_columns++ if (defined($bin_heading)); + $width = int((100 - $file_width) / $num_columns); + + # Table definition + write_html($handle, < +
+ + + +END_OF_HTML + if (defined($bin_heading)) { + # owner or date column + write_html($handle, < +END_OF_HTML + } + # Empty first row + foreach $col (@columns) { + my ($heading, $cols) = @{$col}; + + while ($cols-- > 0) { + write_html($handle, < +END_OF_HTML + } + } + # Next row + if ($primary_key eq "name") { + my $spanType = defined($bin_heading) ? "colspan" : "rowspan"; + ++$num_columns; + write_html($handle, < - - + + END_OF_HTML - # Empty first row - foreach $col (@columns) { - my ($heading, $cols) = @{$col}; + } else { + my $t = ucfirst($primary_key); + # a bit of a hack...just substitute the primary key and related + # strings into the 'file heading' link - so we display the + # 'sort' widget + if ($primary_key eq 'owner' && + $file_heading =~ /^([^ ]+) - while ($cols-- > 0) { - write_html($handle, < + + + +END_OF_HTML + } + # Heading row + foreach $col (@columns) { + my ($heading, $cols, $titles) = @{$col}; + my $colspan = ""; + my $rowspan = ""; + $colspan = " colspan=$cols" if ($cols > 1); + $rowspan = " rowspan=2" if (!defined($titles)); + write_html($handle, <$heading END_OF_HTML - } - } - # Next row - write_html($handle, < - - - + } + write_html($handle, < + +END_OF_HTML + + # title row + if (defined($bin_heading)) { + # Next row + my $str = ucfirst($bin_heading); + write_html($handle, <Name + END_OF_HTML - # Heading row - foreach $col (@columns) { - my ($heading, $cols) = @{$col}; - my $colspan = ""; - - $colspan = " colspan=$cols" if ($cols > 1); - write_html($handle, <$heading + } + + foreach $col (@columns) { + my ($heading, $cols, $titles) = @{$col}; + my $colspan = ""; + my $rowspan = ""; + + if (defined($titles)) { + foreach my $t (@$titles) { + my $span = ""; + my $popup = ''; + if ("ARRAY" eq ref($t)) { + my ($tla, $num, $help) = @$t; + $span = " colspan=" . $num if $num > 1; + $popup = " title=\"$help\"" + if (defined $help); + $t = $tla; + } + write_html($handle, < $t END_OF_HTML - } - write_html($handle, < + } + } + } + write_html($handle, < END_OF_HTML + return $num_columns; } +sub escape_id($) +{ + my ($name) = @_; + + # Name/ID attribute requirements according to HTML 4.01 Transitional + $name =~ s/[^A-Za-z0-9-_:.]/_/g; + + return $name; +} -# write_file_table_entry(handle, base_dir, filename, page_link, -# ([ found, hit, med_limit, hi_limit, graph ], ..) +# write_file_table_entry(handle, base_dir, +# [ name, [filename, fileDetails, fileHref, cbdata], +# activeTlaCols, +# rowspan, primary_key, is_secondary, fileview, +# page_type, page_link, dirSummary, showDetailCol, +# asterisk ], +# ([ found, hit, med_limit, hi_limit, graph ], ..) # # Write an entry of the file table. +# $fileview: 0 == 'table is listing directories', 1 == 'list files' # -sub write_file_table_entry(*$$$@) +sub write_file_table_entry(*$$@) { - my ($handle, $base_dir, $filename, $page_link, @entries) = @_; - my $file_code; - my $entry; - my $esc_filename = escape_html($filename); - - # Add link to source if provided - if (defined($page_link) && $page_link ne "") { - $file_code = "$esc_filename"; - } else { - $file_code = $esc_filename; - } + my ($handle, $base_dir, $data, @entries) = @_; + my ($name, $callbackData, $activeTlaCols, + $rowspan, $primary_key, $is_secondary, + $fileview, $page_type, $page_link, + $dirSummary, $showBinDetailColumn, $asterisk) = @$data; + my ($filename, $fileSummary, $fileDetails, $file_link, $cbData) = + @$callbackData; + die("unexpected callback arg types: write_file_table_entry(" . + ref($fileSummary) . ', ' . + ref($fileDetails) . + ", $file_link, " . 
(defined($cbData) ? $cbData : 'undef') . ')') + unless (!defined($fileDetails) || 'SourceFile' eq ref($fileDetails)) && + 'SummaryInfo' eq ref($fileSummary); + my $esc_name = escape_html($name); + if ($main::flat && + !$is_secondary) { + my $relDir = $fileSummary->relativeDir(); + die("relative directory not set") unless $relDir; + $esc_name = escape_html(File::Spec->catfile($relDir, $name)) + if '.' ne $relDir; + } + my $obj_name = $esc_name + . + ($fileSummary->type() eq 'directory' && + $primary_key eq 'name' ? escape_html($lcovutil::dirseparator) : + ''); + my $namecode = $obj_name; + my $owner; + my $full_path = + 'directory' eq $fileSummary->type() ? $fileSummary->fullDir() : + $fileSummary->name(); + $full_path = escape_html($full_path); + + # Add link to source if provided + my $source_link; + my $suppressFileHref = + $main::no_sourceview && $fileSummary->type() eq 'file'; + if ($is_secondary == 2) { + if (defined($page_link) && + $page_link ne "") { + # could use $fileSummary->name() in the popup + $source_link = + $suppressFileHref ? $obj_name : + ("type() . " $full_path\">$obj_name"); + } elsif (defined($file_link) && + $file_link ne "") { + my $target = $file_link; + if ($main::flat) { + $target = + File::Spec->catfile($fileSummary->relativeDir(), $target); + } + if ($fileSummary->type() eq 'directory') { + $target = File::Basename::dirname($target); + $target = File::Basename::basename($target) + if ($main::hierarchical); + } + $target = escape_html($target); + $source_link = + $suppressFileHref ? $target : + ( + "" + . $target . ""); + } else { + $source_link = $fileSummary->name(); + $source_link = File::Basename::basename($source_link) + if ($main::hierarchical); + $source_link = escape_html($source_link); + } + } + my $anchor = 'NAME'; + if (!$is_secondary && + defined($page_link) && + $page_link ne "") { + # could use $fileSummary->name() in the popup + $namecode = + $suppressFileHref ? $obj_name : + ("type() . " $full_path\">$obj_name"); + $owner = ""; + } elsif ($is_secondary && + $primary_key ne 'name' && + defined($file_link) && + $file_link ne "") { + if ($main::flat) { + $file_link = + File::Spec->catfile($fileSummary->relativeDir(), $file_link); + } elsif ($main::hierarchical && + 'directory' eq $fileSummary->type()) { + die("no parent for " . $fileSummary->name()) + unless defined($fileSummary->parent()); + my $pname = $fileSummary->parent()->name(); + die("unexpected parent '$pname' in $file_link") + unless (length($pname) < length($file_link) && + $pname eq substr($file_link, 0, length($pname))); + $file_link = substr($file_link, length($pname) + 1); + + #lcovutil::info("$filename: path is $file_link, base is '$base_dir' full path $full_path\n"); + } + $namecode = + $suppressFileHref ? $obj_name : + ( + "" + . $obj_name . ""); + } elsif ($is_secondary && + $primary_key ne 'name' && + $name eq $filename) { + # get here when we suppressed the sourceview - so the file link + # is not defined + $namecode = $obj_name; + } elsif (defined($primary_key)) { + $namecode = $obj_name; + if (!$suppressFileHref) { + # we want the the HREF anchor on the column 1 entry - + # the column 0 entry may span many rows - so navigation to that + # entry (e.g., to find all the files in the "(7..30] days" bin) + # may be rendered such that the first element in the bin is not + # visible (you have to scroll up to see it). 
+ # the fix is to put the anchor in the next column + if ($primary_key eq 'owner') { + $anchor = 'NAME'; + } elsif ($primary_key eq 'date') { + die("unexpected bin") + unless (!$is_secondary && + exists($SummaryInfo::ageHeaderToBin{$name})) || + (defined($cbData) && + $cbData <= $#SummaryInfo::ageGroupHeader); + my $bin = + $is_secondary ? $cbData : + $SummaryInfo::ageHeaderToBin{$name}; + $anchor = "NAME"; + } + } + } + + my $tableHref; + if (defined($file_link) && + !$main::no_sourceview && + $main::show_tla && + defined($fileDetails)) { + + $tableHref = "href=\"$file_link#L__LINE__\""; + # href to anchor in frame doesn't seem to work in either firefox + # or chrome. However, this seems like the right syntax. + $tableHref = undef # $tableHref . ' target="source"' + if $main::frames; + } + # First column: name + my $nameClass = + $fileSummary->type() eq 'directory' ? 'coverDirectory' : 'coverFile'; + my $prefix; + if ($is_secondary) { + $prefix = "owner_"; + } else { + $prefix = ""; + $nameClass = 'ownerName' if $primary_key ne 'name'; + } + if ($is_secondary && + ( $primary_key eq 'name' || + ($primary_key ne 'name' && + $fileview == 0)) + ) { + if ($fileview == 0 && + $primary_key ne 'name' && + $main::flat) { + # link to (flat) file detail + my $relDir = $fileSummary->relativeDir(); + die("relative directory not set") unless $relDir; + $namecode = + $suppressFileHref ? $obj_name : + ("catfile($relDir, "$filename.gcov.$html_ext") . + "\" title=\"Click to go to $esc_name source detail\">$obj_name" + ); + } else { + # link to the entry in date/owner 'summary' table (keyed other way up) + $namecode = "$obj_name"; + } + } + write_html($handle, " \n"); + my $elide_note = ''; + if ($is_secondary == 2) { + die("unexpected cbData for $page_type, $primary_key, $name, " . + (defined($cbData) ? $cbData : 'undef')) + unless ($page_type eq 'owner' || + ($page_type eq 'date' && + ((defined($cbData) && + $cbData <= $#SummaryInfo::ageGroupHeader) || + (!defined($cbData) && + exists($SummaryInfo::ageHeaderToBin{$name}))))); + my $entry; + if ($primary_key eq 'name') { + die("source_link undefined: " + . + ( + defined($fileSummary) ? + ( + $fileSummary->type() . " $name:" . $fileSummary->name()) + : + "")) unless defined($source_link); + $entry = $source_link; + } else { + # select the appropriate bin... + my $id = + defined($cbData) ? + ($page_type eq 'owner' ? $cbData : + $SummaryInfo::ageGroupHeader[$cbData]) : + $name; + # need an anchor here - this is the destination of an HREF from + # the header 'owner' or 'date' summary table + $entry = '$id"; + } + my $class = + $primary_key eq 'name' ? + ($fileSummary->type() eq 'directory' ? 'coverDirectory' : + 'coverFile') : + 'ownerName'; + write_html($handle, " \n"); + $elide_note = '∗∗'; + } + my $span = (1 == $rowspan) ? "" : " rowspan=$rowspan"; + write_html($handle, + " \n"); + + # no 'owner' column if the entire directory is not part of the project + # (i.e., no files in this directory are in the repo) + if ((defined($showBinDetailColumn) && $dirSummary->hasOwnerInfo()) || + (defined($primary_key) && + $primary_key ne 'name' && + !$is_secondary) + ) { + $anchor =~ s/NAME/Total/; + $anchor .= "$asterisk" + if defined($asterisk); + write_html($handle, <$anchor +END_OF_HTML + } + foreach my $entry (@entries) { + my ($found, $hit, $med, $hi, $graph, $summary, $covType) = @{$entry}; + my $bar_graph; + my $class; + my $rate; + + # Generate bar graph if requested + if ($graph) { + if (!$is_secondary) { + $class = $prefix . 
'coverBar'; + $bar_graph = get_bar_graph_code($base_dir, $found, $hit); + } else { + # graph is distracting for the second-level elements - skip them + $bar_graph = ""; + $class = 'coverFile'; + } + write_html($handle, < + $bar_graph + +END_OF_HTML + } + # Get rate color and text + if ($found == 0) { + $rate = "-"; + $class = "Hi"; + } else { + $rate = rate($hit, $found, " %"); + $class = $rate_name[classify_rate($found, $hit, $med, $hi)]; + } + # Show negative number of items without coverage + $hit -= $found # negative number + if ($main::opt_missed); - # First column: filename - write_html($handle, < - + write_html($handle, <$rate END_OF_HTML - # Columns as defined - foreach $entry (@entries) { - my ($found, $hit, $med, $hi, $graph) = @{$entry}; - my $bar_graph; - my $class; - my $rate; - - # Generate bar graph if requested - if ($graph) { - $bar_graph = get_bar_graph_code($base_dir, $found, - $hit); - write_html($handle, < - $bar_graph - + if ($summary) { + my @keys = ("found"); + if ($main::show_hitTotalCol) { + push(@keys, $opt_missed ? "missed" : "hit"); + } + if ($main::show_tla) { + push(@keys, @{$activeTlaCols->{$covType}}); + } + foreach my $key (@keys) { + my $count = $summary->get($key); + #print("$name: $key " . $summary->get($key)); + $class = $page_type ne "owner" ? "coverNumDflt" : "ownerTla"; + my $v = ""; + if (defined($count) && 0 != $count) { + $count = -$count if $key eq 'missed'; + $v = $count; + # want to colorize the UNC, LBC, UIC rows if not zero + $class = "tla$key" + if (!$main::use_legacyLabels && + grep(/^$key$/, ("UNC", "LBC", "UIC"))); + + my $column = ''; + # look in file details to build link to first line + # '$tableHref is undef unless the data we need is available + if (defined($tableHref) && + !grep(/^$key$/, ('DUB', 'DCB')) && + grep(/^$key$/, @SummaryInfo::tlaPriorityOrder)) { + my $line; + my $label = + $main::use_legacyLabels ? + $SummaryInfo::tlaToLegacySrcLabel{$key} : + $key; + # need to keep $fileDetails object so we know the + # 'first location in ...' for hyperlink from the + # file table to the line in the file + my $title = "\"Go to first $label " . + SummaryInfo::type2str($covType) . 
' '; + if ('fileOrDir' eq $page_type) { + # go to first line of the indicated type in the file + $line = $fileDetails->nextTlaGroup($key) + if $covType == SummaryInfo::LINE_DATA; + $line = $fileDetails->nextBranchTlaGroup($key) + if $covType == SummaryInfo::BRANCH_DATA; + $line = $fileDetails->nextMcdcTlaGroup($key) + if $covType == SummaryInfo::MCDC_DATA; + } elsif ('owner' eq $page_type) { + my $owner = $summary->owner(); + $column = $owner; + $title .= "in '$owner' bin "; + $line = $fileDetails->nextInOwnerBin($owner, $key) + if $covType == SummaryInfo::LINE_DATA; + $line = + $fileDetails->nextBranchInOwnerBin($owner, $key) + if $covType == SummaryInfo::BRANCH_DATA; + $line = + $fileDetails->nextMcdcInOwnerBin($owner, $key) + if $covType == SummaryInfo::MCDC_DATA; + } elsif ('date' eq $page_type) { + my $agebin = $summary->bin(); + $column = $agebin; + $title .= + "in '$SummaryInfo::ageGroupHeader[$agebin]' bin "; + $line = $fileDetails->nextInDateBin($agebin, $key) + if $covType == SummaryInfo::LINE_DATA; + $line = + $fileDetails->nextBranchInDateBin($agebin, $key) + if $covType == SummaryInfo::BRANCH_DATA; + $line = + $fileDetails->nextMcdcInDateBin($agebin, $key) + if $covType == SummaryInfo::MCDC_DATA; + } else { + die("unexpected page detail type '$page_type'"); + } + $title .= "in $filename\""; + if ($class eq "tla$key") { + # add another CSS style to set the TLA-specific background + # color + $class .= " tlaBg$key"; + } + if (defined($line)) { + my $href = $tableHref; + $href =~ s/__LINE__/$line/; + $v = "$v END_OF_HTML - } - # Get rate color and text - if ($found == 0) { - $rate = "-"; - $class = "Hi"; - } else { - $rate = rate($hit, $found, " %"); - $class = $rate_name[classify_rate($found, $hit, - $med, $hi)]; - } - if ($opt_missed) { - # Show negative number of items without coverage - $hit = -($found - $hit); - } - write_html($handle, <$rate - + } # foreach key + + } else { + write_html($handle, <$hit / $found END_OF_HTML - } - # End of row - write_html($handle, < + } + } + # End of row + write_html($handle, < END_OF_HTML } - # -# write_file_table_detail_entry(filehandle, test_name, ([found, hit], ...)) +# write_file_table_detail_entry(filehandle, base_dir, test_name, bin_type, activeTlaCols, ([found, hit], ...)) # # Write entry for detail section in file table. # -sub write_file_table_detail_entry(*$@) +sub write_file_table_detail_entry(*$$$$@) { - my ($handle, $test, @entries) = @_; - my $entry; - - if ($test eq "") { - $test = "<unnamed>"; - } elsif ($test =~ /^(.*),diff$/) { - $test = $1." (converted)"; - } - # Testname - write_html($handle, < - -END_OF_HTML - # Test data - foreach $entry (@entries) { - my ($found, $hit) = @{$entry}; - my $rate = rate($hit, $found, " %"); - - write_html($handle, <$rate - + my ($handle, $base_dir, $test, $showBinDetail, $activeTlaCols, @entries) = + @_; + + if ($test eq "") { + $test = "<unnamed>"; + } elsif ($test =~ /^(.*),diff$/) { + $test = $1 . 
" (converted)"; + } + # Testname + write_html($handle, < + END_OF_HTML - } - - write_html($handle, < - -END_OF_HTML - - # ************************************************************* + # Test data + foreach my $entry (@entries) { + my ($found, $hit, $covtype, $callback) = @{$entry}; + my $rate = rate($hit, $found, " %"); + if (SummaryInfo::LINE_DATA == $covtype && + defined($showBinDetail)) { + write_html($handle, " \n"); + } + write_html($handle, " \n"); + write_html($handle, " \n"); + if ($main::show_hitTotalCol) { + write_html($handle, + " \n"); + } + if ($main::show_tla) { + foreach my $tla (@{$activeTlaCols->{$covtype}}) { + my $count = $callback->count($tla); + $count = "" if 0 == $count; + write_html($handle, " \n"); + } + } + } + write_html($handle, " \n"); } - # # write_file_table_epilog(filehandle) # @@ -3819,20 +10385,15 @@ END_OF_HTML sub write_file_table_epilog(*) { - # ************************************************************* - - write_html($_[0], < - -
+ # ************************************************************* + write_html($_[0], < + +
END_OF_HTML - ; - - # ************************************************************* } - # # write_test_table_prolog(filehandle, table_heading) # @@ -3841,30 +10402,24 @@ END_OF_HTML sub write_test_table_prolog(*$) { - # ************************************************************* - - write_html($_[0], < -


$file_heading
$t$file_heading
$file_heading
$str
$entry$namecode$elide_note$file_code$hit / $found$test$hit / $found$test$rate$found$hit$count
+ # ************************************************************* - - - + write_html($_[0], < +

- - - + + + - - + - -
$_[1]
$_[1]
-
+
+
END_OF_HTML - ; - # ************************************************************* + # ************************************************************* } - # # write_test_table_entry(filehandle, test_name, test_description) # @@ -3873,18 +10428,17 @@ END_OF_HTML sub write_test_table_entry(*$$) { - # ************************************************************* + # ************************************************************* + my $name = escape_id($_[1]); - write_html($_[0], <$_[1]  + write_html($_[0], <$_[1] 
$_[2]

END_OF_HTML - ; - # ************************************************************* + # ************************************************************* } - # # write_test_table_epilog(filehandle) # @@ -3893,251 +10447,494 @@ END_OF_HTML sub write_test_table_epilog(*) { - # ************************************************************* + # ************************************************************* - write_html($_[0], < -
- -
+ write_html($_[0], < + + + + +
END_OF_HTML - ; - # ************************************************************* + # ************************************************************* } - sub fmt_centered($$) { - my ($width, $text) = @_; - my $w0 = length($text); - my $w1 = $width > $w0 ? int(($width - $w0) / 2) : 0; - my $w2 = $width > $w0 ? $width - $w0 - $w1 : 0; + my ($width, $text) = @_; + my $w0 = length($text); + my $w1 = $width > $w0 ? int(($width - $w0) / 2) : 0; + my $w2 = $width > $w0 ? $width - $w0 - $w1 : 0; - return (" "x$w1).$text.(" "x$w2); + return (" " x $w1) . $text . (" " x $w2); } - # # write_source_prolog(filehandle) # # Write start of source code table. # -sub write_source_prolog(*) +sub write_source_prolog(*$$$) { - my $lineno_heading = " "; - my $branch_heading = ""; - my $line_heading = fmt_centered($line_field_width, "Line data"); - my $source_heading = " Source code"; - - if ($br_coverage) { - $branch_heading = fmt_centered($br_field_width, "Branch data"). - " "; - } - # ************************************************************* - - write_html($_[0], < - -
- - - -
${lineno_heading}${branch_heading}${line_heading} ${source_heading}
+ my ($handle, $fileHasProjectData, $showBranches, $showMcdc) = @_; + my $lineno_heading = " " x 9; + my $branch_heading = ""; + my $mcdc_heading = ""; + my $tlaWidth = 4; + my $age_heading = ""; + my $owner_heading = ""; + my $tla_heading = ""; + + if (defined($main::show_dateBins) && + $fileHasProjectData) { + $age_heading = fmt_centered(5, "Age"); + $owner_heading = fmt_centered(20, "Owner"); + } + if (defined($main::show_tla)) { + $tla_heading = fmt_centered($tlaWidth, "TLA"); + } + my $line_heading = fmt_centered($line_field_width, "Line data"); + my $source_heading = " Source code"; + + if ($showBranches) { + $branch_heading = fmt_centered($br_field_width, "Branch data") . " "; + } + if ($showMcdc) { + $mcdc_heading = fmt_centered($mcdc_field_width, "MC/DC data") . " "; + } + + # ************************************************************* + + write_html($handle, < + +
+ + + +
${age_heading} ${owner_heading} ${lineno_heading}${branch_heading}${mcdc_heading}${tla_heading}${line_heading} ${source_heading}
 END_OF_HTML
-	;
-
-	# *************************************************************
-}
-
-sub cmp_blocks($$)
-{
-	my ($a, $b) = @_;
-	my ($fa, $fb) = ($a->[0], $b->[0]);
 
-	return $fa->[0] <=> $fb->[0] if ($fa->[0] != $fb->[0]);
-	return $fa->[1] <=> $fb->[1];
+    # *************************************************************
 }
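
The column headings above are centered with fmt_centered(), defined just above: the leftover width is split around the text, any odd space lands on the right, and text wider than the field comes back unpadded. A quick check (the first two widths match the ones used above; the last is deliberately too small):

    print "'", fmt_centered(5, "Age"), "'\n";    # ' Age '  - one space each side
    print "'", fmt_centered(4, "TLA"), "'\n";    # 'TLA '   - the odd space goes right
    print "'", fmt_centered(2, "Owner"), "'\n";  # 'Owner'  - no padding when too wide
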
 
 #
-# get_branch_blocks(brdata)
-#
-# Group branches that belong to the same basic block.
+# get_block_len(block)
 #
-# Returns: [block1, block2, ...]
-# block:   [branch1, branch2, ...]
-# branch:  [block_num, branch_num, taken_count, text_length, open, close]
+# Calculate total text length of all branches in a block of branches.
 #
 
-sub get_branch_blocks($)
+sub get_block_len($)
 {
-	my ($brdata) = @_;
-	my $last_block_num;
-	my $block = [];
-	my @blocks;
-
-	return () if (!defined($brdata));
-
-	# Group branches
-	foreach my $entry (split(/:/, $brdata)) {
-		my ($block_num, $branch, $taken) = split(/,/, $entry);
-		my $br;
-
-		if (defined($last_block_num) && $block_num != $last_block_num) {
-			push(@blocks, $block);
-			$block = [];
-		}
-		$br = [$block_num, $branch, $taken, 3, 0, 0];
-		push(@{$block}, $br);
-		$last_block_num = $block_num;
-	}
-	push(@blocks, $block) if (scalar(@{$block}) > 0);
-
-	# Add braces to first and last branch in group
-	foreach $block (@blocks) {
-		$block->[0]->[$BR_OPEN] = 1;
-		$block->[0]->[$BR_LEN]++;
-		$block->[scalar(@{$block}) - 1]->[$BR_CLOSE] = 1;
-		$block->[scalar(@{$block}) - 1]->[$BR_LEN]++;
-	}
+    my ($block) = @_;
+    my $len = 0;
 
-	return sort(cmp_blocks @blocks);
+    foreach my $branch (@{$block}) {
+        $len += $branch->[$BR_LEN];
+    }
+    return $len;
 }
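
For orientation: with the tuples built by get_block_list() below, each branch or condition starts with a text length of 3 and gains one extra character when it opens or closes its group, so a two-entry block reports 3 + 1 + 3 + 1 = 8. That total is what distribute_blocks() later compares against the available field width.
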
 
 #
-# get_block_len(block)
+# get_block_list(brdata | mcdc_data)
 #
-# Calculate total text length of all branches in a block of branches.
+# common method for branch and MC/DC records
+# Group elements that belong to the same basic block (branch data) or group (MC/DC data)
+#
+# Returns: [block1, block2, ...] <- in order of increasing block number/group size
+# block:   [branch1, branch2, ...] <- in order of increasing branch/expression index
+# branch:  [block_num, branch_num, branch_data, text_length, open, close]
+#          (MC/DC entries carry [group_size, expr_index, sense, expr_data, text_length, open, close])
 #
 
-sub get_block_len($)
+sub get_block_list($)
 {
-	my ($block) = @_;
-	my $len = 0;
-	my $branch;
-
-	foreach $branch (@{$block}) {
-		$len += $branch->[$BR_LEN];
-	}
-
-	return $len;
+    my $data = shift;
+    return () unless defined($data);
+
+    my @blocks;
+
+    if ('BranchEntry' eq ref($data)) {
+
+        my $block = [];
+        my $last_block_num;
+
+        # Group branches
+        foreach my $block_num (sort $data->blocks()) {
+            my $blockData = $data->getBlock($block_num);
+            my $branch    = 0;
+            foreach my $br (@$blockData) {
+
+                if (defined($last_block_num) && $block_num != $last_block_num) {
+                    push(@blocks, $block);
+                    $block = [];
+                }
+                my $br = [$block_num, $branch, $br, 3, 0, 0];
+                push(@{$block}, $br);
+                $last_block_num = $block_num;
+                ++$branch;
+            }
+        }
+        push(@blocks, $block) if (scalar(@{$block}) > 0);
+    } else {
+        die('unexpected ' . ref($data)) unless 'MCDC_Block' eq ref($data);
+        # Group MC/DC groups
+        foreach my $groupSize (sort keys %{$data->groups()}) {
+            my $exprs = $data->groups()->{$groupSize};
+            my $block = [];
+            push(@blocks, $block);
+            foreach my $expr (@$exprs) {
+                # display 'true' then 'false' sense - to match what we do for branches
+                # and to match behaviour of ?: operator
+                foreach my $sense (1, 0) {
+                    my $cond =
+                        [$groupSize, $expr->index(), $sense, $expr, 3, 0, 0];
+                    push(@{$block}, $cond);
+                }
+            }
+        }
+    }
+    # Add braces to first and last branch in group
+    foreach my $block (@blocks) {
+        $block->[0]->[$BR_OPEN] = 1;
+        $block->[0]->[$BR_LEN]++;
+        $block->[$#$block]->[$BR_CLOSE] = 1;
+        $block->[$#$block]->[$BR_LEN]++;
+    }
+    return @blocks;
 }
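
As a concrete sketch of the branch case (the branch objects themselves are elided), a BranchEntry with a single block holding two branches comes back as one block whose first and last tuples carry the brace markers:

    # [ [ 0, 0, $br0, 4, 1, 0 ],   # open flag set, text length bumped 3 -> 4
    #   [ 0, 1, $br1, 4, 0, 1 ] ]  # close flag set, text length bumped 3 -> 4

For an MCDC_Block, each expression contributes a true-sense and a false-sense tuple per group, and the same first/last brace marking applies.
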
 
+# distribute blocks into lines - trying to keep groups on a single line,
+# if possible
+sub distribute_blocks($$)
+{
+    my ($blocks, $field_width) = @_;
+
+    my $line_len = 0;
+    my $line     = [];    # [branch2|" ", branch|" ", ...]
+    my @lines;            # [line1, line2, ...]
+    my @result;
+
+    # Distribute blocks to lines
+    foreach my $block (@$blocks) {
+        my $block_len = get_block_len($block);
+
+        # Does this block fit into the current line?
+        if ($line_len + $block_len <= $field_width) {
+            # Add it
+            $line_len += $block_len;
+            push(@{$line}, @{$block});
+            next;
+        } elsif ($block_len <= $field_width) {
+            # It would fit if the line was empty - add it to new line
+            push(@lines, $line);
+            $line_len = $block_len;
+            $line     = [@{$block}];
+            next;
+        }
+        # Split the block into several lines
+        foreach my $branch (@{$block}) {
+            if ($line_len + $branch->[$BR_LEN] >= $field_width) {
+                # Start a new line
+                if (($line_len + 1 <= $field_width) &&
+                    scalar(@{$line}) > 0 &&
+                    !$line->[scalar(@$line) - 1]->[$BR_CLOSE]) {
+                    # Try to align branch symbols to be in
+                    # one row
+                    push(@{$line}, " ");
+                }
+                push(@lines, $line);
+                $line_len = 0;
+                $line     = [];
+            }
+            push(@{$line}, $branch);
+            $line_len += $branch->[$BR_LEN];
+        }
+    }
+    push(@lines, $line);
+
+    return @lines;
+}
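
The packing above is greedy on block text length; two illustrative cases (widths chosen for the example):

    # field width 16, block lengths (8, 8, 8):
    #   line 1: block 1 + block 2   (8 + 8 <= 16)
    #   line 2: block 3
    # field width 6, a single block of length 8:
    #   the block is split entry by entry; an alignment space may be added
    #   before a wrap when the previous entry did not close its group
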
 
 #
-# get_branch_html(brdata)
+# get_branch_html(brdata, printCallbackStruct)
 #
 # Return a list of HTML lines which represent the specified branch coverage
 # data in source code view.
 #
 
-sub get_branch_html($)
-{
-	my ($brdata) = @_;
-	my @blocks = get_branch_blocks($brdata);
-	my $block;
-	my $branch;
-	my $line_len = 0;
-	my $line = [];	# [branch2|" ", branch|" ", ...]
-	my @lines;	# [line1, line2, ...]
-	my @result;
-
-	# Distribute blocks to lines
-	foreach $block (@blocks) {
-		my $block_len = get_block_len($block);
-
-		# Does this block fit into the current line?
-		if ($line_len + $block_len <= $br_field_width) {
-			# Add it
-			$line_len += $block_len;
-			push(@{$line}, @{$block});
-			next;
-		} elsif ($block_len <= $br_field_width) {
-			# It would fit if the line was empty - add it to new
-			# line
-			push(@lines, $line);
-			$line_len = $block_len;
-			$line = [ @{$block} ];
-			next;
-		}
-		# Split the block into several lines
-		foreach $branch (@{$block}) {
-			if ($line_len + $branch->[$BR_LEN] >= $br_field_width) {
-				# Start a new line
-				if (($line_len + 1 <= $br_field_width) &&
-				    scalar(@{$line}) > 0 &&
-				    !$line->[scalar(@$line) - 1]->[$BR_CLOSE]) {
-					# Try to align branch symbols to be in
-					# one # row
-					push(@{$line}, " ");
-				}
-				push(@lines, $line);
-				$line_len = 0;
-				$line = [];
-			}
-			push(@{$line}, $branch);
-			$line_len += $branch->[$BR_LEN];
-		}
-	}
-	push(@lines, $line);
-
-	# Convert to HTML
-	foreach $line (@lines) {
-		my $current = "";
-		my $current_len = 0;
-
-		foreach $branch (@$line) {
-			# Skip alignment space
-			if ($branch eq " ") {
-				$current .= " ";
-				$current_len++;
-				next;
-			}
-
-			my ($block_num, $br_num, $taken, $len, $open, $close) =
-			   @{$branch};
-			my $class;
-			my $title;
-			my $text;
-
-			if ($taken eq '-') {
-				$class	= "branchNoExec";
-				$text	= " # ";
-				$title	= "Branch $br_num was not executed";
-			} elsif ($taken == 0) {
-				$class	= "branchNoCov";
-				$text	= " - ";
-				$title	= "Branch $br_num was not taken";
-			} else {
-				$class	= "branchCov";
-				$text	= " + ";
-				$title	= "Branch $br_num was taken $taken ".
-					  "time";
-				$title .= "s" if ($taken > 1);
-			}
-			$current .= "[" if ($open);
-			$current .= "";
-			$current .= $text."";
-			$current .= "]" if ($close);
-			$current_len += $len;
-		}
-
-		# Right-align result text
-		if ($current_len < $br_field_width) {
-			$current = (" "x($br_field_width - $current_len)).
-				   $current;
-		}
-		push(@result, $current);
-	}
-
-	return @result;
+sub get_branch_html($$)
+{
+    my ($brdata, $cbdata) = @_;
+    my $differentialBranch;
+    my $fileDetail = $cbdata->sourceDetail();
+    if (defined($main::show_tla)) {
+        my $lineNo   = $cbdata->lineNo();
+        my $lineData = $cbdata->lineData()->line($lineNo);
+        $differentialBranch = $lineData->differential_branch()
+            if defined($lineData);
+    }
+    # build the 'blocks' array from differential data if we have it..
+    my @blocks = get_block_list(
+                  defined($differentialBranch) ? $differentialBranch : $brdata);
+
+    my @lines = distribute_blocks(\@blocks, $br_field_width);
+
+    # Convert to HTML
+    #  branch and MC/DC code is similar - but merging only makes things
+    #  more complicated as the details of the text, popups, etc are different
+    # maybe revisit later
+    my @result;
+    my %tlaLinks;
+
+    foreach my $line (@lines) {
+        my $current     = "";
+        my $current_len = 0;
+
+        foreach my $branch (@$line) {
+            # Skip alignment space
+            if ($branch eq " ") {
+                $current .= " ";
+                $current_len++;
+                next;
+            }
+
+            my ($block_num, $br_num, $br, $len, $open, $close) = @{$branch};
+
+            my $class;
+            my $prefix;
+            my $tla;
+            my $base_count;
+            if ('ARRAY' ne ref($br)) {
+                # vanilla case - no differential coverage info
+                die("differential branch coverage but no TLA")
+                    if defined($differentialBranch);
+                if ($br->data() eq '-') {
+                    $class = "tlaUNC";
+                } elsif ($br->data() == 0) {
+                    $class = "tlaUNC";
+                } else {
+                    $class = 'tlaGBC';
+                }
+                $prefix = '';
+            } else {
+                die("differential branch coverage but no TLA")
+                    unless defined($differentialBranch);
+                $tla        = $br->[1];
+                $base_count = $br->[2]->[0];
+                $br         = $br->[0];
+                $class      = "tla$tla";
+                my $label =
+                    $main::use_legacyLabels ?
+                    $SummaryInfo::tlaToLegacySrcLabel{$tla} :
+                    $tla;
+                $prefix = $label . ": ";
+            }
+            my ($char, $title);
+
+            my $br_name =
+                defined($br->expr()) ? '"' . $br->expr() . '"' : $br_num;
+            my $taken = $br->data();
+            if ($taken eq '-') {
+                $char  = "#";
+                $title = "${prefix}Branch $br_name was not executed";
+                $title .=
+                    " (previously taken $base_count time" .
+                    (1 == $base_count ? '' : 's') . ')'
+                    if (defined($tla) &&
+                        ($tla eq 'LBC' || $tla eq 'ECB'));
+            } elsif ($taken == 0) {
+                $char  = "-";
+                $title = "${prefix}Branch $br_name was not taken";
+                $title .=
+                    " (previously taken $base_count time" .
+                    (1 == $base_count ? '' : 's') . ')'
+                    if (defined($tla) &&
+                        ($tla eq 'LBC' || $tla eq 'ECB'));
+            } else {
+                $char  = "+";
+                $title = "${prefix}Branch $br_name was taken $taken time" .
+                    (($taken > 1) ? "s" : "");
+            }
+            $title = escape_html($title) if defined($br->expr());
+            $current .= "[" if ($open);
+
+            if (!$main::no_sourceview &&
+                defined($differentialBranch)) {
+                my $href;
+                if (exists($tlaLinks{$tla})) {
+                    $href = $tlaLinks{$tla};
+                } else {
+                    my $line = $differentialBranch->line();
+                    my $next = $fileDetail->nextBranchTlaGroup($tla, $line);
+                    $href =
+                        "$char";
+                    $tlaLinks{$tla} = $href;
+                }
+                $href =~ s#TITLE#$title#;
+                $current .= " $href ";
+            } else {
+                $current .=
+                    " $char ";
+            }
+            $current .= "]" if ($close);
+            $current_len += $len;
+        }
+
+        # Right-align result text
+        if ($current_len < $br_field_width) {
+            $current = (" " x ($br_field_width - $current_len)) . $current;
+        }
+        push(@result, $current);
+    }
+
+    return @result;
 }
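
Both the removed and the new loop reduce a branch's taken count to a single display character. A helper distilling just that mapping (the helper name is hypothetical; '-' is how an unexecuted branch is recorded in the coverage data):

    sub branch_char {
        my ($taken) = @_;
        return '#' if $taken eq '-';    # branch was not executed
        return '-' if $taken == 0;      # executed, but never taken
        return '+';                     # taken at least once
    }
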
 
+sub get_mcdc_html($$)
+{
+    # @todo could be shared with the branch-handling code
+    my ($mcdc_data, $cbdata) = @_;
+    my $differentialMcdc;
+    my $fileDetail = $cbdata->sourceDetail();
+    if (defined($main::show_tla)) {
+        my $lineNo   = $cbdata->lineNo();
+        my $lineData = $cbdata->lineData()->line($lineNo);
+        $differentialMcdc = $lineData->differential_mcdc()
+            if defined($lineData);
+    }
+    # build the 'blocks' array from differential data if we have it..
+    my @blocks = get_block_list(
+                   defined($differentialMcdc) ? $differentialMcdc : $mcdc_data);
+
+    my @lines = distribute_blocks(\@blocks, $mcdc_field_width);
+
+    # Convert to HTML
+    #  branch and MC/DC code is similar - but merging only makes things
+    #  more complicated as the details of the text, popups, etc are different
+    # maybe revisit later
+    my @result;
+    my %tlaLinks;
+
+    foreach my $line (@lines) {
+        my $current     = "";
+        my $current_len = 0;
+
+        foreach my $cond (@$line) {
+            # Skip alignment space
+            unless ('ARRAY' eq ref($cond)) {
+                die("unexpected 'cond' entry '$cond'") unless ($cond eq " ");
+                $current .= " ";
+                $current_len++;
+                next;
+            }
+
+            my ($groupSize, $exprIdx, $sense, $expr, $len, $open, $close) =
+                @$cond;
+            die("unexpected expr type") unless ref($expr) eq 'MCDC_Expression';
+
+            my $count = $expr->count($sense);
+            my $taken;
+            my $class;
+            my $prefix;
+            my $tla;
+            my $base_count;
+            if ('ARRAY' ne ref($count)) {
+                # vanilla case - no differential coverage info
+                die("differential MC/DC coverage but no TLA")
+                    if defined($differentialMcdc);
+                $taken = $count;
+                if ($count == 0) {
+                    $class = "tlaUNC";
+                } else {
+                    $class = 'tlaGBC';
+                }
+                $prefix = '';
+            } else {
+                die("differential MC/DC coverage but no TLA")
+                    unless defined($differentialMcdc);
+                ($tla, $base_count, $taken) = @$count;
+
+                $class = "tla$tla";
+                my $label =
+                    $main::use_legacyLabels ?
+                    $SummaryInfo::tlaToLegacySrcLabel{$tla} :
+                    $tla;
+                $prefix = $label . ": ";
+            }
+            my ($char, $title);
+
+            my $expr_name = $expr->expression();
+            $title = $prefix . ($sense ? 'True' : 'False') .
+                ' sense of expression "' . $expr_name . '" ';
+            if ($expr->parent()->num_groups() > 1) {
+                $title .= 'in group "' . $expr->groupSize() . '" ';
+            }
+
+            if (!defined($taken)) {
+                $char = '-';
+                $title .= 'was dropped';
+                $title .=
+                    ' (previously ' .
+                    (1 == $base_count ? '' : 'not ') . 'sensitized)'
+                    if (defined($tla) &&
+                        ($tla eq 'LBC' || $tla eq 'ECB'));
+
+            } elsif ($taken == 0) {
+                $char = $sense ? 't' : 'f';
+                $title .= 'was not sensitized';
+                $title .=
+                    ' (previously ' .
+                    (1 == $base_count ? '' : 'not ') . 'sensitized)'
+                    if (defined($tla) &&
+                        ($tla eq 'LBC' || $tla eq 'ECB'));
+            } else {
+                $char = $sense ? 'T' : 'F';
+                $title .= 'was sensitized';
+            }
+            $title = escape_html($title);
+            $current .= "[" if ($open);
+
+            if (!$main::no_sourceview &&
+                defined($differentialMcdc)) {
+                my $href;
+                if (exists($tlaLinks{$tla})) {
+                    $href = $tlaLinks{$tla};
+                } else {
+                    my $line = $differentialMcdc->line();
+                    my $next = $fileDetail->nextMcdcTlaGroup($tla, $line);
+                    $href =
+                        "SENSE";
+                    # cache the href as 'next' lookup is moderately expensive
+                    $tlaLinks{$tla} = $href;
+                }
+                # keep the link but update the sense character
+                $href =~ s/SENSE/$char/;
+                $href =~ s#TITLE#$title#;
+                $current .= " $href ";
+            } else {
+                $current .=
+                    " $char ";
+            }
+            $current .= "]" if ($close);
+            $current_len += $len;
+        }
+
+        # Right-align result text
+        if ($current_len < $mcdc_field_width) {
+            $current = (" " x ($mcdc_field_width - $current_len)) . $current;
+        }
+        push(@result, $current);
+    }
+
+    return @result;
+}
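The single character emitted for each MC/DC condition encodes both the sense
and whether that sense was exercised: 'T'/'F' when it was sensitized, 't'/'f'
when it was not, and '-' when the expression was dropped. The same mapping as
a small standalone function (the function name is made up):

    use strict;
    use warnings;

    sub mcdc_char
    {
        my ($sense, $taken) = @_;
        return '-' unless defined($taken);    # expression was dropped
        my $char = $sense ? 't' : 'f';        # not sensitized
        $char = uc($char) if $taken > 0;      # sensitized at least once
        return $char;
    }

    # mcdc_char(1, 3) eq 'T';  mcdc_char(0, 0) eq 'f';  mcdc_char(1, undef) eq '-'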
 
 #
 # format_count(count, width)
@@ -4147,96 +10944,429 @@ sub get_branch_html($)
 
 sub format_count($$)
 {
-	my ($count, $width) = @_;
-	my $result;
-	my $exp;
-
-	$result = sprintf("%*.0f", $width, $count);
-	while (length($result) > $width) {
-		last if ($count < 10);
-		$exp++;
-		$count = int($count/10);
-		$result = sprintf("%*s", $width, ">$count*10^$exp");
-	}
-	return $result;
+    my ($count, $width) = @_;
+    my $result;
+    my $exp;
+    my $negative = 0 > $count;
+    if ($negative) {
+        $width -= 2;
+        $count = -$count;
+    }
+
+    $result = sprintf("%*.0f", $width, $count);
+    while (length($result) > $width) {
+        last if ($count < 10);
+        $exp++;
+        $count  = int($count / 10);
+        $result = sprintf("%*s", $width, ">$count*10^$exp");
+    }
+    if ($negative) {
+        # surround number with parens
+        $result =~ s/^( *)(\S+)$/$1\($2\)/;
+    }
+    return $result;
 }
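format_count() still compresses a count that does not fit its column into the
">N*10^exp" form, and the rewrite additionally renders negative counts (used
above to show the baseline count of code that is now excluded or lost) inside
parentheses. A quick way to see the effect, using the routine above on a few
arbitrary sample values:

    # exercise the routine above: a small count, one that must be
    # compressed, and a negative one
    foreach my $count (7, 1234567890, -42) {
        printf("|%s|\n", format_count($count, 9));
    }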
 
 #
-# write_source_line(filehandle, line_num, source, hit_count, converted,
-#                   brdata)
+# write_source_line(filehandle, cbdata, source, hit_count, brdata, mcdc,
+#                   printCallbackStruct)
 #
 # Write formatted source code line. Return a line in a format as needed
 # by gen_png()
 #
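The value returned for gen_png() is essentially the per-line marker character,
the count (or the baseline count for excluded code), a colon, and the raw
source text; lines with no coverage data return an empty string. Roughly (the
marker and values below are made-up placeholders for whatever %lcovutil::pngChar
and the coverage data supply):

    my ($pchar, $count, $source) = ('+', 5, 'return 0;');    # placeholder values
    my $result = $pchar . $count . ':' . $source;            # "+5:return 0;"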
 
-sub write_source_line(*$$$$$)
-{
-	my ($handle, $line, $source, $count, $converted, $brdata) = @_;
-	my $source_format;
-	my $count_format;
-	my $result;
-	my $anchor_start = "";
-	my $anchor_end = "";
-	my $count_field_width = $line_field_width - 1;
-	my @br_html;
-	my $html;
-
-	# Get branch HTML data for this line
-	@br_html = get_branch_html($brdata) if ($br_coverage);
-
-	if (!defined($count)) {
-		$result		= "";
-		$source_format	= "";
-		$count_format	= " "x$count_field_width;
-	}
-	elsif ($count == 0) {
-		$result		= $count;
-		$source_format	= '';
-		$count_format	= format_count($count, $count_field_width);
-	}
-	elsif ($converted && defined($highlight)) {
-		$result		= "*".$count;
-		$source_format	= '';
-		$count_format	= format_count($count, $count_field_width);
-	}
-	else {
-		$result		= $count;
-		$source_format	= '';
-		$count_format	= format_count($count, $count_field_width);
-	}
-	$result .= ":".$source;
-
-	# Write out a line number navigation anchor every $nav_resolution
-	# lines if necessary
-	$anchor_start	= "";
-	$anchor_end	= "";
-
-
-	# *************************************************************
-
-	$html = $anchor_start;
-	$html .= "".sprintf("%8d", $line)." ";
-	$html .= shift(@br_html).":" if ($br_coverage);
-	$html .= "$source_format$count_format : ";
-	$html .= escape_html($source);
-	$html .= "" if ($source_format);
-	$html .= $anchor_end."\n";
-
-	write_html($handle, $html);
-
-	if ($br_coverage) {
-		# Add lines for overlong branch information
-		foreach (@br_html) {
-			write_html($handle, "".
-				   "         $_\n");
-		}
-	}
-	# *************************************************************
-
-	return($result);
+sub write_source_line(*$$$$$$$)
+{
+    my ($handle, $srcline, $count, $showBranches, $brdata, $showMcdc, $mcdc,
+        $cbdata)
+        = @_;
+    my $line         = $cbdata->lineNo();
+    my $fileCovInfo  = $cbdata->lineData();
+    my $sourceDetail = $cbdata->sourceDetail();
+    my $source       = $srcline->text();
+    my $src_owner    = $srcline->owner();
+    my $src_age      = $srcline->age();
+    my $source_format;
+    my $count_format;
+    my $result;
+    my $anchor_start      = "";
+    my $anchor_end        = "";
+    my $count_field_width = $line_field_width - 1;
+    my $html;
+    my $tla;
+    my $base_count;
+    my $curr_count;
+    my $bucket;
+
+    my @prevData = ($cbdata->current('tla'),
+                    $cbdata->current('owner'),
+                    $cbdata->current('age'));
+
+    # Get branch HTML data for this line
+    my @br_html = get_branch_html($brdata, $cbdata) if ($showBranches);
+
+    my @mcdc_html = get_mcdc_html($mcdc, $cbdata) if ($showMcdc);
+
+    my $thisline = $fileCovInfo->lineMap()->{$line};
+    my $tlaIsHref;
+    if (!defined($thisline) ||
+        (!defined($count) &&
+            !($thisline->in_base() || $thisline->in_curr()))
+    ) {
+        $result        = "";
+        $source_format = "";
+        $count_format  = " " x $count_field_width;
+        $tla           = $cbdata->tla(undef, $line);
+    } else {
+        $base_count = $thisline->base_count();
+        $curr_count = $thisline->curr_count();
+        $bucket     = $thisline->tla();
+        # use callback data to keep track of the most recently seen TLA -
+        #   $tla is either "   " (3 spaces), if it is the same as on the previous
+        #   line or there is no TLA, or "TLA " (4 characters) which is stuck
+        #   into the fixed-width source line - right before the count.
+        $tla = $cbdata->tla($bucket, $line);
+        my $class = "tla$bucket";
+        if ($main::show_tla && $tla =~ /$bucket/) {
+            # maybe we want to link only the uncovered code categories?
+            my $next = $sourceDetail->nextTlaGroup($bucket, $line);
+            $tlaIsHref = 1;
+            # if no next segment in this category - then go to top of page.
+            $next = defined($next) ? "L$next" : "top";
+            my $anchorClass = "";
+            if ($bucket ne 'UNK') {
+                $class .= " tlaBg$bucket";
+                $anchorClass = " class=\"tlaBg$bucket\" ";
+            }
+            my $label =
+                $main::use_legacyLabels ?
+                $SummaryInfo::tlaToLegacySrcLabel{$bucket} :
+                $bucket;
+            my $popup = " title=\"Next $label group\"";
+            $label .= ' ' x ($main::tla_field_width - length($label));
+            $tla = "$label";
+        }
+        $source_format = "";
+
+        my $pchar;
+        if (exists($lcovutil::pngChar{$bucket})) {
+            $pchar = $lcovutil::pngChar{$bucket};
+        } else {
+            lcovutil::ignorable_error($lcovutil::ERROR_UNKNOWN_CATEGORY,
+                                      "unexpected TLA '$bucket'");
+            $pchar = '';
+            $count = 0 unless defined($count);
+        }
+        if ($bucket eq "ECB" ||
+            $bucket eq "EUB") {
+            !defined($count) or
+                die("excluded code should have undefined count");
+            # show previous count for excluded code that had been hit
+            if ($bucket eq "ECB") {
+                $count_format = format_count(-$base_count, $count_field_width);
+            } else {
+                $count_format = " " x $count_field_width;
+            }
+            $result = $pchar . $base_count;
+        } else {
+            if (!defined($count)) {
+                # this is manufactured data...make something up
+                $count = $curr_count;
+            }
+            defined($count) && "" ne $count or
+                die("code should have defined count");
+            if ($bucket eq 'LBC') {
+                $count_format = format_count(-$base_count, $count_field_width);
+            } else {
+                $count_format = format_count($count, $count_field_width);
+            }
+            $result = $pchar . $count;
+        }
+        # '$result' is used to generate the PNG frame
+        info(2,
+             "    $bucket: line=$line " .
+                 (defined($count) ? "count= $count " : "") . ' curr=' .
+                 (defined($curr_count) ? $curr_count : '-') . ' base=' .
+                 (defined($base_count) ? $base_count : '-') . "\n");
+    }
+    $result .= ":" . $source;
+
+    my $tooltip =
+        @SourceFile::annotateScript ? $SourceFile::annotateTooltip : '';
+    if ($tooltip ne '') {
+        my $lineData = $sourceDetail->line($line);
+        my $commitId = $lineData->commit();
+        if (defined($commitId) &&
+            $commitId ne 'NONE') {
+            my $date = $lineData->date();
+            $date =~ s/(T.+)$//;    # just the year/month/day part
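+            # recognized tooltip template tokens, substituted below:
+            #   %U owner, %F owner full name, %D full date/time, %d date only,
+            #   %A age, %C commit id, %l line number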
+            foreach my $p (['%U', $lineData->owner()],
+                           ['%F', $lineData->full_name()],
+                           ['%D', $lineData->date()],
+                           ['%d', $date],
+                           ['%A', $lineData->age()],
+                           ['%C', $commitId],
+                           ['%l', $line],
+            ) {
+                $tooltip =~ s/$p->[0]/$p->[1]/g;
+            }
+            $tooltip = " title=\"$tooltip\"";
+        } else {
+            $tooltip = '';
+        }
+    }
+    # Write out a line number navigation anchor every $nav_resolution
+    # lines if necessary
+    $anchor_start = "";
+    $anchor_end   = "";
+
+    # *************************************************************
+
+    $html = $anchor_start;
+    # we want to colorize the date/owner part of un-hit lines only
+    my $html_continuation_leader = "";    # for continued lines
+    if (defined($main::show_dateBins) &&
+        $sourceDetail->isProjectFile()) {
+        DATE_SECTION: {
+
+            my $ageLen   = $main::age_field_width;
+            my $ownerLen = $main::owner_field_width;
+
+            # need to space over on continuation lines
+            $html_continuation_leader = ' ' x ($ageLen + $ownerLen + 2);
+
+            if (!defined($count) &&
+                (!defined($main::show_nonCodeOwners) ||
+                    0 == $main::show_nonCodeOwners)
+                &&
+                (   !defined($bucket) ||
+                    ($bucket ne 'EUB' &&
+                        $bucket ne 'ECB'))
+            ) {
+                # suppress the date and owner entries on non-code lines
+                $html .= $html_continuation_leader;
+                last DATE_SECTION;
+            }
+
+            my $span    = "";
+            my $endspan = "";
+            my $bgclass = "";
+            if (defined($count) && 0 == $count) {
+                # probably want to pick a color based on which
+                #   date bin it is in.
+                # right now, picking based on TLA.
+                $bgclass = " class=\"tlaBg$bucket\""
+                    if (defined($bucket) &&
+                        $bucket ne "EUB" &&
+                        $bucket ne "ECB");
+
+                #$html .= "";
+                if ("" ne $source_format) {
+                    OWNER: {
+                        if (!defined($src_owner) || !defined($src_age)) {
+                            # maybe this should be a different error type?
+                            lcovutil::ignorable_error(
+                                      $lcovutil::ERROR_UNMAPPED_LINE,
+                                      "undefined owner/age for $bucket $line " .
+                                          $sourceDetail->path());
+                            last OWNER;
+                        }
+                        # add a 'title="tooltip"' popup - to give owner, date, etc
+                        my $title = "span title=\"$src_owner $src_age days ago";
+                        if (defined($main::show_dateBins)) {
+                            my $bin = SummaryInfo::findAgeBin($src_age);
+                            $title .=
+                                " (bin $bin: " .
+                                $SummaryInfo::ageGroupHeader[$bin] . ")";
+                        }
+                        $title .= "\"";
+                        $span = $source_format;
+                        $span =~ s/span/$title/;
+                        $endspan = "";
+                    }
+                }
+            }    # OWNER block
+
+            # determine if this line is going to be the target of a 'date', 'owner'
+            # or TLA navigation href.
+            #  - it is possible for any of these to have changed from the previous
+            #    line, even if the others are unchanged:
+            #      - same TLA but different author
+            #      - same author but different date bin, .. and so on
+            # If it is a leader, then we need to insert the 'owner' and/or 'date'
+            #   link to go to the next group in this bin (even if the owner or
+            #   date bin has not changed from the previous line)
+            my $tlaChanged = defined($bucket) && $prevData[0] ne $bucket;
+
+            my $needOwnerHref = @SourceFile::annotateScript &&
+                ($tlaChanged ||
+                 (defined($bucket) && $prevData[1] ne $src_owner));
+
+            my $newBin = SummaryInfo::findAgeBin($src_age);
+            defined($prevData[2]) || $line == 1 or
+                die("unexpected uninitialized age");
+
+            my $needDateHref = (
+                           $tlaChanged || (defined($bucket) &&
+                               SummaryInfo::findAgeBin($prevData[2]) != $newBin)
+            );
+
+            if ($needDateHref) {
+                my $matchLine = $cbdata->nextDate($newBin, $bucket);
+                $needDateHref = 0
+                    if (defined($matchLine) && $matchLine != $line);
+            }
+            if ($needOwnerHref) {
+                # slightly complicated logic:
+                #   - there can be non-code lines owned by someone else
+                #     between code lines owned by '$src_owner', such that
+                #     all the code lines have the same TLA.
+                # In that case, we just insert an href at the top of the
+                # block to take us past all of them - to the next code block
+                # owned by $src_owner with this TLA, which is separated by at least
+                # one line of code either owned by someone else, or with a different
+                # TLA.
+                my $matchLine = $cbdata->nextOwner($src_owner, $bucket);
+                # don't insert the owner href if this isn't the line we wanted
+                $needOwnerHref = 0
+                    if (defined($matchLine) && $matchLine != $line);
+            }
+
+            my $age   = $cbdata->age($src_age, $line);
+            my $owner = $cbdata->owner($src_owner);
+
+            # this HTML block turns into
+            #   "int name " <- note trailing space
+            #  .. but the age and name might be hrefs
+
+            $html .= $span;
+            # then 5 characters of 'age' (might be empty)
+            if ($needDateHref) {
+                # next line with this TLA, in this date bin
+                my $next =
+                    $sourceDetail->nextInDateBin($newBin, $bucket, $line);
+                $cbdata->nextDate($newBin, $bucket, $next);
+
+                $next = defined($next) ? "L$next" : "top";
+                my $dateBin = $SummaryInfo::ageGroupHeader[$newBin];
+                my $label =
+                    $main::use_legacyLabels ?
+                    $SummaryInfo::tlaToLegacySrcLabel{$bucket} :
+                    $bucket;
+                my $popup =
+                    " title=\"next $label in “$dateBin” bin\"";
+                $html .= ((' ' x ($ageLen - length($src_age))) .
+                          "$src_age ");
+            } else {
+                $html .= sprintf("%${ageLen}s ", $age);
+            }
+
+            if ($needOwnerHref) {
+                # next line with this TLA, by this owner..
+                my $next =
+                    $sourceDetail->nextInOwnerBin($src_owner, $bucket, $line);
+                $cbdata->nextOwner($src_owner, $bucket, $next);
+                $next = defined($next) ? "L$next" : "top";
+                my $label =
+                    $main::use_legacyLabels ?
+                    $SummaryInfo::tlaToLegacySrcLabel{$bucket} :
+                    $bucket;
+                my $popup =
+                    " title=\"next $label in “" .
+                    escape_html($src_owner) . "” bin\"";
+                # NOTE:  see note below about firefox nested span bug.
+                #  this code just arranges to wrap an explicit 'span' around
+                #  the space.
+                my $nspace = $ownerLen - length($src_owner);
+                my $space  = $nspace > 0 ? (' ' x $nspace) : '';
+                my $href   = "" .
+                    escape_html(substr($src_owner, 0, $ownerLen)) . '';
+                if ('' ne $bgclass &&
+                    '' ne $space) {
+                    $html .= "$endspan$href$span";
+                } else {
+                    $html .= $href;
+                }
+                $html .= $space;
+            } else {
+                $html .= sprintf("%-${ownerLen}s", $owner);
+            }
+            $html .= $endspan . ' ';    # add trailing space
+        }
+    }    # DATE_SECTION
+
+    # use a different HTML tag if there are deleted lines here
+    my $deletedLines = $fileCovInfo->deletedLineData($line);
+    my $lineNumTag   = 'lineNum';
+    my $lineNumTitle = '';
+    if (defined($deletedLines)) {
+        my $first = $deletedLines->[0]->lineNo('base');
+        my $last  = $deletedLines->[-1]->lineNo('base');
+        die("invalid deleted lines array") unless $first <= $last;
+        $lineNumTitle = ' title="'
+            .
+            (($first != $last) ? "removed lines [$first:$last]" :
+                 "removed line $first") .
+            ' from baseline version"';
+        $lineNumTag = 'lineNumWithDelete';
+    }
+    my $lineNumSpan = "";
+
+    $html .= sprintf("$lineNumSpan%8d ", $line);
+    #printf("tla= " . $tla);
+    #printf("html= " . $html);
+
+    $html .= shift(@br_html) . ":" if ($showBranches);
+    $html .= shift(@mcdc_html) . ":" if ($showMcdc);
+
+    $tla = ""
+        if (!defined($main::show_tla));
+
+    # '$source_format' is the colorization, then the 3-letter TLA,
+    #    then the hit count, then the source line
+    if ($tlaIsHref) {
+        # there seems to be a bug in firefox:
+        #      link whatever
+        #   renders as if the 'span' didn't exist (so the colorization ends at
+        #   the link, and the rest of the line doesn't pick up attributes
+        #   from class 'foo').
+        # If I emit it as:
+        #     link whatever
+        #   (i.e., do not nest the anchor inside the span) - then it works
+        $html .= "$tla$source_format $count_format : ";
+    } else {
+        $html .= "$source_format$tla $count_format : ";
+    }
+
+    $html .= escape_html($source);
+    $html .= "" if ($source_format);
+    $html .= $anchor_end . "\n";
+
+    write_html($handle, $html);
+
+    # Add lines for overlong branch and/or MC/DC information
+    while (@br_html || @mcdc_html) {
+        my $br = @br_html ? shift(@br_html) : '';
+        my $mc = @mcdc_html ? shift(@mcdc_html) : '';
+        if ($mc) {
+            # space over far enough to line up with MC/DC extension column
+            # remove the span and other HTML
+            my $s = $br;
+            foreach my $tag ('a', 'span') {
+                $s =~ s/(<\/$tag>|<$tag.+?>)//g;
+            }
+            $br .= ' ' x ($br_field_width - length($s)) . ' ';
+            die("unexpected lengths") unless $br_field_width >= length($s);
+        }
+        write_html($handle,
+                   "$html_continuation_leader$lineNumSpan" .
+                       ' ' x 8 . " $br$mc\n");
+    }
+    # *************************************************************
+
+    return ($result);
 }
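The continuation-line handling above needs the visible width of the branch
cell, so it strips the anchor and span markup before measuring the string.
The same idea as a standalone helper (the helper name is made up):

    use strict;
    use warnings;

    # return the number of characters that will actually render once the
    # <a ...>/</a> and <span ...>/</span> markup is removed
    sub visible_length
    {
        my ($html) = @_;
        foreach my $tag ('a', 'span') {
            $html =~ s/(<\/$tag>|<$tag.+?>)//g;
        }
        return length($html);
    }

    # visible_length('<span class="tlaUNC"> - </span>') == 3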
 
-
 #
 # write_source_epilog(filehandle)
 #
@@ -4245,22 +11375,20 @@ sub write_source_line(*$$$$$)
 
 sub write_source_epilog(*)
 {
-	# *************************************************************
+    # *************************************************************
 
-	write_html($_[0], <
-	      
-	    
-	  
-	  
+ write_html($_[0], < + + + +
END_OF_HTML - ; - # ************************************************************* + # ************************************************************* } - # # write_html_epilog(filehandle, base_dir[, break_frames]) # @@ -4271,33 +11399,33 @@ END_OF_HTML sub write_html_epilog(*$;$) { - my $basedir = $_[1]; - my $break_code = ""; - my $epilog; - - if (defined($_[2])) - { - $break_code = " target=\"_parent\""; - } - - # ************************************************************* - - write_html($_[0], < - - Generated by: $lcov_version - -
+ my $basedir = $_[1]; + my $break_code = ""; + my $epilog; + + if (defined($_[2])) { + $break_code = " target=\"_parent\""; + } + my $f = + defined($main::footer) ? $footer : + "Generated by: $lcov_version"; + + # ************************************************************* + + write_html($_[0], < + + $f + +
END_OF_HTML - ; - $epilog = $html_epilog; - $epilog =~ s/\@basedir\@/$basedir/g; + $epilog = $html_epilog; + $epilog =~ s/\@basedir\@/$basedir/g; - write_html($_[0], $epilog); + write_html($_[0], $epilog); } - # # write_frameset(filehandle, basedir, basename, pagetitle) # @@ -4305,37 +11433,35 @@ END_OF_HTML sub write_frameset(*$$$) { - my $frame_width = $overview_width + 40; + my $frame_width = $overview_width + 40; - # ************************************************************* + # ************************************************************* - write_html($_[0], < + write_html($_[0], < - + - - - $_[3] - - + + + $_[3] + + - - - - - <center>Frames not supported by your browser!<br></center> - - + + + + + <center>Frames not supported by your browser!<br></center> + + - + END_OF_HTML - ; - # ************************************************************* + # ************************************************************* } - # # sub write_overview_line(filehandle, basename, line, link) # @@ -4343,21 +11469,19 @@ END_OF_HTML sub write_overview_line(*$$$) { - my $y1 = $_[2] - 1; - my $y2 = $y1 + $nav_resolution - 1; - my $x2 = $overview_width - 1; + my $y1 = $_[2] - 1; + my $y2 = $y1 + $nav_resolution - 1; + my $x2 = $overview_width - 1; - # ************************************************************* + # ************************************************************* - write_html($_[0], < + write_html($_[0], < END_OF_HTML - ; - # ************************************************************* + # ************************************************************* } - # # write_overview(filehandle, basedir, basename, pagetitle, lines) # @@ -4365,478 +11489,1146 @@ END_OF_HTML sub write_overview(*$$$$) { - my $index; - my $max_line = $_[4] - 1; - my $offset; + my $index; + my $max_line = $_[4] - 1; + my $offset; - # ************************************************************* + # ************************************************************* - write_html($_[0], < + write_html($_[0], < - + - - $_[3] - - - + + $_[3] + + + - - + + END_OF_HTML - ; - - # ************************************************************* - - # Make $offset the next higher multiple of $nav_resolution - $offset = ($nav_offset + $nav_resolution - 1) / $nav_resolution; - $offset = sprintf("%d", $offset ) * $nav_resolution; - - # Create image map for overview image - for ($index = 1; $index <= $_[4]; $index += $nav_resolution) - { - # Enforce nav_offset - if ($index < $offset + 1) - { - write_overview_line($_[0], $_[2], $index, 1); - } - else - { - write_overview_line($_[0], $_[2], $index, $index - $offset); - } - } - - # ************************************************************* - - write_html($_[0], < - -
- Top

- Overview -
- - + + # ************************************************************* + + # Make $offset the next higher multiple of $nav_resolution + $offset = ($nav_offset + $nav_resolution - 1) / $nav_resolution; + $offset = sprintf("%d", $offset) * $nav_resolution; + + # Create image map for overview image + for ($index = 1; $index <= $_[4]; $index += $nav_resolution) { + # Enforce nav_offset + if ($index < $offset + 1) { + write_overview_line($_[0], $_[2], $index, 1); + } else { + write_overview_line($_[0], $_[2], $index, $index - $offset); + } + } + + # ************************************************************* + my $f = $lcovutil::case_insensitive ? lc($_[2]) : $_[2]; + write_html($_[0], < + +
+ Top

+ Overview +
+ + END_OF_HTML - ; - # ************************************************************* + # ************************************************************* } - sub max($$) { - my ($a, $b) = @_; + my ($a, $b) = @_; + + return $a if ($a > $b); + return $b; +} + +sub buildDateSummaryTable($$$$$$$$$) +{ + my ($summary, $covType, $covCountCallback, + $fileDetail, $nextLocationCallback, $title, + $detailLink, $numRows, $activeTlaList) = @_; + + $title = + "$title" + if (defined($detailLink)); + + my @table; + + my @dateSummary = [undef, #width + "subTableHeader", # class + $title, # text + $numRows, # colspan + ]; + # only insert the label if there is data + my $first = 1; + my $page = $detailLink; + if (defined($page)) { + $page =~ s/^index/index-bin_/; + $page =~ s/^index-bin_-/index-bin_/; + } + + my ($lim_med, $lim_high); + if ($covType == SummaryInfo::LINE_DATA) { + $lim_med = $ln_med_limit; + $lim_high = $ln_hi_limit; + } elsif ($covType == SummaryInfo::BRANCH_DATA) { + $lim_med = $br_med_limit; + $lim_high = $br_hi_limit; + } elsif ($covType == SummaryInfo::MCDC_DATA) { + $lim_med = $mcdc_med_limit; + $lim_high = $mcdc_hi_limit; + } elsif ($covType == SummaryInfo::FUNCTION_DATA) { + $lim_med = $fn_med_limit; + $lim_high = $fn_hi_limit; + } else { + die("unexpected cover type $covType"); + } + for (my $bin = 0; $bin <= $#SummaryInfo::ageGroupHeader; ++$bin) { + my $ageval = $summary->age_sample($bin); + my $found = &$covCountCallback($summary, "found", "age", $ageval); + next + if 0 == $found; + my $hit = &$covCountCallback($summary, "hit", "age", $ageval); + my $style = + $rate_name[classify_rate($found, $hit, $lim_med, $lim_high)]; + my $rate = rate($hit, $found, " %"); + my $href = $SummaryInfo::ageGroupHeader[$bin]; + if (defined($detailLink)) { + $href = + "$href"; + } + $hit -= $found # negative number + if ($main::opt_missed); + my @dataRow = ([undef, "headerItem", $href . ":"], + [undef, "headerCovTableEntry$style", $rate], + [undef, "headerCovTableEntry", $found]); + if ($main::show_hitTotalCol) { + push(@dataRow, [undef, "headerCovTableEntry", $hit]); + } + if ($main::show_tla) { + for my $tla (@$activeTlaList) { + my $value = &$covCountCallback($summary, $tla, "age", $ageval); + my $class = + !$main::use_legacyLabels && + 0 != $value && + grep(/^$tla$/, ("UNC", "LBC", "UIC")) ? "tla$tla" : + "headerCovTableEntry"; + # suppress zeros - make table less busy/easier to read + if ("0" eq $value) { + $value = ""; + } elsif (!$main::no_sourceview && + defined($fileDetail) && + defined($nextLocationCallback) && + !grep(/^$tla$/, ('DUB', 'DCB'))) { + # no link for deleted lines (DUB, DCB) because there + # is no TLA for those categories at the location in + # the 'current' sourceview. That location is just a + # normal line - maybe "not code", maybe 'CBC' or some + # other TLA. + + # link to first entry + my $firstAppearance = + &$nextLocationCallback($fileDetail, $bin, $tla); + defined($firstAppearance) or + die( + "$tla: unexpected date bin $bin undef appearance for " + . $fileDetail->path()); + my $dateBin = $SummaryInfo::ageGroupHeader[$bin]; + my ($label, $color); + if ($main::use_legacyLabels) { + $label = $SummaryInfo::tlaToLegacy{$tla}; + $color = ""; + } else { + $label = $tla; + $color = " class=\"tlaBg$tla\""; + } + my $popup = + " title=\"goto first $label " . + SummaryInfo::type2str($covType) . 
+ " in “$dateBin” bin\""; + $value = + "$value"; + } + push(@dataRow, [undef, $class, $value]); + } + } + if ($first) { + push(@table, \@dateSummary); + $first = 0; + } + push(@table, \@dataRow); + } + return \@table; +} + +sub buildOwnerSummaryTable($$$$$$$$$$) +{ + my ($ownerList, $num_truncated, $summary, + $covType, $fileDetail, $nextLocationCallback, + $title, $detailLink, $numRows, + $activeTlaList) = @_; + + $title .= " (containing " . + ($main::show_ownerBins ? "" : "un-exercised ") . "code)"; + $title .= + ": $num_truncated author" . + ($num_truncated == 1 ? '' : 's') . ' truncated' + if $num_truncated; + + $title = + "$title" + if (defined($detailLink)); + + my $page = $detailLink; + if (defined($page)) { + $page =~ s/^index/index-bin_/; + $page =~ s/^index-bin_-/index-bin_/; + } + + my @table; + my @ownerSummary = [undef, #width + "subTableHeader", # class + $title, # text + $numRows, # colspan + ]; + + my ($lim_med, $lim_high); + if ($covType == SummaryInfo::LINE_DATA) { + $lim_med = $ln_med_limit; + $lim_high = $ln_hi_limit; + } elsif ($covType eq SummaryInfo::BRANCH_DATA) { + $lim_med = $br_med_limit; + $lim_high = $br_hi_limit; + } elsif ($covType eq SummaryInfo::MCDC_DATA) { + $lim_med = $mcdc_med_limit; + $lim_high = $mcdc_hi_limit; + } elsif ($covType eq SummaryInfo::FUNCTION_DATA) { + $lim_med = $fn_med_limit; + $lim_high = $fn_hi_limit; + } else { + die("unexpected cover type $covType"); + } + my $first = 1; + # owners are sorted from most uncovered lines to least + foreach my $od (@$ownerList) { + my ($name, $lineData, $branchData) = @$od; + my $d = ($covType == SummaryInfo::LINE_DATA) ? $lineData : $branchData; + my ($missed, $found) = @$d; + + # only put user in table if they are responsible for at least one point + next if $found == 0 or ($missed == 0 && $main::show_ownerBins ne 'all'); + + if ($first) { + $first = 0; + push(@table, \@ownerSummary); + } + my $hit = $found - $missed; + + my $style = + $rate_name[classify_rate($found, $hit, $lim_med, $lim_high)]; + my $rate = rate($hit, $found, ' %'); + + my $esc_name = escape_html($name); + my $href = $esc_name; + if (defined($detailLink)) { + $href = + "$esc_name"; + } + $hit -= $found # negative number + if ($main::opt_missed); + my @dataRow = ([undef, "headerItem", $href . ":"], + [undef, "owner_coverPer$style", $rate], + [undef, "ownerTla", $found]); + if ($main::show_hitTotalCol) { + push(@dataRow, [undef, "ownerTla", $hit]); + } + if ($main::show_tla) { + for my $tla (@$activeTlaList) { + my $value = $summary->owner_tlaCount($name, $tla, $covType); + # suppress zeros - make table less busy/easier to read + my $class = + !$main::use_legacyLabels && + 0 != $value && + grep(/^$tla$/, ("UNC", "LBC", "UIC")) ? "tla$tla" : + "ownerTla"; + if ("0" eq $value) { + $value = ""; + } elsif (!$main::no_sourceview && + defined($fileDetail) && + defined($nextLocationCallback)) { + my $firstAppearance = + &$nextLocationCallback($fileDetail, $name, $tla); + defined($firstAppearance) or + die( + "$tla: unexpected owner $name undef appearance for " . + $fileDetail->path()); + my ($label, $color); + if ($main::use_legacyLabels) { + $label = $SummaryInfo::tlaToLegacy{$tla}; + $color = ""; + } else { + $label = $tla; + $color = " class=\"tlaBg$tla\""; + } + my $popup = + " title=\"goto first $label " . + SummaryInfo::type2str($covType) . 
+ " in “$name” bin\""; + $value = + "$value"; + } + push(@dataRow, [undef, $class, $value]); + } + } + push(@table, \@dataRow); + } + return \@table; +} - return $a if ($a > $b); - return $b; +sub buildHeaderSummaryTableRow +{ + my ($summary, $covType, $fileDetail, $nextLocationCallback, $activeTlaList) + = @_; + + $fileDetail = undef + if (!$main::show_tla && + (defined($fileDetail) && !$fileDetail->isProjectFile())); + my @row; + for my $tla (@$activeTlaList) { + my $value = $summary->get($tla, $covType); + # for the moment, no background colorization for non-zero counts + # in "concerning" categories. The table header is immediately + # above - so no need to colorize to draw emphasis - just makes the + # table busier + my $showConcerningTla = 0; + my $class = + $showConcerningTla && + !$main::use_legacyLabels && + 0 != $value && + grep(/^$tla$/, ("UNC", "LBC", "UIC")) ? "tla$tla" : + "headerCovTableEntry"; + if ("0" eq $value) { + # suppress zeros - make table less busy/easier to read + $value = ""; + } elsif (!$main::no_sourceview && + defined($fileDetail) && + !($tla eq "DCB" || $tla eq "DUB")) { + # deleted lines don't appear.. + my $firstAppearance = &$nextLocationCallback($fileDetail, $tla); + defined($firstAppearance) or + die( + "$tla: unexpected undef appearance for " . $fileDetail->path()); + my $label = + $main::use_legacyLabels ? $SummaryInfo::tlaToLegacy{$tla} : + $tla; + my $popup = " title=\"goto first $label " . + SummaryInfo::type2str($covType) . "\""; + $value = "$value"; + } + push(@row, [undef, $class, $value]); + } + return @row; } +# build an HTML string for the directory or file pathname, such +# that each element is clickable - and takes you to the 'index' file +# in the corresponding (transitive) parent directory +sub build_html_path($$$$$) +{ + my ($path, $key, $bin_type, $isFile, $isAbsolute) = @_; + + $path =~ s|^/||; + my @path = File::Spec->splitdir($path); + my $html_path = ""; + if ($main::hierarchical && + scalar(@path) > 1) { + pop(@path); # remove 'self' - at the tail + my $p = ""; + my $sep = ""; + # need one fewer "../" entries for file pathname because the + # index file we are looking for is in the current directory (i.e., + # not '../index.html' + my $len = scalar(@path) - $isFile; + foreach my $elem (@path) { + my $base = "../" x $len; + $elem = $lcovutil::dirseparator . $elem if $isAbsolute; + $isAbsolute = 0; + my $e = escape_html($elem); + $html_path .= + "$sep$e"; + $sep = $lcovutil::dirseparator; + --$len; + } + } + return $html_path; +} # -# write_header(filehandle, type, trunc_file_name, rel_file_name, lines_found, -# lines_hit, funcs_found, funcs_hit, sort_type) +# write_header(filehandle, ctrl, trunc_file_name, rel_file_name, +# summaryInfo, optionalFileDetailInfo, optionalFunctionData)) +# ctrl = (type, primary_key, sort_type, bin_type) # # Write a complete standard page header. TYPE may be (0, 1, 2, 3, 4) # corresponding to (directory view header, file view header, source view # header, test case description header, function view header) # +# bin_type in (undef, "", "-owner", "-date") +# - if 'bin' is set, then create link to 'vanilla' view of self, and +# to corresponding view of parent +# - i.e., from 'owner detail' directory page to "owner detail" +# toplevel, and to my correspondign vanilla directory page. 
+# return hash of coverType -> list of non-zero TLAs for that type (or to +# list of all TLA types if user has asked not to suppress all-zero +# columns) -sub write_header(*$$$$$$$$$$) -{ - local *HTML_HANDLE = $_[0]; - my $type = $_[1]; - my $trunc_name = $_[2]; - my $rel_filename = $_[3]; - my $lines_found = $_[4]; - my $lines_hit = $_[5]; - my $fn_found = $_[6]; - my $fn_hit = $_[7]; - my $br_found = $_[8]; - my $br_hit = $_[9]; - my $sort_type = $_[10]; - my $base_dir; - my $view; - my $test; - my $base_name; - my $style; - my $rate; - my @row_left; - my @row_right; - my $num_rows; - my $i; - my $esc_trunc_name = escape_html($trunc_name); - - $base_name = basename($rel_filename); - - # Prepare text for "current view" field - if ($type == $HDR_DIR) - { - # Main overview - $base_dir = ""; - $view = $overview_title; - } - elsif ($type == $HDR_FILE) - { - # Directory overview - $base_dir = get_relative_base_path($rel_filename); - $view = "". - "$overview_title - $esc_trunc_name"; - } - elsif ($type == $HDR_SOURCE || $type == $HDR_FUNC) - { - # File view - my $dir_name = dirname($rel_filename); - my $esc_base_name = escape_html($base_name); - my $esc_dir_name = escape_html($dir_name); - - $base_dir = get_relative_base_path($dir_name); - if ($frames) - { - # Need to break frameset when clicking any of these - # links - $view = "$overview_title - ". - "". - "$esc_dir_name - $esc_base_name"; - } - else - { - $view = "". - "$overview_title - ". - "". - "$esc_dir_name - $esc_base_name"; - } - - # Add function suffix - if ($func_coverage) { - $view .= ""; - if ($type == $HDR_SOURCE) { - if ($sort) { - $view .= " (source / functions)"; - } else { - $view .= " (source / functions)"; - } - } elsif ($type == $HDR_FUNC) { - $view .= " (source / functions)"; - } - $view .= ""; - } - } - elsif ($type == $HDR_TESTDESC) - { - # Test description header - $base_dir = ""; - $view = "". - "$overview_title - test case descriptions"; - } - - # Prepare text for "test" field - $test = escape_html($test_title); - - # Append link to test description page if available - if (%test_description && ($type != $HDR_TESTDESC)) - { - if ($frames && ($type == $HDR_SOURCE || $type == $HDR_FUNC)) - { - # Need to break frameset when clicking this link - $test .= " ( ". - "". - "view descriptions )"; - } - else - { - $test .= " ( ". - "". - "view descriptions )"; - } - } - - # Write header - write_header_prolog(*HTML_HANDLE, $base_dir); - - # Left row - push(@row_left, [[ "10%", "headerItem", "Current view:" ], - [ "35%", "headerValue", $view ]]); - push(@row_left, [[undef, "headerItem", "Test:"], - [undef, "headerValue", $test]]); - push(@row_left, [[undef, "headerItem", "Date:"], - [undef, "headerValue", $date]]); - - # Right row - if ($legend && ($type == $HDR_SOURCE || $type == $HDR_FUNC)) { - my $text = <isProjectFile()); + + my $key = $primary_key ne "name" ? 
"-bin_$primary_key" : ""; + my $isAbsolutePath = ( + $summary->is_directory(1) || ($summary->type() eq 'file' && + $summary->parent()->is_directory(1)) + ); + my $html_path = + build_html_path($trunc_name, $key, $bin_type, + $summary->type() eq 'file', + $isAbsolutePath); + + # Prepare text for "current view" field + if ($type == $HDR_DIR) { + # Main overview + $base_dir = ""; + if ($bin_type ne "" || + $primary_key ne 'name') { + # this is the header of the 'top-level' page, for either 'owner' + # or 'date' binning - link back to vanilla top-level page + $view = + "$overview_title"; + } else { + $view = $overview_title; + } + } elsif ($type == $HDR_FILE) { + # Directory overview + $base_dir = get_relative_base_path($rel_filename); + my $self_link; + if ($main::hierarchical) { + my $base = escape_html(File::Basename::basename($rel_filename)); + if ($base eq $rel_filename && + $isAbsolutePath) { + $base = $lcovutil::dirseparator . $base; + } + $self_link = $html_path; + $self_link .= $lcovutil::dirseparator if ('' ne $html_path); + if ('name' ne $primary_key || + '' ne $bin_type) { + $self_link .= + "$base"; + } else { + $self_link .= $base; + } + } else { + $esc_trunc_name = $lcovutil::dirseparator . $esc_trunc_name + if $isAbsolutePath; + $self_link = $esc_trunc_name; + if ('name' ne $primary_key || + '' ne $bin_type) { + # go back to the 'vanilla' view of this directory + $self_link = + "$esc_trunc_name"; + } + } + $view = + "" + . "$overview_title - $self_link"; + } elsif ($type == $HDR_SOURCE || $type == $HDR_FUNC) { + # File view + my $dir_name = dirname($rel_filename); + + my $esc_base_name = escape_html($base_name); + my $esc_dir_name = escape_html($dir_name); + $esc_dir_name = $lcovutil::dirseparator . $esc_dir_name + if $isAbsolutePath; + + $base_dir = get_relative_base_path($dir_name); + # if using frames, to break frameset when clicking any of the links + my $parent = $frames ? " target=\"_parent\"" : ""; + $view = + "" . + "$overview_title - "; + if ($main::flat) { + # for flat view, point only to the top-levevl and show path + # to this file. No other links + $view .= File::Spec->catfile($esc_dir_name, $esc_base_name); + } else { + if ($main::hierarchical) { + $html_path =~ s/$esc_dir_name"; + } + $view .= " - $esc_base_name"; + } + + # Add function suffix + if ($lcovutil::func_coverage && + defined($differentialFunctionMap) && + %$differentialFunctionMap) { + $view .= ""; + if ($type == $HDR_SOURCE) { + my $suffix = $sort_tables ? '-c' : ''; + $view .= + " (source / functions)"; + } elsif ($type == $HDR_FUNC) { + $view .= + " (source / functions)"; + } + $view .= ""; + } + } elsif ($type == $HDR_TESTDESC) { + # Test description header + $base_dir = ""; + $view = "" . + "$overview_title - test case descriptions"; + } + + # Prepare text for "test" field + my $test = escape_html($test_title); + + # Append link to test description page if available + if (%test_description && ($type != $HDR_TESTDESC)) { + if ($frames && ($type == $HDR_SOURCE || $type == $HDR_FUNC)) { + # Need to break frameset when clicking this link + $test .= + " ( " . "" . + "view descriptions )"; + } else { + $test .= + " ( " . "" . "view descriptions )"; + } + } + + # Write header + write_header_prolog(*HTML_HANDLE, $base_dir); + + # Left row + push(@row_left, + [["10%", "headerItem", "Current view:"], ["10%", "headerValue", $view] + ]); + my $label = defined($baseline_title) ? 
"Current" : "Test"; + push(@row_left, + [[undef, "headerItem", "$label:"], [undef, "headerValue", $test]]); + push(@row_left, + [[undef, "headerItem", "$label Date:"], + [undef, "headerValue", $current_date] + ]); + if (defined($baseline_title)) { + push(@row_left, + [[undef, "headerItem", "Baseline:"], + [undef, "headerValue", $baseline_title] + ]); + push(@row_left, + [[undef, "headerItem", "Baseline Date:"], + [undef, "headerValue", $baseline_date] + ]); + } + my $hasBranchData = $lcovutil::br_coverage && 0 != $summary->branch_found(); + my $hasMcdcData = $lcovutil::mcdc_coverage && 0 != $summary->mcdc_found(); + + # if top-level page, link to command line and profile + if ($summary->type() eq 'top' && + $bin_type eq '' && + $primary_key eq 'name' && + $sort_type eq $main::SORT_FILE && + defined($lcovutil::profile)) { + + push(@row_left, + [[undef, 'headerItem', 'genhtml Process:'], + [undef, 'headerValue', + 'command line' + ] + ], + [[undef, 'headerItem', ''], #blank + [undef, 'headerValue', + 'profile data' + ] + ]); + } + if ($type != $HDR_SOURCE && + $type != $HDR_FUNC && + (defined($main::show_ownerBins) || + @SourceFile::annotateScript) + ) { + # we are going to have 3 versions of of the page: + # flat, with owner bin data, with date bin data + # so label which one this is + my ($tableLabel, $tableKey); + if ($bin_type eq '-owner') { + $tableLabel = 'Group by:'; + $tableKey = 'Owner'; + } elsif ($bin_type eq '-date') { + $tableLabel = 'Group by:'; + $tableKey = 'Date bin'; + } else { + $bin_type eq "" or + die("unexpected bin detail type $bin_type"); + $tableLabel = 'Summarize by:'; + if ($primary_key eq 'date') { + $tableKey = 'Date bin'; + } elsif ($primary_key eq 'owner') { + $tableKey = 'Owner'; + } # else ($primary_key eq 'name'; + } + # label this only if there is both a primary and secondary + push(@row_left, + [[undef, 'headerItem', $tableLabel], + [undef, 'headerValue', $tableKey] + ]) if (defined($tableKey)); + } + + # Right row + if ($legend && ($type == $HDR_SOURCE || $type == $HDR_FUNC)) { + # kind of hacky - using spaces to try to align + my $text = <hit
not hit END_OF_HTML - if ($br_coverage) { - $text .= <Branches: + taken - not taken # not executed END_OF_HTML - } - push(@row_left, [[undef, "headerItem", "Legend:"], - [undef, "headerValueLeg", $text]]); - } elsif ($legend && ($type != $HDR_TESTDESC)) { - my $text = <MC/DC:    + T 'true' sensitized + t 'true' not sensitized + F 'false' sensitized + f 'false' not sensitized +END_OF_HTML + } + push(@row_left, + [[undef, "headerItem", "Legend:"], + [undef, "headerValueLeg", $text] + ]); + } elsif ($legend && ($type != $HDR_TESTDESC)) { + my $text = <low: < $med_limit % medium: >= $med_limit % high: >= $hi_limit % END_OF_HTML - push(@row_left, [[undef, "headerItem", "Legend:"], - [undef, "headerValueLeg", $text]]); - } - if ($type == $HDR_TESTDESC) { - push(@row_right, [[ "55%" ]]); - } else { - push(@row_right, [["15%", undef, undef ], - ["10%", "headerCovTableHead", "Hit" ], - ["10%", "headerCovTableHead", "Total" ], - ["15%", "headerCovTableHead", "Coverage"]]); - } - # Line coverage - $style = $rate_name[classify_rate($lines_found, $lines_hit, - $med_limit, $hi_limit)]; - $rate = rate($lines_hit, $lines_found, " %"); - push(@row_right, [[undef, "headerItem", "Lines:"], - [undef, "headerCovTableEntry", $lines_hit], - [undef, "headerCovTableEntry", $lines_found], - [undef, "headerCovTableEntry$style", $rate]]) - if ($type != $HDR_TESTDESC); - # Function coverage - if ($func_coverage) { - $style = $rate_name[classify_rate($fn_found, $fn_hit, - $fn_med_limit, $fn_hi_limit)]; - $rate = rate($fn_hit, $fn_found, " %"); - push(@row_right, [[undef, "headerItem", "Functions:"], - [undef, "headerCovTableEntry", $fn_hit], - [undef, "headerCovTableEntry", $fn_found], - [undef, "headerCovTableEntry$style", $rate]]) - if ($type != $HDR_TESTDESC); - } - # Branch coverage - if ($br_coverage) { - $style = $rate_name[classify_rate($br_found, $br_hit, - $br_med_limit, $br_hi_limit)]; - $rate = rate($br_hit, $br_found, " %"); - push(@row_right, [[undef, "headerItem", "Branches:"], - [undef, "headerCovTableEntry", $br_hit], - [undef, "headerCovTableEntry", $br_found], - [undef, "headerCovTableEntry$style", $rate]]) - if ($type != $HDR_TESTDESC); - } - - # Print rows - $num_rows = max(scalar(@row_left), scalar(@row_right)); - for ($i = 0; $i < $num_rows; $i++) { - my $left = $row_left[$i]; - my $right = $row_right[$i]; - - if (!defined($left)) { - $left = [[undef, undef, undef], [undef, undef, undef]]; - } - if (!defined($right)) { - $right = []; - } - write_header_line(*HTML_HANDLE, @{$left}, - [ $i == 0 ? 
"5%" : undef, undef, undef], - @{$right}); - } - - # Fourth line - write_header_epilog(*HTML_HANDLE, $base_dir); -} - -sub get_sorted_by_rate($$) -{ - my ($hash, $type) = @_; - - if ($type == $SORT_LINE) { - # Sort by line coverage - return sort({$hash->{$a}[7] <=> $hash->{$b}[7]} keys(%{$hash})); - } elsif ($type == $SORT_FUNC) { - # Sort by function coverage; - return sort({$hash->{$a}[8] <=> $hash->{$b}[8]} keys(%{$hash})); - } elsif ($type == $SORT_BRANCH) { - # Sort by br coverage; - return sort({$hash->{$a}[9] <=> $hash->{$b}[9]} keys(%{$hash})); - } -} - -sub get_sorted_by_missed($$) -{ - my ($hash, $type) = @_; - - if ($type == $SORT_LINE) { - # Sort by number of instrumented lines without coverage - return sort( - { - ($hash->{$b}[0] - $hash->{$b}[1]) <=> - ($hash->{$a}[0] - $hash->{$a}[1]) - } keys(%{$hash})); - } elsif ($type == $SORT_FUNC) { - # Sort by number of instrumented functions without coverage - return sort( - { - ($hash->{$b}[2] - $hash->{$b}[3]) <=> - ($hash->{$a}[2] - $hash->{$a}[3]) - } keys(%{$hash})); - } elsif ($type == $SORT_BRANCH) { - # Sort by number of instrumented branches without coverage - return sort( - { - ($hash->{$b}[4] - $hash->{$b}[5]) <=> - ($hash->{$a}[4] - $hash->{$a}[5]) - } keys(%{$hash})); - } -} - -# -# get_sorted_keys(hash_ref, sort_type) -# -# hash_ref: filename -> stats -# stats: [ lines_found, lines_hit, fn_found, fn_hit, br_found, br_hit, -# link_name, line_rate, fn_rate, br_rate ] -# - -sub get_sorted_keys($$) -{ - my ($hash, $type) = @_; - - if ($type == $SORT_FILE) { - # Sort by name - return sort(keys(%{$hash})); - } elsif ($opt_missed) { - return get_sorted_by_missed($hash, $type); - } else { - return get_sorted_by_rate($hash, $type); - } + push(@row_left, + [[undef, "headerItem", "Legend:"], + [undef, "headerValueLeg", $text] + ]); + } + my %activeTlaColsForType; + my @nonZeroTlas; + if ($type != $HDR_TESTDESC && $main::show_tla) { + # compute which TLAs have non-zero entries for line/branch/function + my @visit = (SummaryInfo::LINE_DATA); + push(@visit, SummaryInfo::BRANCH_DATA) + if $hasBranchData; + push(@visit, SummaryInfo::MCDC_DATA) + if $hasMcdcData; + push(@visit, SummaryInfo::FUNCTION_DATA) + if $lcovutil::func_coverage; + + foreach my $covtype (@visit) { + if ($main::show_zeroTlaColumns) { + $activeTlaColsForType{$covtype} = + \@SummaryInfo::tlaPriorityOrder; + } else { + my @found; + for my $tla (@SummaryInfo::tlaPriorityOrder) { + push(@found, $tla) + if (0 != $summary->get($tla, $covtype)); + } + $activeTlaColsForType{$covtype} = \@found; + } + } + foreach my $tla (@SummaryInfo::tlaPriorityOrder) { + # check to see that there are non-zero entries + foreach my $covtype (@visit) { + if (grep({ $tla eq $_ } @{$activeTlaColsForType{$covtype}})) { + push(@nonZeroTlas, $tla); + last; + } + } + } + } + # if $showAllTlasInSummary is set, then put all TLA categories in the + # summary tables at the top of the page (but only the non-zero categories + # in the 'file detail' tables. + # Otherwise, only show non-zero categories in both places + my $showAllTlasInSummary = 0; # turn off for now + my $tlaSummaryTypes = + $showAllTlasInSummary ? 
\@$SummaryInfo::tlaPriorityOrder : + \@nonZeroTlas; + + if ($type == $HDR_TESTDESC) { + push(@row_right, [["80%"]]); + } else { + my $totalTitle = "Covered + Uncovered code"; + my $hitTitle = "Exercised code only"; + if (@main::base_filenames) { + $totalTitle .= " (not including EUB, ECB, DUB, DCB categories)"; + $hitTitle .= " (CBC + GBC + GNC + GIC)"; + } + + my @headerRow = (["5%", undef, undef], + ["5%", "headerCovTableHead", "Coverage"], + ["5%", "headerCovTableHead", + "Total", undef, + $totalTitle + ]); + if ($main::show_hitTotalCol) { + # legacy view, or we have all the differential categories + #- thus also want a summary + $hitTitle = $main::use_legacyLabels ? undef : $hitTitle; + push(@headerRow, + ["5%", "headerCovTableHead", + $main::opt_missed ? "Missed" : "Hit", + undef, $hitTitle + ]); + } + if ($main::show_tla) { + # Show selected TLAs in title row - either all of them, or + # only types which have non-zero entries + for my $tla (@$tlaSummaryTypes) { + + my ($title, $label, $class); + if ($main::use_legacyLabels) { + $label = $SummaryInfo::tlaToLegacy{$tla}; + $class = "headerCovTableHead"; + } else { + $label = $tla; + $title = $SummaryInfo::tlaToTitle{$tla}; + $class = "headerCovTableHead$tla"; + } + push(@headerRow, ["5%", $class, $label, undef, $title]); + } + } + push(@row_right, \@headerRow); + } + # Line coverage + my $tot = $summary->lines_found(); #might have been modified... + my $hit = $summary->lines_hit(); + my $style = + $rate_name[classify_rate($tot, $hit, $ln_med_limit, $ln_hi_limit)]; + my $rate = rate($hit, $tot, " %"); + $hit -= $tot + if $main::opt_missed; # negative number + my @dataRow = ([undef, "headerItem", "Lines:"], + [undef, "headerCovTableEntry$style", $rate], + [undef, "headerCovTableEntry", $tot]); + if ($main::show_hitTotalCol) { + push(@dataRow, [undef, "headerCovTableEntry", $hit]); + } + if ($main::show_tla) { + my @tlaRow = + buildHeaderSummaryTableRow($summary, SummaryInfo::LINE_DATA, + $fileDetail, \&SourceFile::nextTlaGroup, + $tlaSummaryTypes); + push(@dataRow, @tlaRow); + } + push(@row_right, \@dataRow) + if ($type != $HDR_TESTDESC); + # Function coverage + if ($lcovutil::func_coverage) { + my $tot = $summary->function_found(); + my $hit = $summary->function_hit(); + $style = + $rate_name[classify_rate($tot, $hit, $fn_med_limit, $fn_hi_limit)]; + $rate = rate($hit, $tot, " %"); + $hit -= $tot + if $main::opt_missed; # negative number + my @dataRow = ([undef, "headerItem", "Functions:"], + [undef, "headerCovTableEntry$style", $rate], + [undef, "headerCovTableEntry", $tot]); + if ($main::show_hitTotalCol) { + push(@dataRow, [undef, "headerCovTableEntry", $hit]); + } + if ($main::show_tla) { + # no file position for function (yet) + my @tlaRow = + buildHeaderSummaryTableRow($summary, + SummaryInfo::FUNCTION_DATA, undef, undef, $tlaSummaryTypes); + push(@dataRow, @tlaRow); + } + push(@row_right, \@dataRow) + if ($type != $HDR_TESTDESC); + } + # Branch coverage + if ($hasBranchData) { + my $tot = $summary->branch_found(); + my $hit = $summary->branch_hit(); + $style = + $rate_name[classify_rate($tot, $hit, $br_med_limit, $br_hi_limit)]; + $rate = rate($hit, $tot, " %"); + $hit -= $tot + if $main::opt_missed; # negative number + my @dataRow = ([undef, "headerItem", "Branches:"], + [undef, "headerCovTableEntry$style", $rate], + [undef, "headerCovTableEntry", $tot]); + if ($main::show_hitTotalCol) { + push(@dataRow, [undef, "headerCovTableEntry", $hit]); + } + if ($main::show_tla) { + my @tlaRow = + buildHeaderSummaryTableRow($summary, 
SummaryInfo::BRANCH_DATA, + $fileDetail, \&SourceFile::nextBranchTlaGroup, + $tlaSummaryTypes); + push(@dataRow, @tlaRow); + } + push(@row_right, \@dataRow) + if ($type != $HDR_TESTDESC); + } + + # MC/DC coverage + if ($hasMcdcData) { + my $tot = $summary->mcdc_found(); + my $hit = $summary->mcdc_hit(); + $style = + $rate_name[classify_rate($tot, $hit, $br_med_limit, $br_hi_limit)]; + $rate = rate($hit, $tot, " %"); + $hit -= $tot + if $main::opt_missed; # negative number + my @dataRow = ([undef, "headerItem", "MC/DC:"], + [undef, "headerCovTableEntry$style", $rate], + [undef, "headerCovTableEntry", $tot]); + if ($main::show_hitTotalCol) { + push(@dataRow, [undef, "headerCovTableEntry", $hit]); + } + if ($main::show_tla) { + my @tlaRow = + buildHeaderSummaryTableRow($summary, SummaryInfo::MCDC_DATA, + $fileDetail, \&SourceFile::nextBranchTlaGroup, + $tlaSummaryTypes); + push(@dataRow, @tlaRow); + } + push(@row_right, \@dataRow) + if ($type != $HDR_TESTDESC); + } + + # Aged coverage + if ($show_dateBins) { + # make a space in the table between before date bins + my $dateBinDetailPage = "index-date.$html_ext" + if ($type != $HDR_SOURCE && + $type != $HDR_FUNC); + my $table = + buildDateSummaryTable( + $summary, SummaryInfo::LINE_DATA, + \&SummaryInfo::lineCovCount, $fileDetail, + \&SourceFile::nextInDateBin, "Line coverage date bins:", + $dateBinDetailPage, scalar(@dataRow), + $tlaSummaryTypes); + push(@row_right, @$table) + if ($type != $HDR_TESTDESC); + + if ($lcovutil::func_coverage) { + my $fn_table = + buildDateSummaryTable($summary, + SummaryInfo::FUNCTION_DATA, + \&SummaryInfo::functionCovCount, + $fileDetail, + undef, + "Function coverage date bins:", + $dateBinDetailPage, + scalar(@dataRow), + $tlaSummaryTypes); + push(@row_right, @$fn_table) + if ($type != $HDR_TESTDESC); + } + + if ($hasBranchData) { + my $br_table = + buildDateSummaryTable($summary, + SummaryInfo::BRANCH_DATA, + \&SummaryInfo::branchCovCount, + $fileDetail, + \&SourceFile::nextBranchInDateBin, + "Branch coverage date bins:", + $dateBinDetailPage, + scalar(@dataRow), + $tlaSummaryTypes); + push(@row_right, @$br_table) + if ($type != $HDR_TESTDESC); + } + + if ($hasMcdcData) { + my $mcdc_table = + buildDateSummaryTable($summary, + SummaryInfo::MCDC_DATA, + \&SummaryInfo::mcdcCovCount, + $fileDetail, + \&SourceFile::nextMcdcInDateBin, + "MC/DC coverage date bins:", + $dateBinDetailPage, + scalar(@dataRow), + $tlaSummaryTypes); + push(@row_right, @$mcdc_table) + if ($type != $HDR_TESTDESC); + } + } + # owner bins.. 
+ if (defined($main::show_ownerBins)) { + # first, make sure there is owner data here (ie., owner data + # was collected, or both that there is owner data and some + # owners have uncovered code) + # This it the header table - so we want to truncate the owner + # list if it is too long (and the user asked us to) + my ($ownerList, $truncated) = + $summary->findOwnerList($callback_type, 1, + $main::show_ownerBins && $main::show_ownerBins eq 'all'); + if (defined($ownerList)) { + my $ownerBinDetailPage = "index-owner.$html_ext" + if ($type != $HDR_SOURCE && + $type != $HDR_FUNC); + my $table = + buildOwnerSummaryTable($ownerList, + $truncated, + $summary, + SummaryInfo::LINE_DATA, + $fileDetail, + \&SourceFile::nextInOwnerBin, + "Line coverage ownership bins", + $ownerBinDetailPage, + scalar(@dataRow), + $tlaSummaryTypes); + push(@row_right, @$table) + if ($type != $HDR_TESTDESC); + + if ($hasBranchData) { + my $br_table = + buildOwnerSummaryTable($ownerList, + $truncated, + $summary, + SummaryInfo::BRANCH_DATA, + $fileDetail, + \&SourceFile::nextBranchInOwnerBin, + "Branch coverage ownership bins", + $ownerBinDetailPage, + scalar(@dataRow), + $tlaSummaryTypes); + push(@row_right, @$br_table) + if ($type != $HDR_TESTDESC); + } + + if ($hasMcdcData) { + my $mcdc_table = + buildOwnerSummaryTable($ownerList, + $truncated, + $summary, + SummaryInfo::MCDC_DATA, + $fileDetail, + \&SourceFile::nextBranchInOwnerBin, + "MC/DC coverage ownership bins", + $ownerBinDetailPage, + scalar(@dataRow), + $tlaSummaryTypes); + push(@row_right, @$mcdc_table) + if ($type != $HDR_TESTDESC); + } + } + } + + # Print rows + my $num_rows = max(scalar(@row_left), scalar(@row_right)); + for (my $i = 0; $i < $num_rows; $i++) { + my $left = $row_left[$i]; + my $right = $row_right[$i]; + + if (!defined($left)) { + $left = [[undef, undef, undef], [undef, undef, undef]]; + } + if (!defined($right)) { + $right = []; + } + write_header_line(*HTML_HANDLE, @{$left}, + [$i == 0 ? "5%" : undef, undef, undef], + @{$right}); + } + + # Fourth line + write_header_epilog(*HTML_HANDLE, $base_dir); + return \%activeTlaColsForType; } sub get_sort_code($$$) { - my ($link, $alt, $base) = @_; - my $png; - my $link_start; - my $link_end; - - if (!defined($link)) { - $png = "glass.png"; - $link_start = ""; - $link_end = ""; - } else { - $png = "updown.png"; - $link_start = ''; - $link_end = ""; - } - - return ' '.$link_start. 
- ''.$link_end.''; -} - -sub get_file_code($$$$) -{ - my ($type, $text, $sort_button, $base) = @_; - my $result = $text; - my $link; - - if ($sort_button) { - if ($type == $HEAD_NO_DETAIL) { - $link = "index.$html_ext"; - } else { - $link = "index-detail.$html_ext"; - } - } - $result .= get_sort_code($link, "Sort by name", $base); - - return $result; -} - -sub get_line_code($$$$$) -{ - my ($type, $sort_type, $text, $sort_button, $base) = @_; - my $result = $text; - my $sort_link; - - if ($type == $HEAD_NO_DETAIL) { - # Just text - if ($sort_button) { - $sort_link = "index-sort-l.$html_ext"; - } - } elsif ($type == $HEAD_DETAIL_HIDDEN) { - # Text + link to detail view - $result .= ' ( show details )'; - if ($sort_button) { - $sort_link = "index-sort-l.$html_ext"; - } - } else { - # Text + link to standard view - $result .= ' ( hide details )'; - if ($sort_button) { - $sort_link = "index-detail-sort-l.$html_ext"; - } - } - # Add sort button - $result .= get_sort_code($sort_link, "Sort by line coverage", $base); - - return $result; -} - -sub get_func_code($$$$) -{ - my ($type, $text, $sort_button, $base) = @_; - my $result = $text; - my $link; - - if ($sort_button) { - if ($type == $HEAD_NO_DETAIL) { - $link = "index-sort-f.$html_ext"; - } else { - $link = "index-detail-sort-f.$html_ext"; - } - } - $result .= get_sort_code($link, "Sort by function coverage", $base); - return $result; -} - -sub get_br_code($$$$) -{ - my ($type, $text, $sort_button, $base) = @_; - my $result = $text; - my $link; - - if ($sort_button) { - if ($type == $HEAD_NO_DETAIL) { - $link = "index-sort-b.$html_ext"; - } else { - $link = "index-detail-sort-b.$html_ext"; - } - } - $result .= get_sort_code($link, "Sort by branch coverage", $base); - return $result; -} - -# -# write_file_table(filehandle, base_dir, overview, testhash, testfnchash, -# testbrhash, fileview, sort_type) + my ($link, $alt, $base) = @_; + my $png; + my $link_start; + my $link_end; + + if (!defined($link)) { + $png = "glass.png"; + $link_start = ""; + $link_end = ""; + } else { + $png = "updown.png"; + $link_start = ''; + $link_end = ""; + } + my $help = " title=\"Click to sort table by $alt\""; + $alt = "Sort by $alt"; + return " " . + $link_start . '' . $link_end . ''; +} + +sub get_file_code($$$$$$) +{ + my ($type, $text, $sort_button, $bin_type, $primary_key, $base) = @_; + my $result = $text; + my $link; + + my $key = 'name' ne $primary_key ? "-bin_$primary_key" : ""; + if ($sort_button) { + $link = "index$key$bin_type"; + $link .= '-detail' + unless ($type == $HEAD_NO_DETAIL); + $link .= ".$html_ext"; + } + $result .= get_sort_code($link, "file name", $base); + + return $result; +} + +sub get_line_code($$$$$$$) +{ + my ($type, $sort_type, $text, $sort_button, $bin_type, $primary_key, $base) + = @_; + my $result = $text; + my $key = ('name' eq $primary_key) ? '' : "-bin_$primary_key"; + my $sort_link = "index" . $key . $bin_type . "-sort-l.$html_ext" + if $sort_button; + + if ($type == $HEAD_NO_DETAIL) { + # Just text + } elsif ($type == $HEAD_DETAIL_HIDDEN) { + # Text + link to detail view + my $help = "title=\"Click to go to per-testcase coverage details\""; + my $detail_link = 'index' . $key . $bin_type . + '-detail' . $fileview_sortname[$sort_type] . '.' . 
$html_ext; + $result .= + " ( show details )'; + } else { + # Text + link to standard view + my $help = "title=\"Click to hide per-testcase coverage details\""; + $result .= + " ( hide details )'; + } + # Add sort button + $result .= get_sort_code($sort_link, "line coverage", $base); + # we don't have a 'detail' bin page + $result =~ s/index-bin_(.+?)-detail((-sort-.)?\.)/index-$1$2/g; + + return $result; +} + +sub get_func_code($$$$$$) +{ + my ($type, $text, $sort_button, $bin_type, $primary_key, $base) = @_; + my $result = $text; + my $link; + my $key = 'name' ne $primary_key ? "-bin_$primary_key" : ""; + + if ($sort_button) { + $link = "index$key$bin_type"; + $link .= '-detail' + unless ($type == $HEAD_NO_DETAIL); + $link .= "-sort-f.$html_ext"; + } + $result .= get_sort_code($link, "function coverage", $base); + return $result; +} + +sub get_br_code($$$$$$) +{ + my ($type, $text, $sort_button, $bin_type, $primary_key, $base) = @_; + my $result = $text; + my $link; + my $key = 'name' ne $primary_key ? "-bin_$primary_key" : ""; + + if ($sort_button) { + $link = "index$key$bin_type"; + $link .= '-detail' + unless ($type == $HEAD_NO_DETAIL); + $link .= "-sort-b.$html_ext"; + } + $result .= get_sort_code($link, "branch coverage", $base); + return $result; +} + +sub get_mcdc_code($$$$$$) +{ + my ($type, $text, $sort_button, $bin_type, $primary_key, $base) = @_; + my $result = $text; + my $link; + my $key = 'name' ne $primary_key ? "-bin_$primary_key" : ""; + + if ($sort_button) { + $link = "index$key$bin_type"; + $link .= '-detail' + unless ($type == $HEAD_NO_DETAIL); + $link .= "-sort-m.$html_ext"; + } + $result .= get_sort_code($link, "MC/DC coverage", $base); + return $result; +} + +# +# write_file_table(filehandle, callback_type, base_dir, perTestcaseData, +# parentSummary, ctrlSettings, activeTlaCols) +# ctrlSettings = [fileview, sort_type, details_type, sort_name] +# perTestcaseData = [testhash, testfnchash, testbrhash] +# activeTlaCols = {coverage_type, [tlas with nonzero count]} # # Write a complete file table. OVERVIEW is a reference to a hash containing # the following mapping: # # filename -> "lines_found,lines_hit,funcs_found,funcs_hit,page_link, -# func_link" +# func_link" + other file details # # TESTHASH is a reference to the following hash: # @@ -4848,269 +12640,684 @@ sub get_br_code($$$$) # otherwise. # -sub write_file_table(*$$$$$$$) -{ - local *HTML_HANDLE = $_[0]; - my $base_dir = $_[1]; - my $overview = $_[2]; - my $testhash = $_[3]; - my $testfnchash = $_[4]; - my $testbrhash = $_[5]; - my $fileview = $_[6]; - my $sort_type = $_[7]; - my $filename; - my $bar_graph; - my $hit; - my $found; - my $fn_found; - my $fn_hit; - my $br_found; - my $br_hit; - my $page_link; - my $testname; - my $testdata; - my $testfncdata; - my $testbrdata; - my %affecting_tests; - my $line_code = ""; - my $func_code; - my $br_code; - my $file_code; - my @head_columns; - - # Determine HTML code for column headings - if (($base_dir ne "") && $show_details) - { - my $detailed = keys(%{$testhash}); - - $file_code = get_file_code($detailed ? $HEAD_DETAIL_HIDDEN : - $HEAD_NO_DETAIL, - $fileview ? "Filename" : "Directory", - $sort && $sort_type != $SORT_FILE, - $base_dir); - $line_code = get_line_code($detailed ? $HEAD_DETAIL_SHOWN : - $HEAD_DETAIL_HIDDEN, - $sort_type, - "Line Coverage", - $sort && $sort_type != $SORT_LINE, - $base_dir); - $func_code = get_func_code($detailed ? 
$HEAD_DETAIL_HIDDEN : - $HEAD_NO_DETAIL, - "Functions", - $sort && $sort_type != $SORT_FUNC, - $base_dir); - $br_code = get_br_code($detailed ? $HEAD_DETAIL_HIDDEN : - $HEAD_NO_DETAIL, - "Branches", - $sort && $sort_type != $SORT_BRANCH, - $base_dir); - } else { - $file_code = get_file_code($HEAD_NO_DETAIL, - $fileview ? "Filename" : "Directory", - $sort && $sort_type != $SORT_FILE, - $base_dir); - $line_code = get_line_code($HEAD_NO_DETAIL, $sort_type, "Line Coverage", - $sort && $sort_type != $SORT_LINE, - $base_dir); - $func_code = get_func_code($HEAD_NO_DETAIL, "Functions", - $sort && $sort_type != $SORT_FUNC, - $base_dir); - $br_code = get_br_code($HEAD_NO_DETAIL, "Branches", - $sort && $sort_type != $SORT_BRANCH, - $base_dir); - } - push(@head_columns, [ $line_code, 3 ]); - push(@head_columns, [ $func_code, 2]) if ($func_coverage); - push(@head_columns, [ $br_code, 2]) if ($br_coverage); - - write_file_table_prolog(*HTML_HANDLE, $file_code, @head_columns); - - foreach $filename (get_sorted_keys($overview, $sort_type)) - { - my @columns; - ($found, $hit, $fn_found, $fn_hit, $br_found, $br_hit, - $page_link) = @{$overview->{$filename}}; - - # Line coverage - push(@columns, [$found, $hit, $med_limit, $hi_limit, 1]); - # Function coverage - if ($func_coverage) { - push(@columns, [$fn_found, $fn_hit, $fn_med_limit, - $fn_hi_limit, 0]); - } - # Branch coverage - if ($br_coverage) { - push(@columns, [$br_found, $br_hit, $br_med_limit, - $br_hi_limit, 0]); - } - write_file_table_entry(*HTML_HANDLE, $base_dir, $filename, - $page_link, @columns); - - $testdata = $testhash->{$filename}; - $testfncdata = $testfnchash->{$filename}; - $testbrdata = $testbrhash->{$filename}; - - # Check whether we should write test specific coverage - # as well - if (!($show_details && $testdata)) { next; } - - # Filter out those tests that actually affect this file - %affecting_tests = %{ get_affecting_tests($testdata, - $testfncdata, $testbrdata) }; - - # Does any of the tests affect this file at all? - if (!%affecting_tests) { next; } - - foreach $testname (keys(%affecting_tests)) - { - my @results; - ($found, $hit, $fn_found, $fn_hit, $br_found, $br_hit) = - split(",", $affecting_tests{$testname}); - - # Insert link to description of available - if ($test_description{$testname}) - { - $testname = "". 
- "$testname"; - } - - push(@results, [$found, $hit]); - push(@results, [$fn_found, $fn_hit]) if ($func_coverage); - push(@results, [$br_found, $br_hit]) if ($br_coverage); - write_file_table_detail_entry(*HTML_HANDLE, $testname, - @results); - } - } - - write_file_table_epilog(*HTML_HANDLE); -} - - -# -# get_found_and_hit(hash) -# -# Return the count for entries (found) and entries with an execution count -# greater than zero (hit) in a hash (linenumber -> execution count) as -# a list (found, hit) -# - -sub get_found_and_hit($) -{ - my %hash = %{$_[0]}; - my $found = 0; - my $hit = 0; - - # Calculate sum - $found = 0; - $hit = 0; - - foreach (keys(%hash)) - { - $found++; - if ($hash{$_}>0) { $hit++; } - } - - return ($found, $hit); -} - - -# -# get_func_found_and_hit(sumfnccount) -# -# Return (f_found, f_hit) for sumfnccount -# - -sub get_func_found_and_hit($) +sub write_file_table(*$$$$$$) { - my ($sumfnccount) = @_; - my $function; - my $fn_found; - my $fn_hit; - - $fn_found = scalar(keys(%{$sumfnccount})); - $fn_hit = 0; - foreach $function (keys(%{$sumfnccount})) { - if ($sumfnccount->{$function} > 0) { - $fn_hit++; - } - } - return ($fn_found, $fn_hit); -} - - -sub get_br_found_and_hit($) -{ - my ($brcount) = @_; - my $db; + local *HTML_HANDLE = $_[0]; + my $callback_type = $_[1]; + my $base_dir = $_[2]; + my $perTestcaseData = + $_[3]; # undef or [lineCov, funcCov, branchCov, mcdcCov] + my $dirSummary = $_[4]; # SummaryInfo object + my ($fileview, $primary_key, $sort_type, $bin_type) = @{$_[5]}; + my $activeTlaCols = $_[6]; + # $fileview == 0 if listing directories, 1 if listing files + # $primary_key in ("name", "owner", "date"). If $primary_key is: + # - 'name': leftmost column is file/directory name - + # this is the original/vanilla genhtml behaviour + # - 'owner': leftmost column is author name. Details for that owner + # (for all files in the project, or for all files in this drectory) + # are shown in a contiguous block. + # - 'date': leftmost column is date bin. Details for that bin are + # shown in a contiguous block. + # $sort_type in ("", "-sort-l", "-sort-b", "-sort-f") + # $bin_type in ("", "-owner", "-date") + # - if $bin_type not "", expand non-empty entries after file/directory + # overall count (i.e. - show all the "owners" for this file/directory, + # or all date bins for this file/directory) + # - $bin_type is applied only if $primary_key is "name" + + $primary_key eq "name" || $bin_type eq "" or + die( + "primary key '$primary_key' does not support '$bin_type' detail reporting" + ); + + my $hasBranchData = + $lcovutil::br_coverage && 0 != $dirSummary->branch_found(); + my $hasMcdcData = + $lcovutil::mcdc_coverage && 0 != $dirSummary->mcdc_found(); + my $includeFunctionColumns = $lcovutil::func_coverage && + 0 != $dirSummary->function_found() && + 'owner' ne $primary_key; + + # Determine HTML code for column headings + my $hide = $HEAD_NO_DETAIL; + my $show = $HEAD_NO_DETAIL; + if (($dirSummary->type() eq 'directory' || + $main::flat || + $main::hierarchical) && + $show_details + ) { + # "detailed" if line coverage hash not empty + my $detailed = + defined($perTestcaseData) && scalar(%{$perTestcaseData->[0]}); + $hide = $detailed ? $HEAD_DETAIL_HIDDEN : $HEAD_NO_DETAIL; + $show = $detailed ? $HEAD_DETAIL_SHOWN : $HEAD_DETAIL_HIDDEN; + } + my $file_col_title = ($fileview || $main::flat) ? 
'File' : 'Directory'; + # don't insert the 'sort' controls if there is just a single source file + my $use_sort_button = $sort_tables && 1 < scalar($dirSummary->sources()); + my $file_code = + get_file_code($hide, $file_col_title, + $use_sort_button && $sort_type != $SORT_FILE, + $bin_type, $primary_key, $base_dir); + my $line_code = get_line_code($show, $sort_type, + "Line Coverage", + $use_sort_button && $sort_type != $SORT_LINE, + $bin_type, $primary_key, $base_dir); + my $func_code = get_func_code($hide, + "Function Coverage", + $use_sort_button && $sort_type != $SORT_FUNC, + $bin_type, $primary_key, $base_dir); + my $br_code = get_br_code($hide, + "Branch Coverage", + $use_sort_button && $sort_type != $SORT_BRANCH, + $bin_type, $primary_key, $base_dir); + my $mcdc_code = get_mcdc_code($hide, + "MC/DC Coverage", + $use_sort_button && $sort_type != $SORT_MCDC, + $bin_type, $primary_key, $base_dir); + + my @head_columns; + + my @lineCovCols = (["Rate", 2], "Total"); + my @mcdcCovCols = ("Rate", "Total"); + my @branchCovCols = ("Rate", "Total"); + my @functionCovCols = ("Rate", "Total"); + if ($main::show_hitTotalCol) { + my $t = $main::opt_missed ? "Missed" : "Hit"; + push(@lineCovCols, $t); + push(@mcdcCovCols, $t); + push(@branchCovCols, $t); + push(@functionCovCols, $t); + } + if ($main::show_tla) { + my @visit = ([SummaryInfo::LINE_DATA, \@lineCovCols]); + push(@visit, [SummaryInfo::MCDC_DATA, \@mcdcCovCols]) + if $hasMcdcData; + push(@visit, [SummaryInfo::BRANCH_DATA, \@branchCovCols]) + if $hasBranchData; + push(@visit, [SummaryInfo::FUNCTION_DATA, \@functionCovCols]) + if $includeFunctionColumns; + foreach my $d (@visit) { + my ($covtype, $cols) = @$d; + foreach my $tla (@{$activeTlaCols->{$covtype}}) { + my $label = + $main::use_legacyLabels ? $SummaryInfo::tlaToLegacy{$tla} : + $tla; + push(@$cols, + [$label, + 1, + $main::use_legacyLabels ? + undef : + $SummaryInfo::tlaToTitle{$tla} + ]); + } + } + } + push(@head_columns, [$line_code, $#lineCovCols + 2, \@lineCovCols]); + push(@head_columns, [$mcdc_code, $#mcdcCovCols + 1, \@mcdcCovCols]) + if $hasMcdcData; + push(@head_columns, [$br_code, $#branchCovCols + 1, \@branchCovCols]) + if $hasBranchData; + push(@head_columns, [$func_code, $#functionCovCols + 1, \@functionCovCols]) + if $includeFunctionColumns; + + my $showBinDetail = undef; + if ($bin_type eq '-date' && + $dirSummary && + $dirSummary->hasDateInfo()) { + $showBinDetail = 'date'; + } elsif ($bin_type eq '-owner' && + $dirSummary && + $dirSummary->hasOwnerInfo()) { + $showBinDetail = 'owner'; + } + my $num_columns = + write_file_table_prolog(*HTML_HANDLE, $file_code, + defined($showBinDetail) ? $showBinDetail : undef, + $primary_key, @head_columns); + + my @tableRows; + if ($primary_key eq 'name') { + + # sorted list of all the file or directory names + foreach my $name ($dirSummary->get_sorted_keys($sort_type, 1)) { + my $entrySummary = $dirSummary->get_source($name); + + if ($entrySummary->type() eq 'directory') { + if ('directory' eq $dirSummary->type()) { + $name = File::Basename::basename($name); + } elsif ($entrySummary->is_directory(1)) { + $name = $lcovutil::dirseparator . 
$name; + } + } else { + die("unexpected summary type") + unless 'file' eq $entrySummary->type(); + $name = File::Basename::basename($name); + } + push(@tableRows, + FileOrDirectoryCallback->new($name, $entrySummary)); + } + + } elsif ($primary_key eq 'owner') { + + # retrieve sorted list of owner names - alphabetically, by name + # or by number of missed lines or missed branches + my $all = defined($main::show_ownerBins) && $main::show_ownerBins; + + my %owners; + foreach my $d ([1, SummaryInfo::LINE_DATA, 0], + [$lcovutil::mcdc_coverage, SummaryInfo::MCDC_DATA, 1], + [$lcovutil::br_coverage, SummaryInfo::BRANCH_DATA, 2], + ) { + my ($enable, $dataType, $idx) = @$d; + next unless $enable; + + foreach my $owner ($dirSummary->owners($all, $dataType)) { + # (line count, branch count, mcdc count, function count) + $owners{$owner} = [[0, 0], [0, 0], [0, 0], [0, 0]] + unless exists($owners{$owner}); + + $owners{$owner}->[$idx] = [ + $dirSummary->owner_tlaCount($owner, 'found', $dataType), + $dirSummary->owner_tlaCount($owner, 'hit', $dataType) + ]; + } + } + my @sorted = sort(keys(%owners)); # default sort order + # now, sort the owner list... + foreach my $d ([1, $SORT_LINE, 0], + [$lcovutil::mcdc_coverage, $SORT_MCDC, 1], + [$lcovutil::br_coverage, $SORT_BRANCH, 2], + ) { + my ($enable, $s, $idx) = @$d; + if ($sort_type eq $s) { + # sort by number of missed lines/branches/MCDC expressions + @sorted = sort({ + my $la = $owners{$a}->[$idx]; + my $lb = $owners{$b}->[$idx]; + ($lb->[0] - $lb->[1]) + <=> ($la->[0] - $la->[1]) || + $a cmp $b # then by name + } keys(%owners)); + last; + } + } + + # don't truncate the 'owner detail table' + # if user asked to see the page, then they want to be able to navigate + # to all users + #if (defined($ownderTableElements) && + # $ownerTableElements < scalar(@sorted)) && + # (0 == scalar(@truncateOwnerTableLevels) || + # grep(/$callback_type/, @truncateOwnerTableLevels)))) { + # # truncate owner list in header table + # splice(@sorted, $ownerTableElements); + #} + + foreach my $owner (@sorted) { + push(@tableRows, + FileOrDirectoryOwnerCallback->new($owner, $dirSummary)); + } + + } elsif ($primary_key eq 'date') { + + for (my $bin = 0; $bin <= $#SummaryInfo::ageGroupHeader; ++$bin) { + my $ageval = $dirSummary->age_sample($bin); + my $lines = $dirSummary->lineCovCount('found', 'age', $ageval); + if (0 != $lines || + ($lcovutil::br_coverage && + 0 != $dirSummary->branchCovCount('found', 'age', $ageval)) + || + ($lcovutil::mcdc_coverage && + 0 != $dirSummary->mcdcCovCount('found', 'age', $ageval)) || + ($lcovutil::func_coverage && + 0 != $dirSummary->functionCovCount('found', 'age', $ageval)) + ) { + push(@tableRows, + FileOrDirectoryDateCallback->new($bin, $dirSummary)); + } + } + } else { + die("unsupported primary key '$primary_key'"); + } + + my $all = defined($main::show_ownerBins) && $main::show_ownerBins eq 'all'; + # show only the 'Total' row if all row entries are suppressed + # because they have no un-exercised coverpoints + my $elideEmptyRows = 0; + if ($primary_key eq 'owner') { + $elideEmptyRows = $main::show_ownerBins ne 'all'; + } elsif ($primary_key eq 'date') { + $elideEmptyRows = 1; + } + my $suppressedEmptyRow = 0; + # if only one secondary entry, we don't need the 'Total' header + my $suppressedSecondaryHeader = 0; + + foreach my $primaryCb (@tableRows) { + + # we need to find the 'owner' and 'date' row data for this file before + # we write anything else, because we need to know the number of + # rows that the $primary cell will span + + my 
@secondaryRows; + my $skippedSecondaryRows = 0; + + if (defined($showBinDetail)) { + + if (defined($primaryCb->summary())) { + my $source = $primaryCb->summary(); + + if ($showBinDetail eq 'owner') { + # do I need an option to suppress the list of owners? + # maybe too much information, in some circumstances? + # are there any non-empty owner tables here? + # If user explicitly asks to see the 'owner detail' page, + # then they must be interested in which code is written by + # each author - so we should not truncate the list + my ($ownerList, $truncated) = + $primaryCb->findOwnerList($callback_type, 0, $all); + die("unexpected truncate count") unless $truncated == 0; + push(@secondaryRows, @$ownerList) + if defined($ownerList); + } + + if ($showBinDetail eq 'date') { + for (my $bin = 0; + $bin <= $#SummaryInfo::ageGroupHeader; + ++$bin) { + my $ageval = $source->age_sample($bin); + my $lineCb = + $primaryCb->dateDetailCallback($ageval, + SummaryInfo::LINE_DATA); + my $lineTotal = $lineCb->get('found'); + my $hit = $lineCb->get('hit'); + my $lineMissed = $lineTotal - $hit; + + my $branchCb = + $primaryCb->dateDetailCallback($ageval, + SummaryInfo::BRANCH_DATA); + my $branchTotal = + $lcovutil::br_coverage ? $branchCb->get('found') : + 0; + $hit = + $lcovutil::br_coverage ? $branchCb->get('hit') : 0; + my $branchMissed = $branchTotal - $hit; + + my $mcdcCb = + $primaryCb->dateDetailCallback($ageval, + SummaryInfo::MCDC_DATA); + my $mcdcTotal = + $lcovutil::mcdc_coverage ? $mcdcCb->get('found') : + 0; + $hit = + $lcovutil::mcdc_coverage ? $mcdcCb->get('hit') : 0; + my $mcdcMissed = $mcdcTotal - $hit; + + my $functionCb = + $primaryCb->dateDetailCallback($ageval, + SummaryInfo::FUNCTION_DATA); + my $functionTotal = + $lcovutil::func_coverage ? + $functionCb->get('found') : + 0; + $hit = + $lcovutil::func_coverage ? $functionCb->get('hit') : + 0; + my $functionMissed = $functionTotal - $hit; + + next + if 0 == $lineTotal && + 0 == $branchTotal && + 0 == $mcdcTotal && + 0 == $functionTotal; + + push(@secondaryRows, + [$SummaryInfo::ageGroupHeader[$bin], + [$lineMissed, $lineTotal, $lineCb], + [$mcdcMissed, $mcdcTotal, $mcdcCb], + [$branchMissed, $branchTotal, $branchCb], + [$functionMissed, $functionTotal, $functionCb] + ]); + } + } + } + } # if showBinDetail + elsif ($primary_key ne 'name') { + ($skippedSecondaryRows, @secondaryRows) = + @{$primaryCb->findFileList($all)}; + } + + my ($found, $hit, $fn_found, $fn_hit, + $br_found, $br_hit, $mcdc_found, $mcdc_hit, + $page_link, $fileSummary, $fileDetails) = $primaryCb->data(); + # a bit of a hack: if this is top-level page (such that the links + # are to directory pages rather than to source code detail pages) + # and this is the 'owner bin detail' (or the 'date bin detail') view, + # then link to the same 'bin detail' view of the directory page + # This enables the user who is tracking down code written by a + # particular user (or on a particular date) to go link-to-link + # without having to select the 'bin' link in the destination header. 
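# As the comment above explains, the top-level 'bin detail' pages keep the
# reader in the same owner/date view when following a directory link.  A
# standalone illustration of the substitution applied just below; the literal
# values here are examples only:
my $example_ext  = 'html';
my $example_bin  = '-owner';    # '' for the plain view, '-date' for date bins
my $example_link = "src/module/index.$example_ext";
$example_link =~ s/index.$example_ext$/index$example_bin.$example_ext/;
# $example_link is now 'src/module/index-owner.html'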
+ if ($fileview == 0 && + $bin_type ne "") { + $page_link =~ s/index.$html_ext$/index$bin_type.$html_ext/; + } + + my @columns; + + my @tableCallbackData = ($primaryCb->name(), $primaryCb->summary(), + $fileDetails, $page_link); + my $showLineGraph = 1; + # Line coverage + push(@columns, + [$found, $hit, $med_limit, $hi_limit, $showLineGraph, + $primaryCb->totalCallback(SummaryInfo::LINE_DATA), + SummaryInfo::LINE_DATA + ]); + # MC/DC coverage + if ($hasMcdcData) { + push(@columns, + [$mcdc_found, + $mcdc_hit, + $mcdc_med_limit, + $mcdc_hi_limit, + 0, + $primaryCb->totalCallback(SummaryInfo::MCDC_DATA), + SummaryInfo::MCDC_DATA + ]); + } + # Branch coverage + if ($hasBranchData) { + push(@columns, + [$br_found, + $br_hit, + $br_med_limit, + $br_hi_limit, + 0, + $primaryCb->totalCallback(SummaryInfo::BRANCH_DATA), + SummaryInfo::BRANCH_DATA + ]); + } + # Function coverage + if ($includeFunctionColumns) { + # no 'owner' callbacks for function... + my $cbStruct = + $primaryCb->totalCallback(SummaryInfo::FUNCTION_DATA); + + push(@columns, + [$fn_found, $fn_hit, $fn_med_limit, $fn_hi_limit, 0, + $cbStruct, SummaryInfo::FUNCTION_DATA + ]); + } + my $elide_secondary_header = $compactSummaryTables && + (scalar(@secondaryRows) == 1 && $skippedSecondaryRows == 0); + $suppressedSecondaryHeader ||= $elide_secondary_header; + if (!$elide_secondary_header) { + # pass 'dirSummary' to print method: we omit the 'owner' column if + # none of the files in this directory have any owner information + # (i.e., none of them are found in the repo) + my $numRows = (1 + scalar(@secondaryRows)); + my $asterisk; + if ($elideEmptyRows && (1 == $numRows || $skippedSecondaryRows)) { + $asterisk = '∗'; + $suppressedEmptyRow = 1; + } + write_file_table_entry(*HTML_HANDLE, + $base_dir, + [$primaryCb->name(), \@tableCallbackData, + $activeTlaCols, $numRows, + $primary_key, 0, + $fileview, "fileOrDir", + $page_link, $dirSummary, + $showBinDetail, $asterisk + ], + @columns); + } + + # sort secondary rows... + if ($sort_type == $SORT_FILE) { + # alphabetic + @secondaryRows = sort({ $a->[0] cmp $b->[0] } @secondaryRows); + } else { + my $sortElem; + if ($sort_type == $SORT_LINE) { + $sortElem = 1; + } elsif ($sort_type == $SORT_BRANCH) { + $sortElem = 2; + } elsif ($sort_type == $SORT_MCDC) { + $sortElem = 3; + } elsif ($sort_type == $SORT_FUNC) { + $sortElem = 4; + } + @secondaryRows = + sort({ + my $ca = $a->[$sortElem]; + my $cb = $b->[$sortElem]; + # sort based on 'missed' + $cb->[0] <=> $ca->[0]; + } @secondaryRows); + } + foreach my $secondary (@secondaryRows) { + my ($name, $line, $branch, $mcdc, $func) = @$secondary; + my ($lineMissed, $lineTotal, $lineCb) = @$line; + my $lineHit = $lineTotal - $lineMissed; + + my $fileInfo = $primaryCb->secondaryElementFileData($name); + + my $entry_type = $lineCb->cb_type(); + + my @ownerColData; + push(@ownerColData, + [$lineTotal, $lineHit, $med_limit, + $hi_limit, $showLineGraph, $lineCb, + SummaryInfo::LINE_DATA + ]); + if ($hasMcdcData) { + my ($mcdcMissed, $mcdcTotal, $mcdcCallback) = @$mcdc; + # need to compute the totals... + push(@ownerColData, + [$mcdcTotal, $mcdcTotal - $mcdcMissed, + $mcdc_med_limit, $mcdc_hi_limit, + 0, $mcdcCallback, + SummaryInfo::MCDC_DATA + ]); + } + if ($hasBranchData) { + my ($branchMissed, $branchTotal, $brCallback) = @$branch; + # need to compute the totals... 
+ push(@ownerColData, + [$branchTotal, $branchTotal - $branchMissed, + $br_med_limit, $br_hi_limit, + 0, $brCallback, + SummaryInfo::BRANCH_DATA + ]); + } + if ($includeFunctionColumns) { + my ($funcMissed, $funcTotal, $funcCallback) = @$func; + # need to compute the totals... + push(@ownerColData, + [$funcTotal, $funcTotal - $funcMissed, + $fn_med_limit, $fn_hi_limit, 0, $funcCallback, + SummaryInfo::FUNCTION_DATA + ]); + } + if ($dirSummary->type() eq 'directory') { + # use the basename (not the path or full path) in the + # secondary key list: + # - these are files in the current directory, so we lose + # no information by eliding the directory part + $name = File::Basename::basename($name); + } + $name = apply_prefix($name, @dir_prefix); + write_file_table_entry(*HTML_HANDLE, $base_dir, + # 'owner' page type - no span, no page link + [$name, + $fileInfo, + $activeTlaCols, + 1, + $primary_key, + 1 + $elide_secondary_header, + $fileview, + $entry_type, + ], + @ownerColData); + } + + next + unless ($show_details && + defined($perTestcaseData) && + $primary_key eq "name"); + + # we know that the top-level callback item must hold a file. + my $filename = $primaryCb->name(); + + my ($testhash, $testfnchash, $testbrhash, $testcase_mcdc) = + @$perTestcaseData; + + my $testdata = $testhash->{$filename}; + + # Check whether we should write test specific coverage as well + next if (!defined($testdata)); + + my $testfncdata = $testfnchash->{$filename}; + my $testbrdata = $testbrhash->{$filename}; + my $test_mcdc = $testcase_mcdc->{$filename}; + + # Filter out those tests that actually affect this file + my %affecting_tests = %{ + get_affecting_tests($testdata, $testfncdata, $testbrdata, + $test_mcdc) + }; + + # Does any of the tests affect this file at all? + if (!%affecting_tests) { next; } + + foreach my $testname (keys(%affecting_tests)) { + + ($found, $hit, $fn_found, $fn_hit, $br_found, $br_hit, $mcdc_found, + $mcdc_hit) = @{$affecting_tests{$testname}}; + + my $showgraph = 0; + my @results; + push(@results, + [$found, $hit, + SummaryInfo::LINE_DATA, + TestcaseTlaCount->new( + $testdata->value($testname), $fileDetails, + SummaryInfo::LINE_DATA) + ]); + # there might not be optional test data for some tests - that type was + # not enabled + push(@results, + [$mcdc_found, + $mcdc_hit, + SummaryInfo::MCDC_DATA, + TestcaseTlaCount->new( + $test_mcdc->value($testname), $fileDetails, + SummaryInfo::MCDC_DATA) + ]) + if ($hasMcdcData && + defined($test_mcdc->value($testname))); + + push(@results, + [$br_found, + $br_hit, + SummaryInfo::BRANCH_DATA, + TestcaseTlaCount->new( + $testbrdata->value($testname), $fileDetails, + SummaryInfo::BRANCH_DATA) + ]) + if ($hasBranchData && + defined($testbrdata->value($testname))); + + push(@results, + [$fn_found, + $fn_hit, + SummaryInfo::FUNCTION_DATA, + TestcaseTlaCount->new( + $testfncdata->value($testname), $fileDetails, + SummaryInfo::FUNCTION_DATA) + ]) + if ($includeFunctionColumns && + defined($testfncdata->value($testname))); + + my $href = $testname; + # Insert link to description of available + if ($test_description{$testname}) { + $href = "" . "$testname"; + } + write_file_table_detail_entry(*HTML_HANDLE, $base_dir, + $href, $showBinDetail, $activeTlaCols, @results); + } + } + foreach my $note ( + [ $suppressedEmptyRow, + " 'Detail' entries with no 'missed' coverpoints are elided. Use the '--show-owners all' flag to retain them." + ], + [ $suppressedSecondaryHeader, + '∗∗ Bin \'Total\' header elided when bin contains only one entry.' 
+ ], + [ $lcovutil::func_coverage && !$includeFunctionColumns, + "Note: 'Function Coverage' columns elided as function owner is not identified." + ] + ) { + next unless $note->[0]; + write_html(*HTML_HANDLE, < + $note->[1] + +END_OF_HTML - $db = brcount_to_db($brcount); + } - return brcount_db_get_found_and_hit($db); + write_file_table_epilog(*HTML_HANDLE); } - # -# get_affecting_tests(testdata, testfncdata, testbrdata) +# get_affecting_tests(testdata, testfncdata, testbrdata, pertest_mcdc) # # HASHREF contains a mapping filename -> (linenumber -> exec count). Return # a hash containing mapping filename -> "lines found, lines hit" for each # filename which has a nonzero hit count. # -sub get_affecting_tests($$$) -{ - my ($testdata, $testfncdata, $testbrdata) = @_; - my $testname; - my $testcount; - my $testfnccount; - my $testbrcount; - my %result; - my $found; - my $hit; - my $fn_found; - my $fn_hit; - my $br_found; - my $br_hit; - - foreach $testname (keys(%{$testdata})) - { - # Get (line number -> count) hash for this test case - $testcount = $testdata->{$testname}; - $testfnccount = $testfncdata->{$testname}; - $testbrcount = $testbrdata->{$testname}; - - # Calculate sum - ($found, $hit) = get_found_and_hit($testcount); - ($fn_found, $fn_hit) = get_func_found_and_hit($testfnccount); - ($br_found, $br_hit) = get_br_found_and_hit($testbrcount); - - if ($hit>0) - { - $result{$testname} = "$found,$hit,$fn_found,$fn_hit,". - "$br_found,$br_hit"; - } - } - - return(\%result); -} - - -sub get_hash_reverse($) +sub get_affecting_tests($$$$) { - my ($hash) = @_; - my %result; - - foreach (keys(%{$hash})) { - $result{$hash->{$_}} = $_; - } - - return \%result; + my ($testdata, $testfncdata, $testbrdata, $testmcdcdata) = @_; + my %result; + + foreach my $testname ($testdata->keylist()) { + # Get (line number -> count) hash for this test case + my $testcount = $testdata->value($testname); + my $testfnccount = $testfncdata->value($testname); + my $testbrcount = $testbrdata->value($testname); + my $testmcdccount = $testmcdcdata->value($testname); + + # Calculate sum + my ($found, $hit) = $testcount->get_found_and_hit(); + # might be no data for this testcase + my ($fn_found, $fn_hit) = + defined($testfnccount) ? $testfnccount->get_found_and_hit() : + (0, 0); + my ($br_found, $br_hit) = + defined($testbrcount) ? $testbrcount->get_found_and_hit() : (0, 0); + my ($mcdc_found, $mcdc_hit) = + defined($testmcdccount) ? $testmcdccount->get_found_and_hit() : + (0, 0); + + $result{$testname} = [$found, $hit, $fn_found, $fn_hit, + $br_found, $br_hit, $mcdc_found, $mcdc_hit + ] + if ($hit > 0); + } + return (\%result); } # # write_source(filehandle, source_filename, count_data, checksum_data, -# converted_data, func_data, sumbrcount) +# converted_data, func_data, sumbrcount, mcdc_summary) # # Write an HTML view of a source code file. Returns a list containing # data as needed by gen_png(). @@ -5118,114 +13325,131 @@ sub get_hash_reverse($) # Die on error. 
# -sub write_source($$$$$$$) -{ - local *HTML_HANDLE = $_[0]; - local *SOURCE_HANDLE; - my $source_filename = $_[1]; - my %count_data; - my $line_number; - my @result; - my $checkdata = $_[3]; - my $converted = $_[4]; - my $funcdata = $_[5]; - my $sumbrcount = $_[6]; - my $datafunc = get_hash_reverse($funcdata); - my @file; - - if ($_[2]) - { - %count_data = %{$_[2]}; - } - - if (!open(SOURCE_HANDLE, "<", $source_filename)) { - my @lines; - my $last_line = 0; - - if (!$ignore[$ERROR_SOURCE]) { - die("ERROR: cannot read $source_filename\n"); - } - - # Continue without source file - warn("WARNING: cannot read $source_filename!\n"); - - @lines = sort( { $a <=> $b } keys(%count_data)); - if (@lines) { - $last_line = $lines[scalar(@lines) - 1]; - } - return ( ":" ) if ($last_line < 1); - - # Simulate gcov behavior - for ($line_number = 1; $line_number <= $last_line; - $line_number++) { - push(@file, "/* EOF */"); - } - } else { - @file = ; - } - - write_source_prolog(*HTML_HANDLE); - $line_number = 0; - foreach (@file) { - $line_number++; - chomp($_); - - # Also remove CR from line-end - s/\015$//; - - # Source code matches coverage data? - if (defined($checkdata->{$line_number}) && - ($checkdata->{$line_number} ne md5_base64($_))) - { - die("ERROR: checksum mismatch at $source_filename:". - "$line_number\n"); - } - - push (@result, - write_source_line(HTML_HANDLE, $line_number, - $_, $count_data{$line_number}, - $converted->{$line_number}, - $sumbrcount->{$line_number})); - } - - close(SOURCE_HANDLE); - write_source_epilog(*HTML_HANDLE); - return(@result); -} - - -sub funcview_get_func_code($$$) +sub write_source($$$$$$$$) { - my ($name, $base, $type) = @_; - my $result; - my $link; - - if ($sort && $type == 1) { - $link = "$name.func.$html_ext"; - } - $result = "Function Name"; - $result .= get_sort_code($link, "Sort by function name", $base); - - return $result; + local *HTML_HANDLE = shift; + my ($srcfile, $count_data, $checkdata, $fileCovInfo, + $funcdata, $sumbrcount, $mcdc_summary) = @_; + my @result; + + # suppress branch and MCDC columns if there is no data in the file + my $showBranches = $lcovutil::br_coverage && 0 != $sumbrcount->found(); + my $showMcdc = $lcovutil::mcdc_coverage && 0 != $mcdc_summary->found(); + + write_source_prolog(*HTML_HANDLE, $srcfile->isProjectFile(), + $showBranches, $showMcdc); + my $line_number = 0; + my $cbdata = PrintCallback->new($srcfile, $fileCovInfo); + + my ($region, $empty); + if ($SummaryInfo::selectCallback) { + $region = InInterestingRegion->new($srcfile, $fileCovInfo->lineMap()); + $empty = ''; + if ($srcfile->isProjectFile()) { + $empty .= ' ' x $main::age_field_width; + $empty .= ' ' x $main::owner_field_width; + } + # using 8 characters for line number field, with different + # foreground/background - to make distinctive + $empty .= ' ' . + (' ' x (8 - 3)) . 
'...'; + $empty .= ' ' x $main::br_field_width if $showBranches; + $empty .= ' ' x $main::mcdc_field_width if $showMcdc; + $empty .= ' ' x $main::tla_field_width; + $empty .= ' ' x ($line_field_width - 1); + $empty .= + ' (elided NUM_LINES ignored lines)'; + } + + my $prevLine = 0; + foreach my $srcline (@{$srcfile->lines()}) { + $line_number++; + $cbdata->lineNo($line_number); + if (defined($region) && !$region->interesting($line_number)) { + lcovutil::info(1, "skip line $line_number\n"); + next; + } + if ($prevLine != $line_number - 1) { + $cbdata->age(-1, $line_number); + $cbdata->owner(undef, $line_number); + $cbdata->tla(undef, $line_number); + + lcovutil::info(1, "continuation_line $line_number\n"); + my $l = $empty; + my $c = $line_number - ($prevLine + 1); + $l =~ s/NUM_LINES/$c/; + push(@result, $l); + write_html(*HTML_HANDLE, $l . "\n"); + } + $prevLine = $line_number; + + # Source code matches coverage data? + lcovutil::ignorable_error($lcovutil::ERROR_MISMATCH, + "checksum mismatch at " . $srcfile->path() . ":$line_number\n") + if ( + defined($checkdata->value($line_number)) && + ($checkdata->value($line_number) ne md5_base64($srcline->text())) + ); + push(@result, + write_source_line(HTML_HANDLE, $srcline, + $count_data->value($line_number), $showBranches, + $sumbrcount->value($line_number), $showMcdc, + $mcdc_summary->value($line_number), $cbdata)); + } + if ($prevLine != $line_number) { + # need another blank line at the bottom... + my $l = $empty; + my $c = $line_number - $prevLine; + $l =~ s/NUM_LINES/$c/; + push(@result, $l); + write_html(*HTML_HANDLE, $l . "\n"); + } + + write_source_epilog(*HTML_HANDLE); + return (@result); } -sub funcview_get_count_code($$$) +sub funcview_get_label($$$$) { - my ($name, $base, $type) = @_; - my $result; - my $link; - - if ($sort && $type == 0) { - $link = "$name.func-sort-c.$html_ext"; - } - $result = "Hit count"; - $result .= get_sort_code($link, "Sort by hit count", $base); - - return $result; -} + my ($name, $base, $col, $sort_type) = @_; + my $link; + + if (!defined($col)) { + if ($sort_tables && $sort_type != $SORT_FILE) { + $link = "$name.func.$html_ext"; + } + return "Function Name" . get_sort_code($link, "function name", $base); + } elsif ($col eq 'hit') { + if ($sort_tables && $sort_type != $SORT_LINE) { + $link = "$name.func-c.$html_ext"; + } + return "Hit count" . get_sort_code($link, "function hit count", $base); + } elsif ($col eq 'missed_line') { + if ($sort_tables && $sort_type != $SORT_MISSING_LINE) { + $link = "$name.func-l.$html_ext"; + } + return "Lines" . + get_sort_code($link, "unexercised lines in function", $base); + } elsif ($col eq 'missed_mcdc') { + if ($sort_tables && $sort_type != $SORT_MISSING_MCDC) { + $link = "$name.func-m.$html_ext"; + } + return "MC/DC" + . + get_sort_code($link, "unexercised MC/DC expressions in function", + $base); + } else { + die("unexpected sort $col") unless ($col eq 'missed_branch'); + if ($sort_tables && $sort_type != $SORT_MISSING_BRANCH) { + $link = "$name.func-b.$html_ext"; + } + return "Branches" . + get_sort_code($link, "unexercised branches in function", $base); + } +} # -# funcview_get_sorted(funcdata, sumfncdata, sort_type) +# funcview_get_sorted(funcdata, sort_type, mergedView) # # Depending on the value of sort_type, return a list of functions sorted # by name (type 0) or by the associated call count (type 1). 
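# The rewritten funcview_get_sorted() below orders functions by how many
# lines, branches or MC/DC expressions they still miss (highest first),
# falling back to the function name on ties.  A self-contained sketch of that
# comparator; the %missed sample data is invented for illustration:
my %missed = (    # name => [found, hit]
    'foo' => [10, 4],
    'bar' => [10, 1],
    'baz' => [2, 2],
);
my @order = sort {
    ($missed{$b}->[0] - $missed{$b}->[1])
        <=> ($missed{$a}->[0] - $missed{$a}->[1])
        or $a cmp $b
} keys %missed;
# @order is ('bar', 'foo', 'baz'): 9 missed, then 6, then 0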
@@ -5233,69 +13457,87 @@ sub funcview_get_count_code($$$) sub funcview_get_sorted($$$) { - my ($funcdata, $sumfncdata, $type) = @_; - - if ($type == 0) { - return sort(keys(%{$funcdata})); - } - return sort({ - $sumfncdata->{$b} == $sumfncdata->{$a} ? - $a cmp $b : $sumfncdata->{$a} <=> $sumfncdata->{$b} - } keys(%{$sumfncdata})); -} - -sub demangle_list($) -{ - my ($list) = @_; - my $tmpfile; - my $handle; - my %demangle; - my $demangle_arg = $demangle_cpp_params; - my %versions; - - # Write function names to file - ($handle, $tmpfile) = tempfile(); - die("ERROR: could not create temporary file") if (!defined($tmpfile)); - print($handle join("\n", @$list)); - close($handle); - - # Extra flag necessary on OS X so that symbols listed by gcov get demangled - # properly. - if ($demangle_arg eq "" && $^O eq "darwin") { - $demangle_arg = "--no-strip-underscores"; - } - - # Build translation hash from c++filt output - open($handle, "-|", "$demangle_cpp_tool $demangle_arg < $tmpfile") or - die("ERROR: could not run c++filt: $!\n"); - foreach my $func (@$list) { - my $translated = <$handle>; - my $version; - - last if (!defined($translated)); - chomp($translated); - - $version = ++$versions{$translated}; - $translated .= ".$version" if ($version > 1); - $demangle{$func} = $translated; - } - close($handle); - - if (scalar(keys(%demangle)) != scalar(@$list)) { - die("ERROR: c++filt output not as expected (". - scalar(keys(%demangle))." vs ".scalar(@$list).") lines\n"); - } - - unlink($tmpfile) or - warn("WARNING: could not remove temporary file $tmpfile: $!\n"); - - return \%demangle; + my ($funcData, $type, $merged) = @_; + + my ($differential_func, $lineFuncData, $branchFuncData, $mcdcFuncData) = + @$funcData; + my @rtn = keys(%$differential_func); + + if ($type == $SORT_LINE) { + @rtn = sort({ + my $da = $differential_func->{$a}->hit(); + my $db = $differential_func->{$b}->hit(); + $da->[0] <=> $db->[0] or + # sort by function name if count matches + $a cmp $b + } @rtn); + } elsif ($type == $SORT_MISSING_LINE) { + @rtn = sort({ + my $linea = + exists($lineFuncData->{$a}) ? $lineFuncData->{$a} : + undef; + my $lineb = + exists($lineFuncData->{$b}) ? $lineFuncData->{$b} : + undef; + my $missedA = + defined($linea) ? $linea->[0] - $linea->[1] : -1; + my $missedB = + defined($lineb) ? $lineb->[0] - $lineb->[1] : -1; + + # highest to lowest 'missed lines', then by + # function name if count matches + $missedB <=> $missedA or $a cmp $b + } @rtn); + } elsif ($type == $SORT_MISSING_BRANCH) { + @rtn = sort({ + my $bra = + exists($branchFuncData->{$a}) ? + $branchFuncData->{$a} : + undef; + my $brb = + exists($branchFuncData->{$b}) ? + $branchFuncData->{$b} : + undef; + my $missedA = + defined($bra) ? $bra->[0] - $bra->[1] : -1; + my $missedB = + defined($brb) ? $brb->[0] - $brb->[1] : -1; + + # highest to lowest 'missed branches', then by + # function name if count matches + $missedB <=> $missedA or $a cmp $b + } @rtn); + } elsif ($type == $SORT_MISSING_MCDC) { + @rtn = sort({ + my $bra = + exists($mcdcFuncData->{$a}) ? + $mcdcFuncData->{$a} : + undef; + my $brb = + exists($mcdcFuncData->{$b}) ? + $mcdcFuncData->{$b} : + undef; + my $missedA = + defined($bra) ? $bra->[0] - $bra->[1] : -1; + my $missedB = + defined($brb) ? 
$brb->[0] - $brb->[1] : -1; + + # highest to lowest 'missed expressions', then by + # function name if count matches + $missedB <=> $missedA or $a cmp $b + } @rtn); + + } elsif (!defined($main::no_sort)) { + # sort alphabetically by function name + @rtn = sort(@rtn); + } + return @rtn; } # -# write_function_table(filehandle, source_file, sumcount, funcdata, -# sumfnccount, testfncdata, sumbrcount, testbrdata, -# base_name, base_dir, sort_type) +# write_function_table(filehandle, funcData, source_file, sumcount, funcdata, +# testfncdata, sumbrcount, testbrdata, mcdc_summary, testase_mcdc, +# base_name, base_dir, sort_type) # # Write an HTML table listing all functions in a source file, including # also function call counts and line coverages inside of each function. @@ -5303,351 +13545,242 @@ sub demangle_list($) # Die on error. # -sub write_function_table(*$$$$$$$$$$) -{ - local *HTML_HANDLE = $_[0]; - my $source = $_[1]; - my $sumcount = $_[2]; - my $funcdata = $_[3]; - my $sumfncdata = $_[4]; - my $testfncdata = $_[5]; - my $sumbrcount = $_[6]; - my $testbrdata = $_[7]; - my $name = $_[8]; - my $base = $_[9]; - my $type = $_[10]; - my $func; - my $func_code; - my $count_code; - my $demangle; - - # Get HTML code for headings - $func_code = funcview_get_func_code($name, $base, $type); - $count_code = funcview_get_count_code($name, $base, $type); - write_html(*HTML_HANDLE, < - - - - - - +sub write_function_table(*$$$$$$$$$$$$) +{ + local *HTML_HANDLE = shift; + my ($funcData, $source, $sumcount, $funcdata, + $testfncdata, $sumbrcount, $testbrdata, $mcdc_summary, + $testcase_mcdc, $name, $base, $type) = @_; + my ($differentialMap, $funcLineCovMap, $funcBranchCovMap, $funcMcdcCovMap) + = @$funcData; + # Get HTML code for headings + my $func_code = funcview_get_label($name, $base, undef, $type); + my $count_code = funcview_get_label($name, $base, 'hit', $type); + my $line_code = funcview_get_label($name, $base, 'missed_line', $type); + my $branch_code = funcview_get_label($name, $base, 'missed_branch', $type); + my $mcdc_code = funcview_get_label($name, $base, 'missed_mcdc', $type); + my $showTlas = $main::show_tla && 0 != scalar(keys %$differentialMap); + my $tlaRow = ""; + my $lineProportionRow = ''; + my $branchProportionRow = ''; + my $mcdcProportionRow = ''; + my $countWidth = 20; + + if ($showTlas) { + my $label = $main::use_legacyLabels ? 'Hit?' : 'TLA'; + $tlaRow = ""; + $countWidth = 10; + } + if ($main::show_functionProportions) { + $lineProportionRow = "" + if (%$funcLineCovMap); + $branchProportionRow = "" + if $lcovutil::br_coverage && %$funcBranchCovMap; + $mcdcProportionRow = "" + if $lcovutil::mcdc_coverage && %$funcMcdcCovMap; + } + + die("no functions in file") unless (%$differentialMap); + write_html(*HTML_HANDLE, < +

$func_code$count_code
$label$line_code$branch_code$mcdc_code
+ + + + $tlaRow + + $lineProportionRow + $branchProportionRow + $mcdcProportionRow + END_OF_HTML - ; - - # Get demangle translation hash - if ($demangle_cpp) { - $demangle = demangle_list([ sort(keys(%{$funcdata})) ]); - } - - # Get a sorted table - foreach $func (funcview_get_sorted($funcdata, $sumfncdata, $type)) { - if (!defined($funcdata->{$func})) - { - next; - } - - my $startline = $funcdata->{$func} - $func_offset; - my $name = $func; - my $count = $sumfncdata->{$name}; - my $countstyle; - - # Replace function name with demangled version if available - $name = $demangle->{$name} if (exists($demangle->{$name})); - - # Escape special characters - $name = escape_html($name); - if ($startline < 1) { - $startline = 1; - } - if ($count == 0) { - $countstyle = "coverFnLo"; - } else { - $countstyle = "coverFnHi"; - } - - write_html(*HTML_HANDLE, < - + + foreach my $func ( + funcview_get_sorted($funcData, $type, $main::merge_function_aliases)) { + + my $funcEntry = $differentialMap->{$func}; + my ($count, $tla) = @{$funcEntry->hit()}; + next + if grep(/^$tla$/, ('DUB', 'DCB')); # don't display deleted functions + my $startline = $funcEntry->line() - $func_offset; + my $name = simplify_function_name($func); + my $countstyle; + + # Escape special characters + $name = escape_html($name); + if ($startline < 1) { + $startline = 1; + } + if ($count == 0) { + $countstyle = "coverFnLo"; + } else { + $countstyle = "coverFnHi"; + } + my $tlaRow = ""; + my $lineProportionRow = ''; + my $branchProportionRow = ''; + my $mcdcProportionRow = ''; + if ($showTlas) { + my $label = + $main::use_legacyLabels ? $SummaryInfo::tlaToLegacy{$tla} : + $tla; + $tlaRow = ""; + } + if ($main::show_functionProportions) { + if (exists($funcLineCovMap->{$func})) { + my ($found, $hit) = @{$funcLineCovMap->{$func}}; + # colorize based on hit proportion + my $style = + $rate_name[classify_rate($found, $hit, $ln_med_limit, + $ln_hi_limit)]; + my $rate = rate($hit, $found, " %"); + $lineProportionRow = + ""; + } else { + $lineProportionRow = "" + if %$funcLineCovMap; + } + if ($lcovutil::br_coverage && %$funcBranchCovMap) { + if (exists($funcBranchCovMap->{$func})) { + my ($found, $hit) = @{$funcBranchCovMap->{$func}}; + # colorize based on hit proportion + my $style = $rate_name[ + classify_rate($found, $hit, $br_med_limit, $br_hi_limit) + ]; + my $rate = rate($hit, $found, " %"); + $branchProportionRow = + ""; + } else { + $branchProportionRow = ""; + } + } + if ($lcovutil::mcdc_coverage && %$funcMcdcCovMap) { + if (exists($funcMcdcCovMap->{$func})) { + my ($found, $hit) = @{$funcMcdcCovMap->{$func}}; + # colorize based on hit proportion + my $style = $rate_name[ + classify_rate($found, $hit, $mcdc_med_limit, + $mcdc_hi_limit) + ]; + my $rate = rate($hit, $found, " %"); + $mcdcProportionRow = + ""; + } else { + $mcdcProportionRow = ""; + } + } + } + + write_html(*HTML_HANDLE, < + + $tlaRow + $lineProportionRow + $branchProportionRow + $mcdcProportionRow END_OF_HTML - ; - } - write_html(*HTML_HANDLE, < -
- -END_OF_HTML - ; -} - - -# -# info(printf_parameter) -# -# Use printf to write PRINTF_PARAMETER to stdout only when the $quiet flag -# is not set. -# - -sub info(@) -{ - if (!$quiet) - { - # Print info string - printf(@_); - } -} - - -# -# subtract_counts(data_ref, base_ref) -# - -sub subtract_counts($$) -{ - my %data = %{$_[0]}; - my %base = %{$_[1]}; - my $line; - my $data_count; - my $base_count; - my $hit = 0; - my $found = 0; - - foreach $line (keys(%data)) - { - $found++; - $data_count = $data{$line}; - $base_count = $base{$line}; - - if (defined($base_count)) - { - $data_count -= $base_count; - - # Make sure we don't get negative numbers - if ($data_count<0) { $data_count = 0; } - } - - $data{$line} = $data_count; - if ($data_count > 0) { $hit++; } - } - - return (\%data, $found, $hit); -} - - -# -# subtract_fnccounts(data, base) -# -# Subtract function call counts found in base from those in data. -# Return (data, f_found, f_hit). -# - -sub subtract_fnccounts($$) -{ - my %data; - my %base; - my $func; - my $data_count; - my $base_count; - my $fn_hit = 0; - my $fn_found = 0; - - %data = %{$_[0]} if (defined($_[0])); - %base = %{$_[1]} if (defined($_[1])); - foreach $func (keys(%data)) { - $fn_found++; - $data_count = $data{$func}; - $base_count = $base{$func}; - - if (defined($base_count)) { - $data_count -= $base_count; - # Make sure we don't get negative numbers - if ($data_count < 0) { - $data_count = 0; - } - } - - $data{$func} = $data_count; - if ($data_count > 0) { - $fn_hit++; - } - } - - return (\%data, $fn_found, $fn_hit); -} - - -# -# apply_baseline(data_ref, baseline_ref) -# -# Subtract the execution counts found in the baseline hash referenced by -# BASELINE_REF from actual data in DATA_REF. -# - -sub apply_baseline($$) -{ - my %data_hash = %{$_[0]}; - my %base_hash = %{$_[1]}; - my $filename; - my $testname; - my $data; - my $data_testdata; - my $data_funcdata; - my $data_checkdata; - my $data_testfncdata; - my $data_testbrdata; - my $data_count; - my $data_testfnccount; - my $data_testbrcount; - my $base; - my $base_checkdata; - my $base_sumfnccount; - my $base_sumbrcount; - my $base_count; - my $sumcount; - my $sumfnccount; - my $sumbrcount; - my $found; - my $hit; - my $fn_found; - my $fn_hit; - my $br_found; - my $br_hit; - - foreach $filename (keys(%data_hash)) - { - # Get data set for data and baseline - $data = $data_hash{$filename}; - $base = $base_hash{$filename}; - - # Skip data entries for which no base entry exists - if (!defined($base)) - { - next; - } - - # Get set entries for data and baseline - ($data_testdata, undef, $data_funcdata, $data_checkdata, - $data_testfncdata, undef, $data_testbrdata) = - get_info_entry($data); - (undef, $base_count, undef, $base_checkdata, undef, - $base_sumfnccount, undef, $base_sumbrcount) = - get_info_entry($base); - - # Check for compatible checksums - merge_checksums($data_checkdata, $base_checkdata, $filename); - - # sumcount has to be calculated anew - $sumcount = {}; - $sumfnccount = {}; - $sumbrcount = {}; - - # For each test case, subtract test specific counts - foreach $testname (keys(%{$data_testdata})) - { - # Get counts of both data and baseline - $data_count = $data_testdata->{$testname}; - $data_testfnccount = $data_testfncdata->{$testname}; - $data_testbrcount = $data_testbrdata->{$testname}; - - ($data_count, undef, $hit) = - subtract_counts($data_count, $base_count); - ($data_testfnccount) = - subtract_fnccounts($data_testfnccount, - $base_sumfnccount); - ($data_testbrcount) = - 
combine_brcount($data_testbrcount, - $base_sumbrcount, $BR_SUB); - - - # Check whether this test case did hit any line at all - if ($hit > 0) - { - # Write back resulting hash - $data_testdata->{$testname} = $data_count; - $data_testfncdata->{$testname} = - $data_testfnccount; - $data_testbrdata->{$testname} = - $data_testbrcount; - } - else - { - # Delete test case which did not impact this - # file - delete($data_testdata->{$testname}); - delete($data_testfncdata->{$testname}); - delete($data_testbrdata->{$testname}); - } - - # Add counts to sum of counts - ($sumcount, $found, $hit) = - add_counts($sumcount, $data_count); - ($sumfnccount, $fn_found, $fn_hit) = - add_fnccount($sumfnccount, $data_testfnccount); - ($sumbrcount, $br_found, $br_hit) = - combine_brcount($sumbrcount, $data_testbrcount, - $BR_ADD); - } - - # Write back resulting entry - set_info_entry($data, $data_testdata, $sumcount, $data_funcdata, - $data_checkdata, $data_testfncdata, $sumfnccount, - $data_testbrdata, $sumbrcount, $found, $hit, - $fn_found, $fn_hit, $br_found, $br_hit); - - $data_hash{$filename} = $data; - } - - return (\%data_hash); + if ((!defined($main::suppress_function_aliases) || + 0 == $main::suppress_function_aliases) && + $funcEntry->numAliases() > 1 + ) { + my $aliases = $funcEntry->aliases(); + my @aliasList = keys(%$aliases); + if ($main::show_functionProportions) { + $lineProportionRow = "
" + if %$funcLineCovMap; + $branchProportionRow = $lineProportionRow + if $lcovutil::br_coverage && %$funcBranchCovMap; + $mcdcProportionRow = $mcdcProportionRow + if $lcovutil::mcdc_coverage && %$funcMcdcCovMap; + } + if (0 != $type) { + @aliasList = + sort({ + my $da = $aliases->{$a}; + my $db = $aliases->{$b}; + $da->[0] <=> $db->[0] or $a cmp $b + } @aliasList); + } else { + @aliasList = sort(@aliasList); + } + foreach my $alias (@aliasList) { + my ($hit, $tla) = @{$aliases->{$alias}}; + # don't display deleted functions + next if grep(/^$tla$/, ('DUB', 'DCB')); # + my $style = "coverFnAlias" . ($hit == 0 ? "Lo" : "Hi"); + $tlaRow = ""; + if ($showTlas) { + my $label = + $main::use_legacyLabels ? + $SummaryInfo::tlaToLegacy{$tla} : + $tla; + $tlaRow = ""; + } + + # Escape special characters + $alias = escape_html(simplify_function_name($alias)); + + write_html(*HTML_HANDLE, < + + $tlaRow + + $lineProportionRow + $branchProportionRow + $mcdcProportionRow + +END_OF_HTML + } + } + } + write_html(*HTML_HANDLE, < +
+ +END_OF_HTML } - # # remove_unused_descriptions() # # Removes all test descriptions from the global hash %test_description which -# are not present in %info_data. +# are not present in %current_data. # sub remove_unused_descriptions() { - my $filename; # The current filename - my %test_list; # Hash containing found test names - my $test_data; # Reference to hash test_name -> count_data - my $before; # Initial number of descriptions - my $after; # Remaining number of descriptions - - $before = scalar(keys(%test_description)); - - foreach $filename (keys(%info_data)) - { - ($test_data) = get_info_entry($info_data{$filename}); - foreach (keys(%{$test_data})) - { - $test_list{$_} = ""; - } - } - - # Remove descriptions for tests which are not in our list - foreach (keys(%test_description)) - { - if (!defined($test_list{$_})) - { - delete($test_description{$_}); - } - } - - $after = scalar(keys(%test_description)); - if ($after < $before) - { - info("Removed ".($before - $after). - " unused descriptions, $after remaining.\n"); - } + my $filename; # The current filename + my %test_list; # Hash containing found test names + my $test_data; # Reference to hash test_name -> count_data + my $before; # Initial number of descriptions + my $after; # Remaining number of descriptions + + $before = scalar(keys(%test_description)); + + foreach $filename ($current_data->files()) { + ($test_data) = $current_data->data($filename)->get_info(); + foreach ($test_data->keylist()) { + $test_list{$_} = ""; + } + } + + # Remove descriptions for tests which are not in our list + foreach (keys(%test_description)) { + if (!defined($test_list{$_})) { + delete($test_description{$_}); + } + } + + $after = scalar(keys(%test_description)); + if ($after < $before) { + info("Removed " . ($before - $after) . + " unused descriptions, $after remaining.\n"); + } } - # # apply_prefix(filename, PREFIXES) # @@ -5657,143 +13790,21 @@ sub remove_unused_descriptions() sub apply_prefix($@) { - my $filename = shift; - my @dir_prefix = @_; - - if (@dir_prefix) - { - foreach my $prefix (@dir_prefix) - { - if ($prefix eq $filename) - { - return "root"; - } - if ($prefix ne "" && $filename =~ /^\Q$prefix\E\/(.*)$/) - { - return substr($filename, length($prefix) + 1); - } - } - } - - return $filename; -} - - -# -# system_no_output(mode, parameters) -# -# Call an external program using PARAMETERS while suppressing depending on -# the value of MODE: -# -# MODE & 1: suppress STDOUT -# MODE & 2: suppress STDERR -# -# Return 0 on success, non-zero otherwise. -# - -sub system_no_output($@) -{ - my $mode = shift; - my $result; - local *OLD_STDERR; - local *OLD_STDOUT; - - # Save old stdout and stderr handles - ($mode & 1) && open(OLD_STDOUT, ">>&", "STDOUT"); - ($mode & 2) && open(OLD_STDERR, ">>&", "STDERR"); - - # Redirect to /dev/null - ($mode & 1) && open(STDOUT, ">", "/dev/null"); - ($mode & 2) && open(STDERR, ">", "/dev/null"); - - system(@_); - $result = $?; - - # Close redirected handles - ($mode & 1) && close(STDOUT); - ($mode & 2) && close(STDERR); - - # Restore old handles - ($mode & 1) && open(STDOUT, ">>&", "OLD_STDOUT"); - ($mode & 2) && open(STDERR, ">>&", "OLD_STDERR"); - - return $result; -} - - -# -# read_config(filename) -# -# Read configuration file FILENAME and return a reference to a hash containing -# all valid key=value pairs found. 
-# - -sub read_config($) -{ - my $filename = $_[0]; - my %result; - my $key; - my $value; - local *HANDLE; - - if (!open(HANDLE, "<", $filename)) - { - warn("WARNING: cannot read configuration file $filename\n"); - return undef; - } - while () - { - chomp; - # Skip comments - s/#.*//; - # Remove leading blanks - s/^\s+//; - # Remove trailing blanks - s/\s+$//; - next unless length; - ($key, $value) = split(/\s*=\s*/, $_, 2); - if (defined($key) && defined($value)) - { - $result{$key} = $value; - } - else - { - warn("WARNING: malformed statement in line $. ". - "of configuration file $filename\n"); - } - } - close(HANDLE); - return \%result; -} - - -# -# apply_config(REF) -# -# REF is a reference to a hash containing the following mapping: -# -# key_string => var_ref -# -# where KEY_STRING is a keyword and VAR_REF is a reference to an associated -# variable. If the global configuration hashes CONFIG or OPT_RC contain a value -# for keyword KEY_STRING, VAR_REF will be assigned the value for that keyword. -# - -sub apply_config($) -{ - my $ref = $_[0]; - - foreach (keys(%{$ref})) - { - if (defined($opt_rc{$_})) { - ${$ref->{$_}} = $opt_rc{$_}; - } elsif (defined($config->{$_})) { - ${$ref->{$_}} = $config->{$_}; - } - } + my $filename = shift; + + foreach my $prefix (@_) { + if ($prefix eq $filename) { + return 'root'; + } + if ($prefix ne "" && + $filename =~ /^\Q$prefix\E$lcovutil::dirseparator(.*)$/) { + return substr($filename, length($prefix) + 1); + } + } + + return $filename; } - # # get_html_prolog(FILENAME) # @@ -5803,24 +13814,20 @@ sub apply_config($) sub get_html_prolog($) { - my $filename = $_[0]; - my $result = ""; - - if (defined($filename)) - { - local *HANDLE; - - open(HANDLE, "<", $filename) - or die("ERROR: cannot open html prolog $filename!\n"); - while () - { - $result .= $_; - } - close(HANDLE); - } - else - { - $result = <) { + $result .= $_; + } + close(HANDLE) or die("unable to close HTML handle: $!\n"); + } else { + $result = < @@ -5834,13 +13841,10 @@ sub get_html_prolog($) END_OF_HTML - ; - } - - return $result; + } + return $result; } - # # get_html_epilog(FILENAME) # @@ -5849,82 +13853,27 @@ END_OF_HTML # sub get_html_epilog($) { - my $filename = $_[0]; - my $result = ""; - - if (defined($filename)) - { - local *HANDLE; - - open(HANDLE, "<", $filename) - or die("ERROR: cannot open html epilog $filename!\n"); - while () - { - $result .= $_; - } - close(HANDLE); - } - else - { - $result = <) { + $result .= $_; + } + close(HANDLE) or die("unable to close HTML handle: $!\n"); + } else { + $result = < END_OF_HTML - ; - } - - return $result; - -} - -sub warn_handler($) -{ - my ($msg) = @_; - - warn("$tool_name: $msg"); -} - -sub die_handler($) -{ - my ($msg) = @_; + } - die("$tool_name: $msg"); -} - -# -# parse_ignore_errors(@ignore_errors) -# -# Parse user input about which errors to ignore. -# - -sub parse_ignore_errors(@) -{ - my (@ignore_errors) = @_; - my @items; - my $item; - - return if (!@ignore_errors); - - foreach $item (@ignore_errors) { - $item =~ s/\s//g; - if ($item =~ /,/) { - # Split and add comma-separated parameters - push(@items, split(/,/, $item)); - } else { - # Add single parameter - push(@items, $item); - } - } - foreach $item (@items) { - my $item_id = $ERROR_ID{lc($item)}; - - if (!defined($item_id)) { - die("ERROR: unknown argument for --ignore-errors: ". 
- "$item\n"); - } - $ignore[$item_id] = 1; - } + return $result; } # @@ -5935,52 +13884,38 @@ sub parse_ignore_errors(@) sub parse_dir_prefix(@) { - my (@opt_dir_prefix) = @_; - my $item; - - return if (!@opt_dir_prefix); - - foreach $item (@opt_dir_prefix) { - if ($item =~ /,/) { - # Split and add comma-separated parameters - push(@dir_prefix, split(/,/, $item)); - } else { - # Add single parameter - push(@dir_prefix, $item); - } - } + my (@opt_dir_prefix) = @_; + + return if (!@opt_dir_prefix); + + foreach my $item (@opt_dir_prefix) { + if ($item =~ /$lcovutil::split_char/) { + # Split and add comma-separated parameters + push(@dir_prefix, split($lcovutil::split_char, $item)); + } else { + # Add single parameter + push(@dir_prefix, $item); + } + } } # -# rate(hit, found[, suffix, precision, width]) +# simplify_function_name($name) # -# Return the coverage rate [0..100] for HIT and FOUND values. 0 is only -# returned when HIT is 0. 100 is only returned when HIT equals FOUND. -# PRECISION specifies the precision of the result. SUFFIX defines a -# string that is appended to the result if FOUND is non-zero. Spaces -# are added to the start of the resulting string until it is at least WIDTH -# characters wide. +# apply @function_simplify_patterns to $name and return +# goal is to shorten really long demangled names/template expansions # - -sub rate($$;$$$) +sub simplify_function_name($) { - my ($hit, $found, $suffix, $precision, $width) = @_; - my $rate; - - # Assign defaults if necessary - $precision = $default_precision if (!defined($precision)); - $suffix = "" if (!defined($suffix)); - $width = 0 if (!defined($width)); - - return sprintf("%*s", $width, "-") if (!defined($found) || $found == 0); - $rate = sprintf("%.*f", $precision, $hit * 100 / $found); - - # Adjust rates if necessary - if ($rate == 0 && $hit > 0) { - $rate = sprintf("%.*f", $precision, 1 / 10 ** $precision); - } elsif ($rate == 100 && $hit != $found) { - $rate = sprintf("%.*f", $precision, 100 - 1 / 10 ** $precision); - } - - return sprintf("%*s", $width, $rate.$suffix); + my $name = shift; + if ($simplifyFunctionCallback) { + + eval { $name = $simplifyFunctionCallback->simplify($name); }; + if ($@) { + my $context = MessageContext::context(); + lcovutil::ignorable_error($lcovutil::ERROR_CALLBACK, + "simplify($name) failed$context: $@"); + } + } + return $name; } diff --git a/bin/geninfo b/bin/geninfo index 43b023e5..e1c478ff 100755 --- a/bin/geninfo +++ b/bin/geninfo @@ -10,7 +10,7 @@ # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. +# General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see @@ -47,617 +47,590 @@ # 2008-07-14 / Tom Zoerner: added --function-coverage command line option # 2008-08-13 / Peter Oberparleiter: modified function coverage # implementation (now enabled per default) -# +# July 2020 / Henry Cox: henry.cox@mediatek.com +# Refactor to use common lcovutil package. +# Add filters to suppress certain line and branch coverpoints +# Sept 2020 / Henry Cox: modify to use common lcov package for coverage +# data representation. 
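One note on the simplify_function_name() helper added to genhtml above (the last hunk before this geninfo diff): the configured callback only needs to expose a simplify() method, and anything it throws is reported as an ignorable 'callback' error rather than aborting the run. A minimal sketch of such a callback follows; the package name and the template-collapsing rule are purely illustrative, and the way the callback object gets registered is handled elsewhere and is not shown in this patch:

    package ExampleSimplify;    # hypothetical name, for illustration only

    sub new { return bless {}, shift; }

    # shorten long demangled C++ names by collapsing one level of
    # template arguments, e.g.
    #   std::map<int, long>::size()  ->  std::map<...>::size()
    sub simplify {
        my ($self, $name) = @_;
        $name =~ s/<[^<>]*>/<...>/g;
        return $name;
    }

    1;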
use strict; use warnings; -use File::Basename; +use File::Basename qw(basename dirname fileparse); use File::Spec::Functions qw /abs2rel catdir file_name_is_absolute splitdir - splitpath catpath/; -use File::Temp qw(tempfile tempdir); -use File::Copy qw(copy); -use Getopt::Long; -use Digest::MD5 qw(md5_base64); -use Cwd qw/abs_path/; -use IO::Uncompress::Gunzip qw(gunzip $GunzipError); -use JSON::PP qw(decode_json); - -if( $^O eq "msys" ) -{ - require File::Spec::Win32; + splitpath catpath catfile/; +use File::Temp; +use File::Copy qw(copy move); +use File::Path; +use Cwd qw/abs_path getcwd realpath/; +use Time::HiRes; # for profiling +use Capture::Tiny; +use FindBin; +use Storable; +use POSIX; + +use lib "$FindBin::RealBin/../lib"; +use lcovutil qw (define_errors parse_ignore_errors + $tool_name $tool_dir $lcov_version $lcov_url + ignorable_error ignorable_warning is_ignored + set_info_callback info init_verbose_flag $verbose + debug $debug + $br_coverage $func_coverage + system_no_output $devnull $dirseparator + die_handler warn_handler + parse_cov_filters summarize_cov_filters + $EXCL_START $EXCL_STOP $EXCL_BR_START $EXCL_BR_STOP + $EXCL_EXCEPTION_BR_START $EXCL_EXCEPTION_BR_STOP + $EXCL_LINE $EXCL_BR_LINE $EXCL_EXCEPTION_LINE + + %excluded_files + warn_file_patterns + @extractVersionScript + + $ERROR_GCOV $ERROR_GRAPH $ERROR_PACKAGE $ERROR_CHILD + $ERROR_EMPTY $ERROR_PARALLEL $ERROR_UNSUPPORTED $ERROR_PATH + $ERROR_INCONSISTENT_DATA $ERROR_UTILITY $ERROR_FORMAT + report_parallel_error check_parent_process + summarize_messages + is_external @internal_dirs + parseOptions + @comments + $maxParallelism init_parallel_params $maxMemory + ); + +if ($^O eq "msys") { + require File::Spec::Win32; } -# Constants -our $tool_dir = abs_path(dirname($0)); -our $lcov_version = 'LCOV version '.`$tool_dir/get_version.sh --full`; -our $lcov_url = "http://ltp.sourceforge.net/coverage/lcov.php"; -our $gcov_tool = "gcov"; -our $tool_name = basename($0); - -our $GCOV_VERSION_8_0_0 = 0x80000; -our $GCOV_VERSION_4_7_0 = 0x40700; -our $GCOV_VERSION_3_4_0 = 0x30400; -our $GCOV_VERSION_3_3_0 = 0x30300; -our $GCNO_FUNCTION_TAG = 0x01000000; -our $GCNO_LINES_TAG = 0x01450000; -our $GCNO_FILE_MAGIC = 0x67636e6f; -our $BBG_FILE_MAGIC = 0x67626267; - -# Error classes which users may specify to ignore during processing -our $ERROR_GCOV = 0; -our $ERROR_SOURCE = 1; -our $ERROR_GRAPH = 2; -our %ERROR_ID = ( - "gcov" => $ERROR_GCOV, - "source" => $ERROR_SOURCE, - "graph" => $ERROR_GRAPH, -); - -our $EXCL_START = "LCOV_EXCL_START"; -our $EXCL_STOP = "LCOV_EXCL_STOP"; +our @gcov_tool; -# Marker to exclude branch coverage but keep function and line coverage -our $EXCL_BR_START = "LCOV_EXCL_BR_START"; -our $EXCL_BR_STOP = "LCOV_EXCL_BR_STOP"; +# Constants -# Marker to exclude exception branch coverage but keep function, line coverage and non-exception branch coverage -our $EXCL_EXCEPTION_BR_START = "LCOV_EXCL_EXCEPTION_BR_START"; -our $EXCL_EXCEPTION_BR_STOP = "LCOV_EXCL_EXCEPTION_BR_STOP"; +our $GCOV_VERSION_8_0_0 = 0x80000; +our $GCOV_VERSION_4_7_0 = 0x40700; +our $GCOV_VERSION_4_2_0 = 0x40200; # llvm/11 # Compatibility mode values -our $COMPAT_VALUE_OFF = 0; -our $COMPAT_VALUE_ON = 1; -our $COMPAT_VALUE_AUTO = 2; +our $COMPAT_VALUE_OFF = 0; +our $COMPAT_VALUE_ON = 1; +our $COMPAT_VALUE_AUTO = 2; # Compatibility mode value names -our %COMPAT_NAME_TO_VALUE = ( - "off" => $COMPAT_VALUE_OFF, - "on" => $COMPAT_VALUE_ON, - "auto" => $COMPAT_VALUE_AUTO, -); +our %COMPAT_NAME_TO_VALUE = ("off" => $COMPAT_VALUE_OFF, + "on" => 
$COMPAT_VALUE_ON, + "auto" => $COMPAT_VALUE_AUTO,); -# Compatiblity modes -our $COMPAT_MODE_LIBTOOL = 1 << 0; -our $COMPAT_MODE_HAMMER = 1 << 1; -our $COMPAT_MODE_SPLIT_CRC = 1 << 2; +# Compatibility modes +our $COMPAT_MODE_LIBTOOL = 1 << 0; +our $COMPAT_MODE_HAMMER = 1 << 1; +our $COMPAT_MODE_SPLIT_CRC = 1 << 2; # Compatibility mode names -our %COMPAT_NAME_TO_MODE = ( - "libtool" => $COMPAT_MODE_LIBTOOL, - "hammer" => $COMPAT_MODE_HAMMER, - "split_crc" => $COMPAT_MODE_SPLIT_CRC, - "android_4_4_0" => $COMPAT_MODE_SPLIT_CRC, -); +our %COMPAT_NAME_TO_MODE = ("libtool" => $COMPAT_MODE_LIBTOOL, + "hammer" => $COMPAT_MODE_HAMMER, + "split_crc" => $COMPAT_MODE_SPLIT_CRC, + "android_4_4_0" => $COMPAT_MODE_SPLIT_CRC,); # Map modes to names -our %COMPAT_MODE_TO_NAME = ( - $COMPAT_MODE_LIBTOOL => "libtool", - $COMPAT_MODE_HAMMER => "hammer", - $COMPAT_MODE_SPLIT_CRC => "split_crc", -); +our %COMPAT_MODE_TO_NAME = ($COMPAT_MODE_LIBTOOL => "libtool", + $COMPAT_MODE_HAMMER => "hammer", + $COMPAT_MODE_SPLIT_CRC => "split_crc",); # Compatibility mode default values -our %COMPAT_MODE_DEFAULTS = ( - $COMPAT_MODE_LIBTOOL => $COMPAT_VALUE_ON, - $COMPAT_MODE_HAMMER => $COMPAT_VALUE_AUTO, - $COMPAT_MODE_SPLIT_CRC => $COMPAT_VALUE_AUTO, -); +our %COMPAT_MODE_DEFAULTS = ($COMPAT_MODE_LIBTOOL => $COMPAT_VALUE_ON, + $COMPAT_MODE_HAMMER => $COMPAT_VALUE_AUTO, + $COMPAT_MODE_SPLIT_CRC => $COMPAT_VALUE_AUTO,); # Compatibility mode auto-detection routines -sub compat_hammer_autodetect(); -our %COMPAT_MODE_AUTO = ( - $COMPAT_MODE_HAMMER => \&compat_hammer_autodetect, - $COMPAT_MODE_SPLIT_CRC => 1, # will be done later +our %COMPAT_MODE_AUTO = ($COMPAT_MODE_HAMMER => 0, # no gcc/3.3 support + $COMPAT_MODE_SPLIT_CRC => 1, # will be done later ); -our $BR_LINE = 0; -our $BR_BLOCK = 1; -our $BR_BRANCH = 2; -our $BR_TAKEN = 3; -our $BR_VEC_ENTRIES = 4; -our $BR_VEC_WIDTH = 32; -our $BR_VEC_MAX = vec(pack('b*', 1 x $BR_VEC_WIDTH), 0, $BR_VEC_WIDTH); +our $UNNAMED_BLOCK = -1; -our $UNNAMED_BLOCK = -1; +our $trace_data; # Prototypes sub print_usage(*); -sub transform_pattern($); -sub gen_info($); -sub process_dafile($$); +sub gen_info(@); +sub process_dafile($$$$); sub match_filename($@); sub solve_ambiguous_match($$$); sub split_filename($); sub solve_relative_path($$); +sub compute_internal_directories(@); sub read_gcov_header($); -sub read_gcov_file($); -sub info(@); -sub process_intermediate($$$); +sub read_gcov_file($$$); +sub my_info(@); +set_info_callback(\&my_info); +sub process_intermediate($$$$); sub map_llvm_version($); sub version_to_str($); sub get_gcov_version(); -sub system_no_output($@); -sub read_config($); -sub apply_config($); -sub apply_exclusion_data($$); +sub apply_exclusion_data($$$); sub process_graphfile($$); -sub filter_fn_name($); -sub warn_handler($); -sub die_handler($); +sub filter_fn_name($$); sub graph_error($$); -sub graph_expect($); sub graph_read(*$;$$); sub graph_skip(*$;$); sub uniq(@); sub sort_uniq(@); -sub sort_uniq_lex(@); sub graph_cleanup($); sub graph_find_base($); sub graph_from_bb($$$$); sub graph_add_order($$$); -sub read_bb_word(*;$); -sub read_bb_value(*;$); -sub read_bb_string(*$); -sub read_bb($); -sub read_bbg_word(*;$); -sub read_bbg_value(*;$); -sub read_bbg_string(*); -sub read_bbg_lines_record(*$$$$$); -sub read_bbg($); sub read_gcno_word(*;$$); sub read_gcno_value(*$;$$); sub read_gcno_string(*$); sub read_gcno_lines_record(*$$$$$$); -sub determine_gcno_split_crc($$$$); sub read_gcno_function_record(*$$$$$); sub read_gcno($); sub get_gcov_capabilities(); -sub 
get_overall_line($$$$); -sub print_overall_rate($$$$$$$$$); -sub br_gvec_len($); -sub br_gvec_get($$); -sub debug($); sub int_handler(); -sub parse_ignore_errors(@); -sub is_external($); sub compat_name($); sub parse_compat_modes($); sub is_compat($); -sub is_compat_auto($); - +sub which($); # Global variables our $gcov_version; our $gcov_version_string; -our $graph_file_extension; -our $data_file_extension; +our $graph_file_extension = '.gcno'; +our $data_file_extension = '.gcda'; our @data_directory; +our $buildDirSearchPath; our $test_name = ""; -our $quiet; our $help; our $output_filename; +our $single_file; # Write result into single merged file or not +our $files_created = 0; # Number of output files created our $base_directory; our $version; -our $follow; -our $checksum; -our $no_checksum; -our $opt_compat_libtool; our $opt_no_compat_libtool; -our $rc_adjust_src_path;# Regexp specifying parts to remove from source path -our $adjust_src_pattern; -our $adjust_src_replace; -our $adjust_testname; -our $config; # Configuration file contents -our @ignore_errors; # List of errors to ignore (parameter) -our @ignore; # List of errors to ignore (array) our $initial; -our @include_patterns; # List of source file patterns to include -our @exclude_patterns; # List of source file patterns to exclude -our %excluded_files; # Files excluded due to include/exclude options our $no_recursion = 0; our $maxdepth; -our $no_markers = 0; +our $no_markers = 0; our $opt_derive_func_data = 0; -our $opt_external = 1; -our $opt_no_external; -our $debug = 0; our $gcov_caps; -our @gcov_options; -our @internal_dirs; -our $opt_config_file; -our $opt_gcov_all_blocks = 1; -our $opt_compat; -our %opt_rc; our %compat_value; -our $gcno_split_crc; -our $func_coverage = 1; -our $br_coverage = 0; -our $no_exception_br = 0; -our $rc_auto_base = 1; -our $rc_intermediate = "auto"; our $intermediate; -our $excl_line = "LCOV_EXCL_LINE"; -our $excl_br_line = "LCOV_EXCL_BR_LINE"; -our $excl_exception_br_line = "LCOV_EXCL_EXCEPTION_BR_LINE"; +our $intervalMonitor; # class for progress reporting +our $totalChildCpuTime = 0; +our $intervalChildCpuTime = 0; # since last updata + +# for performance tweaking/tuning +our $defaultChunkSize; +our $defaultInterval; +our %childRetryCounts; +our @large_files; -our $cwd = `pwd`; +our $cwd = getcwd(); chomp($cwd); +lcovutil::save_cmd_line(\@ARGV, "$FindBin::RealBin"); # # Code entry point # # Register handler routine to be called when interrupted -$SIG{"INT"} = \&int_handler; +$SIG{"INT"} = \&int_handler; $SIG{__WARN__} = \&warn_handler; -$SIG{__DIE__} = \&die_handler; +$SIG{__DIE__} = \&die_handler; # Set LC_ALL so that gcov output will be in a unified format $ENV{"LC_ALL"} = "C"; -# Check command line for a configuration file name -Getopt::Long::Configure("pass_through", "no_auto_abbrev"); -GetOptions("config-file=s" => \$opt_config_file, - "rc=s%" => \%opt_rc); -Getopt::Long::Configure("default"); +# retrieve settings from RC file - use these if not overridden on command line +my %geninfo_opts = ("test-name|t=s" => \$test_name, + "output-filename|o=s" => \$output_filename, + "base-directory|b=s" => \$base_directory, + "follow|f" => \$lcovutil::opt_follow, + "compat-libtool" => \$lcovutil::opt_compat_libtool, + "no-compat-libtool" => \$opt_no_compat_libtool, + "gcov-tool=s" => \@gcov_tool, + "initial|i" => \$initial, + "all" => \$lcovutil::geninfo_captureAll, + "no-recursion" => \$no_recursion, + "no-markers" => \$no_markers, + "derive-func-data" => \$opt_derive_func_data, + "external|e" => 
\$lcovutil::opt_external, + "no-external" => \$lcovutil::opt_no_external, + "compat=s" => \$lcovutil::geninfo_opt_compat, + 'large-file=s' => \@large_files); -{ - # Remove spaces around rc options - my %new_opt_rc; - - while (my ($key, $value) = each(%opt_rc)) { - $key =~ s/^\s+|\s+$//g; - $value =~ s/^\s+|\s+$//g; - - $new_opt_rc{$key} = $value; - } - %opt_rc = %new_opt_rc; +# Parse command line options +if (!lcovutil::parseOptions(\%lcovutil::geninfo_rc_opts, \%geninfo_opts, + \$output_filename)) { + print(STDERR "Use $tool_name --help to get usage information\n"); + exit(1); } -# Read configuration file if available -if (defined($opt_config_file)) { - $config = read_config($opt_config_file); -} elsif (defined($ENV{"HOME"}) && (-r $ENV{"HOME"}."/.lcovrc")) -{ - $config = read_config($ENV{"HOME"}."/.lcovrc"); +$buildDirSearchPath = + SearchPath->new('build directory', @lcovutil::build_directory); +@gcov_tool = @lcovutil::rc_gcov_tool unless @gcov_tool; + +eval { + map { qr($_) } @large_files; +}; +die("invalid 'large-file' regexp: $@") + if ($@); + +# Check regexp +if (defined($lcovutil::rc_adjust_src_path)) { + my ($pattern, $replace) = split(/\s*=>\s*/, $lcovutil::rc_adjust_src_path); + # If no replacement is specified, simply remove pattern + $replace = '' unless defined($replace); + my $p = "s#$pattern#$replace#g"; + $p .= 'i' if $lcovutil::case_insensitive; + my $text = 'abc'; + my $str = eval { '$test =~ ' . $p . ';' }; + if ($@) { + lcovutil::ignorable_error($lcovutil::ERROR_FORMAT, + "Invalid 'geninfo_adjust_src_path=$lcovutil::rc_adjust_src_path' syntax: '$@'" + ); + } else { + push(@lcovutil::file_subst_patterns, [$p, 0]); + } } -elsif (-r "/etc/lcovrc") -{ - $config = read_config("/etc/lcovrc"); -} elsif (-r "/usr/local/etc/lcovrc") -{ - $config = read_config("/usr/local/etc/lcovrc"); + +if ($lcovutil::geninfo_captureAll && $initial) { + lcovutil::ignorable_error($lcovutil::ERROR_USAGE, + "'--all' ignored when '--initial' is used"); + $lcovutil::geninfo_captureAll = undef; } -if ($config || %opt_rc) -{ - # Copy configuration file and --rc values to variables - apply_config({ - "geninfo_gcov_tool" => \$gcov_tool, - "geninfo_adjust_testname" => \$adjust_testname, - "geninfo_checksum" => \$checksum, - "geninfo_no_checksum" => \$no_checksum, # deprecated - "geninfo_compat_libtool" => \$opt_compat_libtool, - "geninfo_external" => \$opt_external, - "geninfo_gcov_all_blocks" => \$opt_gcov_all_blocks, - "geninfo_compat" => \$opt_compat, - "geninfo_adjust_src_path" => \$rc_adjust_src_path, - "geninfo_auto_base" => \$rc_auto_base, - "geninfo_intermediate" => \$rc_intermediate, - "geninfo_no_exception_branch" => \$no_exception_br, - "lcov_function_coverage" => \$func_coverage, - "lcov_branch_coverage" => \$br_coverage, - "lcov_excl_line" => \$excl_line, - "lcov_excl_br_line" => \$excl_br_line, - "lcov_excl_exception_br_line" => \$excl_exception_br_line, - }); - - # Merge options - if (defined($no_checksum)) - { - $checksum = ($no_checksum ? 0 : 1); - $no_checksum = undef; - } - - # Check regexp - if (defined($rc_adjust_src_path)) { - my ($pattern, $replace) = split(/\s*=>\s*/, - $rc_adjust_src_path); - local $SIG{__DIE__}; - eval '$adjust_src_pattern = qr>'.$pattern.'>;'; - if (!defined($adjust_src_pattern)) { - my $msg = $@; - - chomp($msg); - $msg =~ s/at \(eval.*$//; - warn("WARNING: invalid pattern in ". 
- "geninfo_adjust_src_path: $msg\n"); - } elsif (!defined($replace)) { - # If no replacement is specified, simply remove pattern - $adjust_src_replace = ""; - } else { - $adjust_src_replace = $replace; - } - } - for my $regexp (($excl_line, $excl_br_line, $excl_exception_br_line)) { - eval 'qr/'.$regexp.'/'; - my $error = $@; - chomp($error); - $error =~ s/at \(eval.*$//; - die("ERROR: invalid exclude pattern: $error") if $error; - } +if (defined($lcovutil::tempdirname)) { + $lcovutil::tmp_dir = $lcovutil::tempdirname; + File::Path::make_path($lcovutil::tmp_dir) or + die("unable to mkdir $lcovutil::tmp_dir: $!") + unless (-d $lcovutil::tmp_dir); } -# Parse command line options -if (!GetOptions("test-name|t=s" => \$test_name, - "output-filename|o=s" => \$output_filename, - "checksum" => \$checksum, - "no-checksum" => \$no_checksum, - "base-directory|b=s" => \$base_directory, - "version|v" =>\$version, - "quiet|q" => \$quiet, - "help|h|?" => \$help, - "follow|f" => \$follow, - "compat-libtool" => \$opt_compat_libtool, - "no-compat-libtool" => \$opt_no_compat_libtool, - "gcov-tool=s" => \$gcov_tool, - "ignore-errors=s" => \@ignore_errors, - "initial|i" => \$initial, - "include=s" => \@include_patterns, - "exclude=s" => \@exclude_patterns, - "no-recursion" => \$no_recursion, - "no-markers" => \$no_markers, - "derive-func-data" => \$opt_derive_func_data, - "debug" => \$debug, - "external|e" => \$opt_external, - "no-external" => \$opt_no_external, - "compat=s" => \$opt_compat, - "config-file=s" => \$opt_config_file, - "rc=s%" => \%opt_rc, - )) -{ - print(STDERR "Use $tool_name --help to get usage information\n"); - exit(1); +# Merge options +if (defined($opt_no_compat_libtool)) { + $lcovutil::opt_compat_libtool = ($opt_no_compat_libtool ? 0 : 1); + $opt_no_compat_libtool = undef; } -else -{ - # Merge options - if (defined($no_checksum)) - { - $checksum = ($no_checksum ? 0 : 1); - $no_checksum = undef; - } - - if (defined($opt_no_compat_libtool)) - { - $opt_compat_libtool = ($opt_no_compat_libtool ? 
0 : 1); - $opt_no_compat_libtool = undef; - } - - if (defined($opt_no_external)) { - $opt_external = 0; - $opt_no_external = undef; - } - - if(@include_patterns) { - # Need perlreg expressions instead of shell pattern - @include_patterns = map({ transform_pattern($_); } @include_patterns); - } - - if(@exclude_patterns) { - # Need perlreg expressions instead of shell pattern - @exclude_patterns = map({ transform_pattern($_); } @exclude_patterns); - } + +if (defined($lcovutil::opt_external)) { + $lcovutil::opt_no_external = !$lcovutil::opt_external; + $lcovutil::opt_external = undef; } +my $start = Time::HiRes::gettimeofday(); + @data_directory = @ARGV; debug("$lcov_version\n"); -# Check for help option -if ($help) -{ - print_usage(*STDOUT); - exit(0); +if (0 == scalar(@gcov_tool)) { + # not specified - use gcov by default - expected to be in user's path + push(@gcov_tool, 'gcov'); +} else { + @gcov_tool = + split($lcovutil::split_char, join($lcovutil::split_char, @gcov_tool)); + my $tool = $gcov_tool[0]; + my (undef, $dir, $file) = splitpath($tool); + + if ($dir eq "") { + $tool = which($tool); + } elsif (!file_name_is_absolute($tool)) { + $tool = abs_path($tool); + } + if (!-x $tool) { + die("cannot access gcov tool '$gcov_tool[0]'"); + } + $gcov_tool[0] = $tool; + if (scalar(@gcov_tool) > 1) { + foreach my $e (@gcov_tool) { + $e = "'$e'" if ($e =~ /\s/); + } + } } - -# Check for version option -if ($version) -{ - print("$tool_name: $lcov_version\n"); - exit(0); +if (scalar(@lcovutil::extractVersionScript) > 1) { + foreach my $e (@lcovutil::extractVersionScript) { + $e = "'$e'" if ($e =~ /\s/); + } } # Check gcov tool -if (system_no_output(3, $gcov_tool, "--help") == -1) -{ - die("ERROR: need tool $gcov_tool!\n"); +if (system_no_output(3, @gcov_tool, "--help") == -1) { + die("failed execution of gcov_tool \"" . + join(' ', @gcov_tool) . " --help\": $!"); } ($gcov_version, $gcov_version_string) = get_gcov_version(); $gcov_caps = get_gcov_capabilities(); # Determine intermediate mode -if ($rc_intermediate eq "0") { - $intermediate = 0; -} elsif ($rc_intermediate eq "1") { - $intermediate = 1; -} elsif (lc($rc_intermediate) eq "auto") { - # Use intermediate format if supported by gcov and not conflicting with - # exception branch exclusion - $intermediate = (($gcov_caps->{'intermediate-format'} && !$no_exception_br) || - $gcov_caps->{'json-format'}) ? 1 : 0; +if ($lcovutil::rc_intermediate eq "0") { + $intermediate = 0; +} elsif ($lcovutil::rc_intermediate eq "1") { + $intermediate = 1; +} elsif (lc($lcovutil::rc_intermediate) eq "auto") { + # Use intermediate format if supported by gcov and not conflicting with + # exception branch exclusion + $intermediate = (($gcov_caps->{'intermediate-format'} && + !$lcovutil::exclude_exception_branch) || + $gcov_caps->{'json-format'}) ? 1 : 0; } else { - die("ERROR: invalid value for geninfo_intermediate: ". - "'$rc_intermediate'\n"); + die("invalid value for geninfo_intermediate: " . + "'$lcovutil::rc_intermediate'\n"); } -if ($intermediate) { - info("Using intermediate gcov format\n"); - if ($opt_derive_func_data) { - warn("WARNING: --derive-func-data is not compatible with ". - "intermediate format - ignoring\n"); - $opt_derive_func_data = 0; - } - if ($no_exception_br && !$gcov_caps->{'json-format'}) { - die("ERROR: excluding exception branches is not compatible with ". 
- "text intermediate format\n"); - } +if ($gcov_version >= (9 << 16) && + !$intermediate) { + lcovutil::ignorable_error($ERROR_UNSUPPORTED, + "geninfo does not support text format for gcov/9 or higher (your version appears to be '$gcov_version_string').\n Please remove config file entry 'geninfo_intermdediate = 0'." + ); + $intermediate = 1; } -if ($no_exception_br && ($gcov_version < $GCOV_VERSION_3_3_0)) { - die("ERROR: excluding exception branches is not compatible with ". - "gcov versions older than 3.3\n"); +if ($intermediate) { + info("Using intermediate gcov format\n"); + if ($opt_derive_func_data) { + lcovutil::ignorable_error($lcovutil::ERROR_USAGE, + "--derive-func-data is not compatible with intermediate format"); + $opt_derive_func_data = 0; + } + if ($lcovutil::exclude_exception_branch && !$gcov_caps->{'json-format'}) { + die("excluding exception branches is not compatible with " . + "text intermediate format\n"); + } } # Determine gcov options -push(@gcov_options, "-b") if ($gcov_caps->{'branch-probabilities'} && - ($br_coverage || $func_coverage)); -push(@gcov_options, "-c") if ($gcov_caps->{'branch-counts'} && - $br_coverage); -push(@gcov_options, "-a") if ($gcov_caps->{'all-blocks'} && - $opt_gcov_all_blocks && $br_coverage && - !$intermediate); -if ($gcov_caps->{'hash-filenames'}) -{ - push(@gcov_options, "-x"); +push(@gcov_tool, "-b") + if ($gcov_caps->{'branch-probabilities'} && + ($lcovutil::br_coverage || + $lcovutil::func_coverage || + $lcovutil::opt_adjust_unexecuted_blocks)); +push(@gcov_tool, "-c") + if ($gcov_caps->{'branch-counts'} && + $lcovutil::br_coverage); +push(@gcov_tool, "-a") + if ($gcov_caps->{'all-blocks'} && + $lcovutil::opt_gcov_all_blocks && + $lcovutil::br_coverage && + !$intermediate); +if ($gcov_caps->{'hash-filenames'}) { + push(@gcov_tool, "-x"); } else { - push(@gcov_options, "-p") if ($gcov_caps->{'preserve-paths'}); + push(@gcov_tool, "-p") if ($gcov_caps->{'preserve-paths'}); +} +push(@gcov_tool, '-i') if $intermediate; + +if ($lcovutil::mcdc_coverage) { + if ($gcov_caps->{'conditions'}) { + push(@gcov_tool, '--conditions'); + } else { + $lcovutil::mcdc_coverage = 0; + lcovutil::ignorable_error($lcovutil::ERROR_USAGE, + "MC/DC coverage enabled but \"$gcov_tool[0]\" does not support the '--conditions' option." + ); + } } # Determine compatibility modes -parse_compat_modes($opt_compat); - -# Determine which errors the user wants us to ignore -parse_ignore_errors(@ignore_errors); +parse_compat_modes($lcovutil::geninfo_opt_compat); + +if ($no_markers) { + lcovutil::ignorable_error($lcovutil::ERROR_USAGE, + "use new '--filter' option or old '--no-markers' - not both") + if (@lcovutil::opt_filter); +} elsif (!@lcovutil::opt_filter) { + # don't apply the backward-compatible options if user specifies any filters + lcovutil::info(1, + "$lcovutil::tool_name: applying '--filter region,branch_region' by default - see the '--no-markers' section in the man page for more information.\n" + ); + push(@lcovutil::opt_filter, "region"); + push(@lcovutil::opt_filter, "branch_region") if $br_coverage; +} else { + lcovutil::info( + "Note: 'region' and 'branch_region' filters are not applied by default when '--filter' is specified. 
See the '--no-markers section in the man page for more information.\n" + ) unless grep({ /(region|branch_region)/ } @lcovutil::opt_filter); +} +parse_cov_filters(@lcovutil::opt_filter); # Make sure test names only contain valid characters -if ($test_name =~ s/\W/_/g) -{ - warn("WARNING: invalid characters removed from testname!\n"); +if ($test_name =~ s/\W/_/g) { + lcovutil::ignorable_warning($lcovutil::ERROR_FORMAT, + "invalid characters removed from testname"); } # Adjust test name to include uname output if requested -if ($adjust_testname) -{ - $test_name .= "__".`uname -a`; - $test_name =~ s/\W/_/g; +if ($lcovutil::geninfo_adjust_testname) { + $test_name .= "__" . `uname -a`; + $test_name =~ s/\W/_/g; } # Make sure base_directory contains an absolute path specification -if ($base_directory) -{ - $base_directory = solve_relative_path($cwd, $base_directory); +if ($base_directory) { + $base_directory = solve_relative_path($cwd, $base_directory); + push(@ReadCurrentSource::source_directories, $base_directory); } -# Check for follow option -if ($follow) -{ - $follow = "-follow" -} -else -{ - $follow = ""; -} - -# Determine checksum mode -if (defined($checksum)) -{ - # Normalize to boolean - $checksum = ($checksum ? 1 : 0); -} -else -{ - # Default is off - $checksum = 0; -} +# Determine checksum mode - normalize to boolean +$lcovutil::verify_checksum = + defined($lcovutil::verify_checksum) && $lcovutil::verify_checksum; # Determine max depth for recursion -if ($no_recursion) -{ - $maxdepth = "-maxdepth 1"; -} -else -{ - $maxdepth = ""; +if ($no_recursion) { + $maxdepth = "-maxdepth 1"; +} else { + $maxdepth = ""; } # Check for directory name -if (!@data_directory) -{ - die("No directory specified\n". - "Use $tool_name --help to get usage information\n"); -} -else -{ - foreach (@data_directory) - { - stat($_); - if (!-r _) - { - die("ERROR: cannot read $_!\n"); - } - } +if (!@data_directory) { + die("No directory specified\n" . + "Use $tool_name --help to get usage information\n"); +} else { + my @dirs; + foreach my $pattern (@data_directory) { + if (-d $pattern) { + $pattern =~ + s#$lcovutil::dirseparator$##; # remove trailing slash - if any + push(@dirs, $pattern); + next; + } + $pattern =~ s/([^\\]) /$1\\ /g # explicitly escape spaces + unless $^O =~ /Win/; + + my @glob = glob($pattern); + + my $count = 0; + foreach (@glob) { + + stat($_); + if (!-r _) { + ignorable_error($ERROR_GCOV, "cannot read $_!"); + } else { + push(@dirs, $_); + $count++; + } + } + if (0 == $count) { + ignorable_error($ERROR_EMPTY, "$pattern does not match anything."); + } + } + @data_directory = @dirs; } -if ($gcov_version < $GCOV_VERSION_3_4_0) -{ - if (is_compat($COMPAT_MODE_HAMMER)) - { - $data_file_extension = ".da"; - $graph_file_extension = ".bbg"; - } - else - { - $data_file_extension = ".da"; - $graph_file_extension = ".bb"; - } +if ($gcov_version < $GCOV_VERSION_4_2_0) { + die("Your toolchain version is too old and is no longer supported by lcov. Please upgrade - or use an older lcov release." 
+ ); } -else -{ - $data_file_extension = ".gcda"; - $graph_file_extension = ".gcno"; -} # Check output filename -if (defined($output_filename) && ($output_filename ne "-")) -{ - # Initially create output filename, data is appended - # for each data file processed - local *DUMMY_HANDLE; - open(DUMMY_HANDLE, ">", $output_filename) - or die("ERROR: cannot create $output_filename!\n"); - close(DUMMY_HANDLE); - - # Make $output_filename an absolute path because we're going - # to change directories while processing files - if (!($output_filename =~ /^\/(.*)$/)) - { - $output_filename = $cwd."/".$output_filename; - } -} +$single_file = defined($output_filename); + +# use absolute path: we change directories while processing files +$output_filename = catfile($cwd, $output_filename) + if (defined($output_filename) && + ($output_filename ne "-") && + !file_name_is_absolute($output_filename)); # Build list of directories to identify external files -foreach my $entry(@data_directory, $base_directory) { - next if (!defined($entry)); - push(@internal_dirs, solve_relative_path($cwd, $entry)); +compute_internal_directories(@data_directory, $base_directory); + +if ($initial && $lcovutil::br_coverage && !$intermediate) { + lcovutil::ignorable_error($lcovutil::ERROR_USAGE, + "--initial cannot generate branch coverage data with this compiler/toolchain version." + ); } +lcovutil::ignorable_error($lcovutil::ERROR_USAGE, + "--fail_under_lines not supported unless output file specified") + if (defined($lcovutil::fail_under_lines) && !$single_file); + +# where to write parallel child data +my $tempFileDir = + defined($lcovutil::tempdirname) ? $lcovutil::tempdirname : + File::Temp->newdir("geninfo_datXXXX", + DIR => $lcovutil::tmp_dir, + CLEANUP => !defined($lcovutil::preserve_intermediates)); + +lcovutil::info("Writing temporary data to $tempFileDir\n"); # Do something -foreach my $entry (@data_directory) { - gen_info($entry); +my $processedFiles = 0; +my $exit_code = 0; +eval { $processedFiles += gen_info(@data_directory); }; +if ($@) { + $exit_code = 1; + print(STDERR $@); } -if ($initial && $br_coverage && !$intermediate) { - warn("Note: --initial does not generate branch coverage ". - "data\n"); +if (0 == $exit_code) { + eval { + my $now = Time::HiRes::gettimeofday(); + # have to check the loaded input data for exclusion markers because the + # data was generated directly from the gcov files - did not go through + # TraceFile::load which explicitly checks + if ($single_file && defined($trace_data)) { + $trace_data->applyFilters(); + my $f = Time::HiRes::gettimeofday(); + $trace_data->add_comments(@lcovutil::comments); + $trace_data->write_info_file($output_filename, + $lcovutil::verify_checksum); + $files_created++; + my $then = Time::HiRes::gettimeofday(); + $lcovutil::profileData{filter} = $f - $now; + $lcovutil::profileData{write} = $then - $f; + } + if ($files_created == 0) { + lcovutil::ignorable_error($lcovutil::ERROR_EMPTY, + "no data generated\n"); + } + my $then = Time::HiRes::gettimeofday(); + $lcovutil::profileData{emit} = $then - $now; + if (defined($trace_data)) { + info("Finished .info-file creation\n"); + $trace_data->print_summary() + if ($lcovutil::verbose >= 0); + } + summarize_cov_filters(); + # print warnings + lcovutil::warn_file_patterns(); + $buildDirSearchPath->warn_unused( + @lcovutil::build_directory ? 
'--build-directory' : + 'build_directory = '); + ReadCurrentSource::warn_sourcedir_patterns(); + }; + if ($@) { + # eval of filter application failed - set error return code, but + # still try to emit profile + print(STDERR $@); + $exit_code = 1; + } +} +# $trace_data may be undef if no non-empty GCDA files found and the +# 'empty' warning is ignored +if (0 == $exit_code && + defined($trace_data) && + $single_file) { + + $trace_data->checkCoverageCriteria(); + CoverageCriteria::summarize(); + $exit_code = 1 if $CoverageCriteria::coverageCriteriaStatus; } -info("Finished .info-file creation\n"); +summarize_messages(); +my $end = Time::HiRes::gettimeofday(); +$lcovutil::profileData{total} = $end - $start; -exit(0); +lcovutil::cleanup_callbacks(); +lcovutil::save_profile( + (defined($output_filename) && '-' ne $output_filename) ? $output_filename : + "geninfo"); +# exit with non-zero status if --keep-going and some errors detected +$exit_code = 1 + if (0 == $exit_code && + (!(defined($trace_data) && $single_file) || + lcovutil::saw_error())); +exit($exit_code); # # print_usage(handle) @@ -667,136 +640,405 @@ exit(0); sub print_usage(*) { - local *HANDLE = $_[0]; + local *HANDLE = $_[0]; - print(HANDLE < 0, + TOTAL_CHUNKS => 1, + CHUNK_SIZE => 2, + PROCESSED_CHUNKS => 3, + INTERVAL_LENGTH => 4, + START_TIME => 5, # start processing worklist + LAST_UPDATE => 6, # number files processed at last update + INTERVAL_START => 7, + INTERVAL_COUNTS => + 8, # source files and coverpoints found since update + + FILE_COUNT => 0, + LINE_COUNT => 1, + BRANCH_COUNT => 2, + FUNCTION_COUNT => 3, +}; + +sub new +{ + my ($class, $totalFiles, $nChunks, $chunkSize, $intervalLength) = @_; + my $start = Time::HiRes::gettimeofday(); + my $self = [$totalFiles, $nChunks, $chunkSize, 0, + $intervalLength, $start, 0, $start, + [0, 0, 0, 0] + ]; + bless $self, $class; + return $self; +} -# -# transform_pattern(pattern) -# -# Transform shell wildcard expression to equivalent Perl regular expression. -# Return transformed pattern. -# +sub checkUpdate +{ + my ($self, $processedFiles) = @_; + + if ($self->[INTERVAL_LENGTH] && + $self->[INTERVAL_LENGTH] <= ($processedFiles - $self->[LAST_UPDATE])) { + my $filesLastInterval = $processedFiles - $self->[LAST_UPDATE]; + $self->[LAST_UPDATE] = $processedFiles; + my $now = Time::HiRes::gettimeofday(); + my $elapsed = $now - $self->[START_TIME]; + my $intervalTime = $now - $self->[INTERVAL_START]; + my $rate = ($processedFiles) / $elapsed; + my $average = $totalChildCpuTime / ($processedFiles); + my $interval = $intervalChildCpuTime / $filesLastInterval; + $intervalChildCpuTime = 0; + $self->[INTERVAL_START] = $now; + # compute average wall clock files/s and individual s/file (actual CPU) + # for overall processing of all files and for files during this + # interval. Might be useful to observe performance issues during + # execution. + my $total = $self->[TOTAL_FILES]; + lcovutil::info( + "elapsed:%0.1fm: remaining:%d files %0.1fm: %0.2f files/s %0.2f s/file (interval:%0.2f f/s %0.2f s/f)\n", + $elapsed / 60, + $total - $processedFiles, + ($total - $processedFiles) / ($rate * 60), + $rate, + $average, + $filesLastInterval / $intervalTime, + $interval); + # how many new source files and new coverpoints have been found + # in this interval? 
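# Usage sketch (illustrative only, mirroring the calls made from gen_info()
# further below): an IntervalMonitor is a blessed array indexed by the
# constants defined at the top of this package, and checkUpdate() only
# prints a throughput/ETA line once INTERVAL_LENGTH additional files have
# been processed since the previous report.
#
#   $intervalMonitor = IntervalMonitor->new($total, $totalChunks,
#                                           $chunkSize, $intervalLength);
#   ...
#   $intervalMonitor->checkUpdate($processedFiles);  # reports when due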
+ if (defined($trace_data)) { + my @counts = $trace_data->count_totals(); + my $intervalData = $self->[INTERVAL_COUNTS]; + if ($counts[0] != + $intervalData->[FILE_COUNT] || # number source files + $counts[1]->[0] != + $intervalData->[LINE_COUNT] || # line coverpoints + $counts[2]->[0] != + $intervalData->[BRANCH_COUNT] || # branch coverpoints + $counts[3]->[0] != $intervalData->[FUNCTION_COUNT] + ) # function coverpoints + { + lcovutil::info(1, + " added files:%d ln:%d br:%d fn:%d\n", + $counts[0] - $intervalData->[FILE_COUNT], + $counts[1]->[0] - $intervalData->[LINE_COUNT], + $counts[2]->[0] - $intervalData->[BRANCH_COUNT], + $counts[3]->[0] - $intervalData->[FUNCTION_COUNT] + ); + $self->[INTERVAL_COUNTS] = [$counts[0], $counts[1]->[0], + $counts[2]->[0], $counts[3]->[0] + ]; + } + } + } +} + +package BuildWorkList; -sub transform_pattern($) +sub new { - my $pattern = $_[0]; - - # Escape special chars - - $pattern =~ s/\\/\\\\/g; - $pattern =~ s/\//\\\//g; - $pattern =~ s/\^/\\\^/g; - $pattern =~ s/\$/\\\$/g; - $pattern =~ s/\(/\\\(/g; - $pattern =~ s/\)/\\\)/g; - $pattern =~ s/\[/\\\[/g; - $pattern =~ s/\]/\\\]/g; - $pattern =~ s/\{/\\\{/g; - $pattern =~ s/\}/\\\}/g; - $pattern =~ s/\./\\\./g; - $pattern =~ s/\,/\\\,/g; - $pattern =~ s/\|/\\\|/g; - $pattern =~ s/\+/\\\+/g; - $pattern =~ s/\!/\\\!/g; - - # Transform ? => (.) and * => (.*) - - $pattern =~ s/\*/\(\.\*\)/g; - $pattern =~ s/\?/\(\.\)/g; - - return $pattern; + my $class = shift; + my $self = [[], {}, {}]; # [worklist, processedFiles, messages] + return bless $self, $class; } +sub worklist +{ + my $self = shift; + # we saved the messages for the end... + foreach my $msg (values %{$self->[2]}) { + lcovutil::ignorable_error($msg->[0], $msg->[1]); + } + return $self->[0]; +} -# -# get_common_prefix(min_dir, filenames) -# -# Return the longest path prefix shared by all filenames. MIN_DIR specifies -# the minimum number of directories that a filename may have after removing -# the prefix. -# +sub find_corresponding_gcno_file +{ + my ($gcda_file, $searchdir) = @_; + my ($name, $d, $e) = File::Basename::fileparse($gcda_file, qr/\.[^.]*/); + my $gcno_file = File::Spec->catfile($d, $name . ".gcno"); + foreach ($gcno_file) { + return $gcno_file if (-f $gcno_file || -l $gcno_file); + + my $alt = lcovutil::subst_file_name($gcno_file); + my $prefix = "looking for GCNO matching '$gcda_file':\n"; + if ($alt ne $gcno_file) { + $gcno_file = $alt; + lcovutil::info(1, "$prefix at '$gcno_file'\n"); + $prefix = ''; + return $gcno_file if (-f $gcno_file || -l $gcno_file); + } + # check to see if this file is in a different directory + my $dir = $d; + $dir =~ s#^$searchdir##g; + $prefix = "looking for GCNO matching '$gcda_file' prefix:'$dir':\n"; + # handle case that gcda and gcno are in different directories + # - say, where is the gcda that we found, then see if gcno is + # in the same directory. If not, then look in a gcno path and + # link both gcda and gcno into this tempdir, run gcov, then + # unlink + # from the directory where the gcda file is found: + # strip off the directory where we start the search + # then strip off the GCOV_PREFIX (if there is one) + # then append the remaining path the the GCDA file, to the + # 'GCNO_PATH' that we were provided. + # - if there is a file there: use it. + foreach my $d (@$buildDirSearchPath) { + # is the GCNO in the relative path from build_dir? + my $build_directory = $d->[0]; + $gcno_file = + File::Spec->catfile($build_directory, $dir, $name . 
".gcno"); + + lcovutil::info(1, "$prefix at '$gcno_file'\n"); + $prefix = ''; + if (-f $gcno_file || -l $gcno_file) { + ++$d->[1]; + return $gcno_file; + } + + # is the GCNO in the relative path after we apply substitutions? + $alt = lcovutil::subst_file_name($gcno_file); + if ($alt ne $gcno_file) { + $gcno_file = $alt; + lcovutil::info(1, "$prefix at '$gcno_file'\n"); + $prefix = ''; + if (-f $gcno_file || -l $gcno_file) { + ++$d->[1]; + return $gcno_file; + } + } + } # foreach build_directory + + if (@lcovutil::resolveCallback) { + $gcno_file = File::Spec->catfile($d, $name . ".gcno"); + + $gcno_file = SearchPath::resolveCallback($gcno_file, 1); + return $gcno_file if (-f $gcno_file || -l $gcno_file); + } + } # foreach switch + + # skip the .gcda file if there is no .gcno + lcovutil::ignorable_error( + $lcovutil::ERROR_PATH, + (lcovutil::is_ignored($lcovutil::ERROR_PATH) ? 'skipping' : + 'cannot process') . + " .gcda file $gcda_file because corresponding .gcno file '$gcno_file' is missing" + . + ( + $lcovutil::verbose || + lcovutil::message_count($lcovutil::ERROR_PATH) == 0 ? + " (see the '--build-directory' entry in 'man geninfo' for suggestions)" + : + '') . + '.'); + + return undef; +} -sub get_common_prefix($@) +sub add_worklist_entry { - my ($min_dir, @files) = @_; - my $file; - my @prefix; - my $i; - - foreach $file (@files) { - my ($v, $d, $f) = splitpath($file); - my @comp = splitdir($d); - - if (!@prefix) { - @prefix = @comp; - next; - } - for ($i = 0; $i < scalar(@comp) && $i < scalar(@prefix); $i++) { - if ($comp[$i] ne $prefix[$i] || - ((scalar(@comp) - ($i + 1)) <= $min_dir)) { - delete(@prefix[$i..scalar(@prefix)]); - last; - } - } - } - - return catdir(@prefix); + my ($self, $filename, $directory) = @_; + if (exists($self->[1]->{$filename})) { + lcovutil::ignorable_error( + $lcovutil::ERROR_USAGE, + "duplicate file $filename in both " . + $self->[1]->{$filename} . " and $directory" + . + (lcovutil::is_ignored( + $lcovutil::ERROR_USAGE) ? + ' (skip latter)' : + '')); + return; + } + $self->[1]->{$filename} = $directory; + + my ($gcda_file, $gcno_file); + if ($filename !~ /$data_file_extension$/) { + $gcno_file = $filename; + } else { + $gcda_file = $filename; + $gcno_file = find_corresponding_gcno_file($filename, $directory); + return unless $gcno_file; # would have errored out + + my ($vol, $dir, $name) = File::Spec->splitpath($gcno_file); + # remove trailing slash + $self->[1]->{$gcno_file} = substr($dir, 0, -1); + } + push(@{$self->[0]}, [$directory, $gcda_file, $gcno_file]); } +sub find_files +{ + my $self = shift; + my $processGcno = shift; + my ($type, $ext); + + if ($processGcno) { + $type = "graph"; + $ext = $graph_file_extension; + } else { + $type = "data"; + $ext = $data_file_extension; + } + foreach my $directory (@_) { + unless (-e $directory) { + # hold error until the end of processing - we might be looking + # for both .gcno and .gcda files - and don't want to generate + # the same 'no such directory' message twice. Nor do we want + # to produce an error if only one type of file is in this + # directory + $self->[2]->{$directory} = [$lcovutil::ERROR_USAGE, + "no such file or directory '$directory'" + ]; + next; + } + if (-d $directory) { + lcovutil::info("Scanning $directory for $ext files ...\n"); + + my $now = Time::HiRes::gettimeofday(); + my $follow = $lcovutil::opt_follow ? 
'-follow' : ''; + my ($stdout, $stderr, $code) = Capture::Tiny::capture { + system( + "find '$directory' $maxdepth $follow -name \\*$ext -type f -o -name \\*$ext -type l" + ); + }; + lcovutil::ignorable_error($lcovutil::ERROR_UTILITY, + "error in 'find \"$directory\" ...': $stderr") + if ($code); + + my $time = Time::HiRes::gettimeofday() - $now; + if (exists($lcovutil::profileData{find}) && + exists($lcovutil::profileData{find}{$directory})) { + $lcovutil::profileData{find}{$directory} += $time; + } else { + $lcovutil::profileData{find}{$directory} = $time; + } + # split on crlf + my @found = split(/[\x0A\x0D]/, $stdout); + if (!@found) { + if (!defined($processGcno) || $processGcno != 2) { + # delay message: might be a file of other type here + $self->[2]->{$directory} = [ + $lcovutil::ERROR_EMPTY, + "no $ext files found in $directory" + ]; + } + + next; + } + # we found something here - remove the pending error message, if any + delete($self->[2]->{$directory}) + ; # if exists($self->[2]->{$directory}); + lcovutil::info("Found %d %s files in %s\n", + scalar(@found), $type, $directory); + # keep track of directory where we found the file + foreach my $entry (@found) { + # don't add gcno file again, if we already handled the .gcda + $self->add_worklist_entry($entry, $directory) + unless (defined($processGcno) && + $processGcno == 2 && + exists($self->[1]->{$entry})); + } + } elsif (!defined($processGcno) || $processGcno == 1) { + my ($name, $d, $e) = + File::Basename::fileparse($directory, qr/\.[^.]*/); + if ($e ne $ext && + (!$lcovutil::geninfo_captureAll || $e ne $graph_file_extension) + ) { + $self->[2]->{$directory} = [ + $lcovutil::ERROR_USAGE, + "$directory has unsupported extension: expected '$ext'" . + ($initial ? " for initial capture" : "") + ]; + next; + } + delete($self->[2]->{$directory}); + # use the directory where we find the file as the base dir + $self->add_worklist_entry($directory, $d); + } + } +} + +package main; + # -# gen_info(directory) +# gen_info(directory_list) # -# Traverse DIRECTORY and create a .info file for each data file found. +# Traverse each DIRECTORY in list and create a .info file for each data file found. # The .info file contains TEST_NAME in the following format: # # TN: # # For each source file name referenced in the data file, there is a section -# containing source code and coverage data: -# -# SF: -# FN:, for each function -# DA:, for each instrumented line -# LH: greater than 0 -# LF: -# -# Sections are separated by: -# -# end_of_record +# containing source code and coverage data. See geninfo(1) man page for +# more details. # # In addition to the main source code file there are sections for each # #included file containing executable code. Note that the absolute path @@ -810,68 +1052,530 @@ sub get_common_prefix($@) # Die on error. # -sub gen_info($) +my $chunkSize; + +sub _process_one_chunk($$$$) +{ + my ($chunk, $chunkId, $combined, $pid) = @_; + + my $start = Time::HiRes::gettimeofday(); + + my $idx = 0; + foreach my $data (@$chunk) { + my $now = Time::HiRes::gettimeofday(); + if (defined($pid) && + 0 != $pid) { + # if parent died, then time for me to go + lcovutil::check_parent_process(); + } + + my ($searchdir, $gcda_file, $gcno_file) = @$data; + + # "name" will be .gcno if "$initial" else will be $gcda + my $name = defined($gcda_file) ? $gcda_file : $gcno_file; + info(1, + "Processing $name%s\n", + defined($pid) ? " in child $pid" : "" . 
"\n"); + my $context = MessageContext->new("capturing from $name"); + + # multiple gcda files may refer to the same source - so generate the + # same 'source.gcda' output file - so they each need a different directory + # This is necessary to preserve intermediates, and if we are running + # in parallel; we don't want to overwrite and don't want multiple children to + # conflict. + my $tmp = File::Temp->newdir( + "geninfo_XXXXX", + DIR => $tempFileDir, + CLEANUP => !defined($lcovutil::preserve_intermediates) + ) if ($intermediate || defined($gcda_file)); + + # keep track of order - so we can estimate which files were processed + # at same time + $lcovutil::profileData{order}{$name} = + ($chunkId * $chunkSize) + $idx; + ++$idx; + + my $trace; + if ($intermediate) { + $trace = + process_intermediate($searchdir, $gcda_file, + $gcno_file, $tmp->dirname); + } elsif (!defined($gcda_file)) { + # just read the gcno file and set all the counters to zero + $trace = process_graphfile($searchdir, $gcno_file); + } else { + $trace = + process_dafile($searchdir, $gcda_file, + $gcno_file, $tmp->dirname); + } + my $then = Time::HiRes::gettimeofday(); + $lcovutil::profileData{parse}{$name} = $then - $now; + + if (defined($trace)) { + + if (!$single_file) { + # Create one .info file per input file + $trace->applyFilters(); + $trace->add_comments(@lcovutil::comments); + $trace->write_info_file(solve_relative_path( + $cwd, $name . ".info" + ), + $lcovutil::verify_checksum); + my $then = Time::HiRes::gettimeofday(); + $lcovutil::profileData{filter_file}{$name} = $then - $now; + $files_created++; + } else { + if (defined($combined)) { + $combined->merge_tracefile($trace, TraceInfo::UNION); + } else { + $combined = $trace; + } + my $end = Time::HiRes::gettimeofday(); + $lcovutil::profileData{append}{$name} = $end - $then; + } + } + my $done = Time::HiRes::gettimeofday(); + die("unexpected duplicate file '$name'") + if exists($lcovutil::profileData{file}{$name}); + $lcovutil::profileData{file}{$name} = $done - $now; + } + my $end = Time::HiRes::gettimeofday(); + + return $combined; +} + +sub _merge_one_child($$$) { - my $directory = $_[0]; - my @file_list; - my $file; - my $prefix; - my $type; - my $ext; - my $tempdir; - - if ($initial) { - $type = "graph"; - $ext = $graph_file_extension; - } else { - $type = "data"; - $ext = $data_file_extension; - } - - if (-d $directory) - { - info("Scanning $directory for $ext files ...\n"); - - @file_list = `find "$directory" $maxdepth $follow -name \\*$ext -type f -o -name \\*$ext -type l 2>/dev/null`; - chomp(@file_list); - if (!@file_list) { - warn("WARNING: no $ext files found in $directory - ". - "skipping!\n"); - return; - } - $prefix = get_common_prefix(1, @file_list); - info("Found %d %s files in %s\n", $#file_list+1, $type, - $directory); - } - else - { - @file_list = ($directory); - $prefix = ""; - } - - $tempdir = tempdir(CLEANUP => 1); - - # Process all files in list - foreach $file (@file_list) { - # Process file - if ($intermediate) { - process_intermediate($file, $prefix, $tempdir); - } elsif ($initial) { - process_graphfile($file, $prefix); - } else { - process_dafile($file, $prefix); - } - } - - unlink($tempdir); - - # Report whether files were excluded. 
- if (%excluded_files) { - info("Excluded data for %d files due to include/exclude options\n", - scalar keys %excluded_files); - } + my ($children, $tempFileExt, $worklist) = @_; + + my $child = wait(); + my $start = Time::HiRes::gettimeofday(); + my $childstatus = $?; + unless (exists($children->{$child})) { + lcovutil::report_unknown_child($child); + return 0; + } + + debug( + "_merge_one_child: $child (parent $$) status $childstatus from $tempFileDir\n" + ); + my ($chunk, $forkAt, $chunkId) = @{$children->{$child}}; + my $dumped = File::Spec->catfile($tempFileDir, "dumper_$child"); + my $childLog = File::Spec->catfile($tempFileDir, "geninfo_$child.log"); + my $childErr = File::Spec->catfile($tempFileDir, "geninfo_$child.err"); + + foreach my $f ($childLog, $childErr) { + if (!-f $f) { + # no data was printed.. + $f = ''; + next; + } + if (open(RESTORE, "<", $f)) { + # slurp into a string and eval.. + my $str = do { local $/; }; # slurp whole thing + close(RESTORE) or die("unable to close $f: $!\n"); + unlink $f; + $f = $str; + } else { + $f = "unable to open $f: $!"; + if (0 == $childstatus) { + report_parallel_error('geninfo', $ERROR_PARALLEL, $child, 0, + $f, keys(%$children)); + } + } + } + my $signal = $childstatus & 0xFF; + print(STDOUT $childLog) + if ((0 != $childstatus && + $signal != POSIX::SIGKILL && + $lcovutil::max_fork_fails != 0) || + $lcovutil::verbose); + print(STDERR $childErr); + # look for spaceout message in the gcov log + if (0 == $signal && + 0 != $childstatus && + 0 != $lcovutil::max_fork_fails && + lcovutil::is_ignored($lcovutil::ERROR_FORK) && + grep( + { /(std::bad_alloc|annot allocate memory|out of memory|integretity check failed for compressed file)/ + } ($childLog, $childErr)) + ) { + + # pretend it was killed so we retry + $signal = POSIX::SIGKILL; + } + my $data = Storable::retrieve($dumped) + if (-f $dumped && 0 == $childstatus); + # note that $data will not be defined (no data dumped) if there was + # no child data extracted (e.g., all files excluded) + if (defined($data)) { + eval { + my ($childInfo, $buildDirCounts, $counts, $updates) = @$data; + lcovutil::update_state(@$updates); + $files_created += $counts->[0]; + $processedFiles += $counts->[1]; + my $childFinish = $counts->[2]; + $buildDirSearchPath->update_count(@$buildDirCounts); + my $childCpuTime = $lcovutil::profileData{child}{$chunkId}; + $totalChildCpuTime += $childCpuTime; + $intervalChildCpuTime += $childCpuTime; + + my $now = Time::HiRes::gettimeofday(); + $lcovutil::profileData{undump}{$chunkId} = $now - $start; + if (defined($childInfo)) { + if (defined($trace_data)) { + $trace_data->merge_tracefile($childInfo, TraceInfo::UNION); + my $final = Time::HiRes::gettimeofday(); + $lcovutil::profileData{append}{$chunkId} = $final - $now; + } else { + $trace_data = $childInfo; + } + my $end = Time::HiRes::gettimeofday(); + $lcovutil::profileData{merge}{$chunkId} = $end - $now; + $lcovutil::profileData{queue}{$chunkId} = $start - $childFinish; + } + $intervalMonitor->checkUpdate($processedFiles); + }; + if ($@) { + $childstatus = 1 << 8 unless $childstatus; + print STDOUT $@; + report_parallel_error('geninfo', $ERROR_PARALLEL, $child, + $childstatus, "unable to deserialize $dumped: $@", + keys(%$children)); + } + } + if ($childstatus != 0) { + if (POSIX::SIGKILL == $signal) { + if (exists($childRetryCounts{$chunkId})) { + $childRetryCounts{$chunkId} += 1; + } else { + $childRetryCounts{$chunkId} = 1; + } + lcovutil::report_fork_failure( + "compute job $chunkId", + "killed by OS - possibly due to 
out-of-memory", + $childRetryCounts{$chunkId}); + push(@$worklist, $chunk); + } else { + report_parallel_error('geninfo', $ERROR_CHILD, $child, $childstatus, + "ignoring data in chunk $chunkId", + keys(%$children)); + } + } + foreach my $f ($dumped) { + unlink $f + if -f $f; + } + my $to = Time::HiRes::gettimeofday(); + $lcovutil::profileData{chunk}{$chunkId} = $to - $forkAt; + if (exists($lcovutil::profileData{process}{$chunkId}) && + exists($lcovutil::profileData{merge}{$chunkId})) { + $lcovutil::profileData{work}{$chunkId} = + $lcovutil::profileData{process}{$chunkId} + + $lcovutil::profileData{merge}{$chunkId}; + } + return 0 == $childstatus; } +sub gen_info(@) +{ + my $builder = BuildWorkList->new(); + + $builder->find_files($initial, @_); + if (!defined($initial) && + defined($lcovutil::geninfo_captureAll) && + $lcovutil::geninfo_captureAll) { + $builder->find_files(2, @_); + } + my $filelist = $builder->worklist(); + my @sorted_filelist; + if ($lcovutil::sort_inputs) { + @sorted_filelist = sort({ + my $na = $a->[0] . + (defined($a->[1]) ? $a->[1] : $a->[2]); + my $nb = $b->[0] . + (defined($b->[1]) ? $b->[1] : $b->[2]); + $na cmp $nb + } @$filelist); + $filelist = \@sorted_filelist; + } + my $total = scalar(@$filelist); + if (1 < $lcovutil::maxParallelism) { + my $floor = + $lcovutil::maxParallelism ? + (int($total / $lcovutil::maxParallelism)) : + 1; + if (defined($lcovutil::defaultChunkSize)) { + if ($lcovutil::defaultChunkSize =~ /^(\d+)\s*(%?)$/) { + if (defined($2) && $2) { + # a percentage + $chunkSize = int($total * $1 / 100); + } else { + # an absolute value + $chunkSize = $1; + } + } else { + lcovutil::ignorable_error($lcovutil::ERROR_FORMAT, + "geninfo_chunk_size '$lcovutil::defaultChunkSize' is not recognized" + ); + } + } + # Need to balance time in child vs. time to merge child data - + # - if we have too many children, then they finish and wait in the + # queue to be merged. + # - if we have too few, then the merge time in child gets long + # build up in set to 80% of + $chunkSize = int(0.8 * $floor) unless defined($chunkSize); + + $chunkSize = 1 if $chunkSize < 2; + } else { + $chunkSize = 1; + } + my @worklist; + my $serialChunk = [1, []]; + my $chunk = [0, []]; # [isSerial, [fileList]] + FILE: foreach my $j (@$filelist) { + my ($dir, $gcda, $gcno) = @$j; + foreach my $f ($gcda, $gcno) { + next unless defined($f); # might not be a GCDA file + my $filename = $dir . $lcovutil::dirseparator . 
$f; + if (grep({ $filename =~ $_ } @main::large_files)) { + lcovutil::info(1, "large file: $filename\n"); + push(@{$serialChunk->[1]}, $j); + next FILE; + } + } + push(@{$chunk->[1]}, $j); + if (scalar(@{$chunk->[1]}) == $chunkSize) { + push(@worklist, $chunk); + $chunk = [0, []]; + } + } #foreach DATA_FILE + push(@worklist, $chunk) if @{$chunk->[1]}; + # serial chunk is at the top of the stack - so serial processing + # happens before we fork multiple processes + push(@worklist, $serialChunk) + if (@{$serialChunk->[1]}); + + # Process all files in list + my $currentParallel = 0; + my %children; + my $tempFileExt = ''; + $tempFileExt = ".gz" + if (defined $output_filename) && $output_filename =~ /\.gz$/; + + my $totalChunks = scalar(@worklist); + my $processedChunks = 0; + + # process at least 5% of files before printing stats + $lcovutil::defaultInterval = 5 unless defined($lcovutil::defaultInterval); + + my $intervalLength = int($total * $lcovutil::defaultInterval / 100); + my $start = Time::HiRes::gettimeofday(); + + $intervalMonitor = + IntervalMonitor->new($total, $totalChunks, $chunkSize, $intervalLength); + + lcovutil::info("using: chunkSize: %d, nchunks:%d, intervalLength:%d\n", + $chunkSize, $totalChunks, $intervalLength); + $lcovutil::profileData{chunkSize} = $chunkSize; + $lcovutil::profileData{nChunks} = $totalChunks; + $lcovutil::profileData{interval} = $intervalLength; + $lcovutil::profileData{nFiles} = $total; + $lcovutil::maxParallelism = 1 unless scalar(@worklist) > 1; + + my $failedAttempts = 0; + do { + CHUNK: while (@worklist) { + + my $chunk = pop(@worklist); + ++$processedChunks; + + if (1 < $lcovutil::maxParallelism && + 1 != $chunk->[0]) { + + my $currentSize = 0; + if (0 != $lcovutil::maxMemory) { + $currentSize = lcovutil::current_process_size(); + } + if ($currentParallel >= $lcovutil::maxParallelism || + ($currentParallel > 1 && + (($currentParallel + 1) * $currentSize) > + $lcovutil::maxMemory) + ) { + lcovutil::info(1, + "memory constraint ($currentParallel + 1) * $currentSize > $lcovutil::maxMemory violated: waiting. " + . ($total + 1) + . " remaining\n") + if ((($currentParallel + 1) * $currentSize) > + $lcovutil::maxMemory); + + $currentParallel -= + _merge_one_child(\%children, $tempFileExt, \@worklist); + # put the job back in the list + --$processedChunks; + push(@worklist, $chunk); + next CHUNK; + } + + $lcovutil::deferWarnings = 1; + my $now = Time::HiRes::gettimeofday(); + my $pid = fork(); + if (!defined($pid)) { + # fork failed + ++$failedAttempts; + lcovutil::report_fork_failure("process chunk", + $!, $failedAttempts); + --$processedChunks; + push(@worklist, $chunk); + next CHUNK; + } + $failedAttempts = 0; + if (0 == $pid) { + # I'm the child... + # set my output file to temp location so my dump won't + # collide with another child - then merge at the end... + # would be better if the various gcov data readers would + # build a datastructure that we could dump - rather than + # printing a .info file that we have to parse....but so + # be it. + my $childStart = Time::HiRes::gettimeofday(); + my $currentState = lcovutil::initial_state(); + $buildDirSearchPath->reset(); + $output_filename = + File::Spec->catfile($tempFileDir, + "geninfo_$$.info" . 
$tempFileExt);
+                my $childInfo;
+                # set count to zero so we know how many got created in
+                #   the child process
+                $files_created = 0;
+                my $now = Time::HiRes::gettimeofday();
+                # using 'capture' here so that we can both capture/redirect geninfo
+                #   messages from a child process during parallel execution AND
+                #   redirect stdout/stderr from gcov calls.
+                # It does not work to directly open/reopen the STDOUT and STDERR
+                #   descriptors due to interactions between the child and parent
+                #   processes (see the Capture::Tiny doc for some details)
+                my $status = 0;
+                my ($stdout, $stderr, $code) = Capture::Tiny::capture {
+                    eval {
+                        $childInfo =
+                            _process_one_chunk($chunk->[1],
+                                               $processedChunks, $childInfo, $$);
+                    };
+                    if ($@) {
+                        $status = 1;         # error
+                        print(STDERR $@);    # capture messages in $stderr
+                    }
+                };
+                # parent might have already caught an error, cleaned up and
+                #   removed the tempdir and exited.
+                lcovutil::check_parent_process();
+
+                # print stdout and stderr ...
+                foreach my $d (['log', $stdout], ['err', $stderr]) {
+                    my ($ext, $str) = @$d;
+                    # only print if there is something to print
+                    next
+                        unless ($str);
+                    my $tmpf =
+                        File::Spec->catfile($tempFileDir, "geninfo_$$.$ext");
+                    my $f = InOutFile->out($tmpf);
+                    my $h = $f->hdl();
+                    print($h $str);
+                }
+                my $dumpf = File::Spec->catfile($tempFileDir, "dumper_$$");
+                foreach my $f ($output_filename) {
+                    unlink $f
+                        if -f $f && !$lcovutil::preserve_intermediates;
+                }
+                my $buildDirCounts = $buildDirSearchPath->current_count();
+
+                my $then = Time::HiRes::gettimeofday();
+                # keep separate timestamp for when this child block was entered
+                #   vs when fork() was called - lest this job waited in queue for
+                #   a while
+                $lcovutil::profileData{process}{$processedChunks} =
+                    $then - $now;
+
+                $lcovutil::profileData{child}{$processedChunks} =
+                    $then - $childStart;
+                # dump parsed data - then read back and merge
+                my $data;
+                eval {
+                    # NOTE: not storing anything if we extracted nothing/
+                    #   there is no childInfo data
+                    $data =
+                        Storable::store(
+                              [$single_file ? $childInfo : undef,
+                               $buildDirCounts,
+                               [$files_created, scalar(@{$chunk->[1]}),
+                                $then
+                               ],
+                               lcovutil::compute_update($currentState)
+                              ],
+                              $dumpf) if defined($childInfo);
+                };
+                if ($@ || (defined($childInfo) && !defined($data))) {
+                    lcovutil::ignorable_error($lcovutil::ERROR_PARALLEL,
+                        "Child $$ serialize failed" . ($@ ? ": $@" : ''));
+                }
+                exit($status);
+            } else {
+                # I'm the parent
+                $children{$pid} = [$chunk, $now, $processedChunks];
+                ++$currentParallel;
+            }
+        } else {
+            # not parallel..
+            my $saveParallel = $lcovutil::maxParallelism;
+            $lcovutil::maxParallelism = 1;
+            if ($chunk->[0]) {
+                my $num = scalar(@{$chunk->[1]});
+                lcovutil::info(
+                           "Processing $num file" . ($num == 1 ? '' : 's') .
+                               " from chunk 0 serially\n");
+            }
+            my $now = Time::HiRes::gettimeofday();
+            $trace_data =
+                _process_one_chunk($chunk->[1], $processedChunks,
+                                   $trace_data, undef);
+            $processedFiles += scalar(@{$chunk->[1]});
+            if ($chunk->[0]) {
+                lcovutil::info("Finished processing chunk 0\n");
+            }
+            my $then = Time::HiRes::gettimeofday();
+            $lcovutil::maxParallelism = $saveParallel;
+            $lcovutil::profileData{process}{$processedChunks} =
+                $then - $now;
+        }
+    }    # end foreach
+
+    while ($currentParallel != 0) {
+        $currentParallel -=
+            _merge_one_child(\%children, $tempFileExt, \@worklist);
+    }
+    # wrap in outer loop in case we get a spaceout/child failure
+    #   during 'tail' processing.
+    } while (@worklist);
+
+    info("Finished processing %d "
+         .
+         ($initial ? 
'GCNO' : + ($lcovutil::geninfo_captureAll ? 'GCDA/GCNO' : 'GCDA')) . + " file%s\n", + $processedFiles, + 1 == $processedFiles ? '' : 's'); + # Report whether files were excluded. + if (%lcovutil::excluded_files) { + my $count = scalar keys %lcovutil::excluded_files; + + info("Excluded data for %d file%s due to include/exclude options\n", + $count, 1 == $count ? '' : 's'); + } + return $processedFiles; +} # # derive_data(contentdata, funcdata, bbdata) @@ -894,606 +1598,482 @@ sub gen_info($) sub derive_data($$$) { - my ($contentdata, $funcdata, $bbdata) = @_; - my @gcov_content = @{$contentdata}; - my @gcov_functions = @{$funcdata}; - my %fn_count; - my %ln_fn; - my $line; - my $maxline; - my %fn_name; - my $fn; - my $count; - - if (!defined($bbdata)) { - return @gcov_functions; - } - - # First add existing function data - while (@gcov_functions) { - $count = shift(@gcov_functions); - $fn = shift(@gcov_functions); - - $fn_count{$fn} = $count; - } - - # Convert line coverage data to function data - foreach $fn (keys(%{$bbdata})) { - my $line_data = $bbdata->{$fn}; - my $line; - my $fninstr = 0; - - if ($fn eq "") { - next; - } - # Find the lowest line count for this function - $count = 0; - foreach $line (@$line_data) { - my $linstr = $gcov_content[ ( $line - 1 ) * 3 + 0 ]; - my $lcount = $gcov_content[ ( $line - 1 ) * 3 + 1 ]; - - next if (!$linstr); - $fninstr = 1; - if (($lcount > 0) && - (($count == 0) || ($lcount < $count))) { - $count = $lcount; - } - } - next if (!$fninstr); - $fn_count{$fn} = $count; - } - - - # Check if we got data for all functions - foreach $fn (keys(%fn_name)) { - if ($fn eq "") { - next; - } - if (defined($fn_count{$fn})) { - next; - } - warn("WARNING: no derived data found for function $fn\n"); - } - - # Convert hash to list in @gcov_functions format - foreach $fn (sort(keys(%fn_count))) { - push(@gcov_functions, $fn_count{$fn}, $fn); - } - - return @gcov_functions; + my ($contentdata, $funcdata, $bbdata) = @_; + my @gcov_content = @{$contentdata}; + my @gcov_functions = @{$funcdata}; + my %fn_count; + + if (!defined($bbdata)) { + return @gcov_functions; + } + + # First add existing function data + while (@gcov_functions) { + my $count = shift(@gcov_functions); + my $fn = shift(@gcov_functions); + + $fn_count{$fn} = $count; + } + + # Convert line coverage data to function data + foreach my $fn (keys(%{$bbdata})) { + my $line_data = $bbdata->{$fn}; + my $line; + my $fninstr = 0; + + if ($fn eq "") { + next; + } + # Find the lowest line count for this function + my $count = 0; + foreach my $line (@$line_data) { + my $linstr = $gcov_content[($line - 1) * 3 + 0]; + my $lcount = $gcov_content[($line - 1) * 3 + 1]; + + next if (!$linstr); + $fninstr = 1; + if (($lcount > 0) && + (($count == 0) || ($lcount < $count))) { + $count = $lcount; + } + } + next if (!$fninstr); + $fn_count{$fn} = $count; + } + + # Convert hash to list in @gcov_functions format + foreach my $fn (sort(keys(%fn_count))) { + push(@gcov_functions, $fn_count{$fn}, $fn); + } + + return @gcov_functions; } # -# get_filenames(directory, pattern) +# process_dafile(dirname, da_filename, gcno_filename, tempdir) # -# Return a list of filenames found in directory which match the specified -# pattern. +# Create a .info file for a single data file. # # Die on error. 
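# (Overview of the implementation below: resolve the .gcda/.gcno paths,
#  read the graph (.gcno) file, run gcov in a private temp directory, then
#  fold each generated .gcov file into a TraceFile object that is returned
#  to the caller.)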
# -sub get_filenames($$) +sub process_dafile($$$$) { - my ($dirname, $pattern) = @_; - my @result; - my $directory; - local *DIR; - - opendir(DIR, $dirname) or - die("ERROR: cannot read directory $dirname\n"); - while ($directory = readdir(DIR)) { - push(@result, $directory) if ($directory =~ /$pattern/); - } - closedir(DIR); - - return @result; + my ($dirname, $gcda_file, $gcno_file, $tempdir) = @_; + my $da_filename; # Name of data file to process + my $da_dir; # Directory of data file + my $source_dir; # Directory of source file + my $da_basename; # data filename without ".da/.gcda" extension + my $bb_filename; # Name of respective graph file + my $bb_basename; # Basename of the original graph file + my $graph; # Contents of graph file + my $instr; # Contents of graph file part 2 + my $object_dir; # Directory containing all object files + my $source_filename; # Name of a source code file + my $gcov_file; # Name of a .gcov file + my @gcov_content; # Content of a .gcov file + my $gcov_branches; # Branch content of a .gcov file + my @gcov_functions; # Function calls of a .gcov file + my $line_number; # Line number count + my @matches; # List of absolute paths matching filename + my $base_dir; # Base directory for current file + + # Get path to data file in absolute and normalized form (begins with /, + # contains no more ../ or ./) + $da_filename = solve_relative_path($cwd, $gcda_file); + my $gcno_filename = solve_relative_path($cwd, $gcno_file); + + # Get directory and basename of data file + ($da_dir, $da_basename) = split_filename($da_filename); + + $source_dir = $da_dir; + if (is_compat($COMPAT_MODE_LIBTOOL)) { + # Avoid files from .libs dirs + $source_dir =~ s/\.libs$//; + } + + # Construct base_dir for current file + if ($base_directory) { + $base_dir = $base_directory; + } else { + $base_dir = $source_dir; + } + + # Construct name of graph file + $bb_filename = solve_relative_path($cwd, $gcno_file); + + # Find out the real location of graph file in case we're just looking at + # a link + while (readlink($bb_filename)) { + my $last_dir = dirname($bb_filename); + + $bb_filename = readlink($bb_filename); + $bb_filename = solve_relative_path($last_dir, $bb_filename); + } + + # Ignore empty graph file (e.g. source file with no statement) + if (-z $bb_filename) { + lcovutil::ignorable_error($lcovutil::ERROR_EMPTY, "empty $bb_filename"); + chdir($cwd) or die("can't cd back to $cwd: $!"); + return undef; + } + + # Read contents of graph file into hash. We need it later to find out + # the absolute path to each .gcov file created as well as for + # information about functions and their source code positions. + ($instr, $graph) = read_gcno($bb_filename); + + # Try to find base directory automatically if requested by user + if ($lcovutil::rc_auto_base) { + $base_dir = find_base_from_source($base_dir, + [keys(%{$instr}), keys(%{$graph})]); + } + + adjust_source_filenames($instr, $base_dir); + adjust_source_filenames($graph, $base_dir); + + # Set $object_dir to real location of object files. This may differ + # from $da_dir if the graph file is just a link to the "real" object + # file location. + $object_dir = dirname($bb_filename); + my $da_arg = File::Spec->catfile($base_dir, $da_filename); + # Is the data file in a different directory? (this happens e.g. with + # the gcov-kernel patch). 
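+    # In that case, links to both the data and the graph file are placed
+    # side by side in the temp directory so that a single "-o" argument
+    # lets gcov find them together.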
+ if ($object_dir ne $da_dir) { + # Use links in tempdir + $da_arg = File::Basename::basename($da_filename); + my $gcda = File::Spec->catfile($tempdir, $da_arg); + symlink($da_filename, $gcda) or + die("cannot create link $gcda: $!\n"); + my $gcno = File::Spec->catfile($tempdir, + File::Basename::basename($bb_filename)); + symlink($bb_filename, $gcno) or + die("cannot create link $gcno: $!\n"); + $object_dir = '.'; + } + + chdir($tempdir) or die("can't cd to $tempdir: $!"); + # Execute gcov command and suppress standard output + # also redirect stderr to /dev/null if 'quiet' + # HGC: what we really want to do is to redirect stdout/stderr + # unless verbose - but echo them for non-zero exit status. + my $now = Time::HiRes::gettimeofday(); + debug("call gcov: " . join(' ', @gcov_tool) . " $da_arg -o $object_dir\n"); + lcovutil::info(2, + "process $da_arg (for $base_dir/$da_filename in $tempdir\n"); + my ($out, $err, $code) = + system_no_output(1 + 2 + 4, @gcov_tool, $da_arg, "-o", $object_dir); + my $then = Time::HiRes::gettimeofday(); + $lcovutil::profileData{exec}{$dirname}{$gcda_file} = $then - $now; + + if (0 != $code) { + check_gcov_fail($err, $da_filename); + } + print_gcov_warnings('stdout', $out, 0, {}) + if ('' ne $out && + (0 != $code || + $lcovutil::verbose > 1)); + print_gcov_warnings('stderr', $err, 0, {}) + if ('' ne $err && + (0 != $code || + $lcovutil::verbose)); + + # Change back to initial directory + debug(2, "chdir back to $cwd\n"); + chdir($cwd) or die("can't cd back to $cwd: $!"); + # Collect data from resulting .gcov files and create .info file + # this version of gcov wrote the files to "." - but we want to + # save them in tempdir + my @gcov_list; + foreach my $f (glob("$tempdir/*.gcov $tempdir/.*.gcov")) { + # Skip gcov file for gcc built-in code + push(@gcov_list, $f) unless ($f eq ".gcov"); + } + + # Check for files + if (!@gcov_list) { + lcovutil::ignorable_error($lcovutil::ERROR_EMPTY, + "gcov did not create any files for " . "$da_filename!\n"); + } + + if ($code) { + ignorable_error($ERROR_UTILITY, + "GCOV command failed for $da_filename!"); + return undef; + } + + my $traceFile = TraceFile->new(); + # Traverse the list of generated .gcov files and combine them into a + # single .info file + foreach $gcov_file (@gcov_list) { + GCOV_FILE_LOOP: { + next unless -f $gcov_file; # skp if we didn't copy it over + + my ($source, $object) = read_gcov_header($gcov_file); + if (!defined($source)) { + # Derive source file name from gcov file name if + # header format could not be parsed + $source = $gcov_file; + $source =~ s/\.gcov$//; + } + + $source = solve_relative_path($base_dir, $source); + + # apply more patterns here + $source = ReadCurrentSource::resolve_path($source, 1); + + @matches = match_filename($source, keys(%{$instr})); + + # Skip files that are not mentioned in the graph file + if (!@matches) { + lcovutil::ignorable_error($lcovutil::ERROR_MISMATCH, + "cannot find an entry for " . + $gcov_file . 
" in $graph_file_extension file"); + unlink($gcov_file) unless $lcovutil::preserve_intermediates; + next; + } + + # Read in contents of gcov file + my @result = read_gcov_file($gcov_file, $da_filename, $source); + if (!defined($result[0])) { + lcovutil::ignorable_error($lcovutil::ERROR_CORRUPT, + "$gcov_file is unreadable"); + unlink($gcov_file) unless $lcovutil::preserve_intermediates; + next; + } + @gcov_content = @{$result[0]}; + my $branchData = $result[1]; + @gcov_functions = @{$result[2]}; + + # Skip empty files + if (!@gcov_content) { + lcovutil::ignorable_error($lcovutil::ERROR_EMPTY, + "$gcov_file is empty"); + unlink($gcov_file) unless $lcovutil::preserve_intermediates; + next; + } + + if (scalar(@matches) == 1) { + # Just one match + $source_filename = $matches[0]; + } else { + # Try to solve the ambiguity + $source_filename = solve_ambiguous_match($gcov_file, \@matches, + \@gcov_content); + } + $source_filename = + ReadCurrentSource::resolve_path($source_filename, 1); + + if (TraceFile::skipCurrentFile($source_filename)) { + $lcovutil::excluded_files{$source_filename} = 1; + unlink($gcov_file) unless $lcovutil::preserve_intermediates; + next GCOV_FILE_LOOP; + } + + # Skip external files if requested + if (is_external($source_filename)) { + info(" ignoring data for external file $source_filename\n"); + unlink($gcov_file) unless $lcovutil::preserve_intermediates; + next; + } + + my $fileData = $traceFile->data($source_filename); + my $functionMap = $fileData->testfnc($test_name); + my $branchMap = $fileData->testbr($test_name); + my $lineMap = $fileData->test($test_name); + + if (@lcovutil::extractVersionScript) { + my $version = lcovutil::extractFileVersion($source_filename); + $fileData->version($version) + if (defined($version) && $version ne ""); + } + + # If requested, derive function coverage data from + # line coverage data of the first line of a function + if ($opt_derive_func_data) { + @gcov_functions = + derive_data(\@gcov_content, \@gcov_functions, + $graph->{$source_filename}); + } + + # Hold function-related information + my %functionData; + if (defined($graph->{$source_filename})) { + my $fn_data = $graph->{$source_filename}; + + while (my ($fn, $ln_data) = each(%$fn_data)) { + next if ($fn eq ""); + my $line = $ln_data->[0]; + + # Normalize function name - need to demangle here because + # the graph data came from reading the gcno file and isn't + # demangled already + $fn = filter_fn_name($fn, + defined($lcovutil::demangle_cpp_cmd)); + $functionData{$fn} = + $functionMap->define_function($fn, $line); + } + } + + while (@gcov_functions) { + my $count = shift(@gcov_functions); + my $fn = shift(@gcov_functions); + + # don't need to demangle here because this came from reading + # the gcov result - and we demangled that. + $fn = filter_fn_name($fn, 0); + next unless exists($functionData{$fn}); + $functionData{$fn}->addAlias($fn, $count); + } + + # Coverage information for each instrumented branch: + foreach my $line ($branchData->keylist()) { + + my $branchEntry = $branchData->value($line); + + # gcov extraction block numbers can be strange - so + # just renumber them. 
+ my $blockRenumber = 0; + foreach my $blockId ($branchEntry->blocks()) { + my $blockData = $branchEntry->getBlock($blockId); + foreach my $br (@$blockData) { + $branchMap->append($line, $blockRenumber, + $br, $source_filename); + } + ++$blockRenumber; + } + } # end for each branch + + # Reset line counters + $line_number = 0; + + # Write coverage information for each instrumented line + # Note: @gcov_content contains a list of (flag, count, source) + # tuple for each source code line + while (@gcov_content) { + + $line_number++; + + # Check for instrumented line + if ($gcov_content[0]) { + my $hit = $gcov_content[1]; + # do we have branch data on this line? + # if no branch data and this line was marked as + # 'unexecuted' - then set its hit count to zero + if ('ARRAY' eq ref($hit)) { + die("unexpected 'unexec' count") unless $hit->[1] == 1; + $hit = $hit->[0]; + if ($hit != 0 && + !defined($branchMap->value($line_number))) { + lcovutil::debug( + "$source_filename:$line_number: unexecuted block on non-branch line with count=$hit\n" + ); + if ($lcovutil::opt_adjust_unexecuted_blocks) { + $hit = 0; + } elsif (lcovutil::warn_once( + "unexecuted block", + $ERROR_INCONSISTENT_DATA + )) { + lcovutil::ignorable_warning( + $ERROR_INCONSISTENT_DATA, + "$source_filename:$line_number: unexecuted block on non-branch line with non-zero hit count. Use \"geninfo --rc geninfo_unexecuted_blocks=1 to set count to zero." + ); + } + } + } + $lineMap->append($line_number, $hit); + } + # Remove already processed data from array + splice(@gcov_content, 0, 3); + } + # now go through lines, functions, branches - append to test_name data + $fileData->sum()->union($lineMap); + $fileData->sumbr()->union($branchMap); + $fileData->func()->union($functionMap); + + # Remove .gcov file after processing + unlink($gcov_file) unless $lcovutil::preserve_intermediates; + } + } + + return $traceFile; } # -# process_dafile(da_filename, dir) +# compute_internal_directories(list) # -# Create a .info file for a single data file. -# -# Die on error. 
+# Walk directory tree to find set of 'internal' directories +# - soft links within the 'internal' tree which point to directories outside +# the tree cause those target directories to be considered 'internal' if +# the "--follow" flag is specified - else, 'external' # -sub process_dafile($$) +sub compute_internal_directories(@) { - my ($file, $dir) = @_; - my $da_filename; # Name of data file to process - my $da_dir; # Directory of data file - my $source_dir; # Directory of source file - my $da_basename; # data filename without ".da/.gcda" extension - my $bb_filename; # Name of respective graph file - my $bb_basename; # Basename of the original graph file - my $graph; # Contents of graph file - my $instr; # Contents of graph file part 2 - my $gcov_error; # Error code of gcov tool - my $object_dir; # Directory containing all object files - my $source_filename; # Name of a source code file - my $gcov_file; # Name of a .gcov file - my @gcov_content; # Content of a .gcov file - my $gcov_branches; # Branch content of a .gcov file - my @gcov_functions; # Function calls of a .gcov file - my @gcov_list; # List of generated .gcov files - my $line_number; # Line number count - my $lines_hit; # Number of instrumented lines hit - my $lines_found; # Number of instrumented lines found - my $funcs_hit; # Number of instrumented functions hit - my $funcs_found; # Number of instrumented functions found - my $br_hit; - my $br_found; - my $source; # gcov source header information - my $object; # gcov object header information - my @matches; # List of absolute paths matching filename - my $base_dir; # Base directory for current file - my @tmp_links; # Temporary links to be cleaned up - my @result; - my $index; - my $da_renamed; # If data file is to be renamed - local *INFO_HANDLE; - - info("Processing %s\n", abs2rel($file, $dir)); - # Get path to data file in absolute and normalized form (begins with /, - # contains no more ../ or ./) - $da_filename = solve_relative_path($cwd, $file); - - # Get directory and basename of data file - ($da_dir, $da_basename) = split_filename($da_filename); - - $source_dir = $da_dir; - if (is_compat($COMPAT_MODE_LIBTOOL)) { - # Avoid files from .libs dirs - $source_dir =~ s/\.libs$//; - } - - if (-z $da_filename) - { - $da_renamed = 1; - } - else - { - $da_renamed = 0; - } - - # Construct base_dir for current file - if ($base_directory) - { - $base_dir = $base_directory; - } - else - { - $base_dir = $source_dir; - } - - # Check for writable $base_dir (gcov will try to write files there) - stat($base_dir); - if (!-w _) - { - die("ERROR: cannot write to directory $base_dir!\n"); - } - - # Construct name of graph file - $bb_basename = $da_basename.$graph_file_extension; - $bb_filename = "$da_dir/$bb_basename"; - - # Find out the real location of graph file in case we're just looking at - # a link - while (readlink($bb_filename)) - { - my $last_dir = dirname($bb_filename); - - $bb_filename = readlink($bb_filename); - $bb_filename = solve_relative_path($last_dir, $bb_filename); - } - - # Ignore empty graph file (e.g. source file with no statement) - if (-z $bb_filename) - { - warn("WARNING: empty $bb_filename (skipped)\n"); - return; - } - - # Read contents of graph file into hash. We need it later to find out - # the absolute path to each .gcov file created as well as for - # information about functions and their source code positions. 
- if ($gcov_version < $GCOV_VERSION_3_4_0) - { - if (is_compat($COMPAT_MODE_HAMMER)) - { - ($instr, $graph) = read_bbg($bb_filename); - } - else - { - ($instr, $graph) = read_bb($bb_filename); - } - } - else - { - ($instr, $graph) = read_gcno($bb_filename); - } - - # Try to find base directory automatically if requested by user - if ($rc_auto_base) { - $base_dir = find_base_from_source($base_dir, - [ keys(%{$instr}), keys(%{$graph}) ]); - } - - adjust_source_filenames($instr, $base_dir); - adjust_source_filenames($graph, $base_dir); - - # Set $object_dir to real location of object files. This may differ - # from $da_dir if the graph file is just a link to the "real" object - # file location. - $object_dir = dirname($bb_filename); - - # Is the data file in a different directory? (this happens e.g. with - # the gcov-kernel patch) - if ($object_dir ne $da_dir) - { - # Need to create link to data file in $object_dir - system("ln", "-s", $da_filename, - "$object_dir/$da_basename$data_file_extension") - and die ("ERROR: cannot create link $object_dir/". - "$da_basename$data_file_extension!\n"); - push(@tmp_links, - "$object_dir/$da_basename$data_file_extension"); - # Need to create link to graph file if basename of link - # and file are different (CONFIG_MODVERSION compat) - if ((basename($bb_filename) ne $bb_basename) && - (! -e "$object_dir/$bb_basename")) { - symlink($bb_filename, "$object_dir/$bb_basename") or - warn("WARNING: cannot create link ". - "$object_dir/$bb_basename\n"); - push(@tmp_links, "$object_dir/$bb_basename"); - } - } - - # Change to directory containing data files and apply GCOV - debug("chdir($base_dir)\n"); - chdir($base_dir); - - if ($da_renamed) - { - # Need to rename empty data file to workaround - # gcov <= 3.2.x bug (Abort) - system_no_output(3, "mv", "$da_filename", "$da_filename.ori") - and die ("ERROR: cannot rename $da_filename\n"); - } - - # Execute gcov command and suppress standard output - $gcov_error = system_no_output(1, $gcov_tool, $da_filename, - "-o", $object_dir, @gcov_options); - - if ($da_renamed) - { - system_no_output(3, "mv", "$da_filename.ori", "$da_filename") - and die ("ERROR: cannot rename $da_filename.ori"); - } - - # Clean up temporary links - foreach (@tmp_links) { - unlink($_); - } - - if ($gcov_error) - { - if ($ignore[$ERROR_GCOV]) - { - warn("WARNING: GCOV failed for $da_filename!\n"); - return; - } - die("ERROR: GCOV failed for $da_filename!\n"); - } - - # Collect data from resulting .gcov files and create .info file - @gcov_list = get_filenames('.', '\.gcov$'); - - # Check for files - if (!@gcov_list) - { - warn("WARNING: gcov did not create any files for ". - "$da_filename!\n"); - } - - # Check whether we're writing to a single file - if ($output_filename) - { - if ($output_filename eq "-") - { - *INFO_HANDLE = *STDOUT; - } - else - { - # Append to output file - open(INFO_HANDLE, ">>", $output_filename) - or die("ERROR: cannot write to ". 
- "$output_filename!\n"); - } - } - else - { - # Open .info file for output - open(INFO_HANDLE, ">", "$da_filename.info") - or die("ERROR: cannot create $da_filename.info!\n"); - } - - # Write test name - printf(INFO_HANDLE "TN:%s\n", $test_name); - - # Traverse the list of generated .gcov files and combine them into a - # single .info file - foreach $gcov_file (sort(@gcov_list)) - { - my $i; - my $num; - - # Skip gcov file for gcc built-in code - next if ($gcov_file eq ".gcov"); - - ($source, $object) = read_gcov_header($gcov_file); - - if (!defined($source)) { - # Derive source file name from gcov file name if - # header format could not be parsed - $source = $gcov_file; - $source =~ s/\.gcov$//; - } - - $source = solve_relative_path($base_dir, $source); - - if (defined($adjust_src_pattern)) { - # Apply transformation as specified by user - $source =~ s/$adjust_src_pattern/$adjust_src_replace/g; - } - - # gcov will happily create output even if there's no source code - # available - this interferes with checksum creation so we need - # to pull the emergency brake here. - if (! -r $source && $checksum) - { - if ($ignore[$ERROR_SOURCE]) - { - warn("WARNING: could not read source file ". - "$source\n"); - next; - } - die("ERROR: could not read source file $source\n"); - } - - @matches = match_filename($source, keys(%{$instr})); - - # Skip files that are not mentioned in the graph file - if (!@matches) - { - warn("WARNING: cannot find an entry for ".$gcov_file. - " in $graph_file_extension file, skipping ". - "file!\n"); - unlink($gcov_file); - next; - } - - # Read in contents of gcov file - @result = read_gcov_file($gcov_file); - if (!defined($result[0])) { - warn("WARNING: skipping unreadable file ". - $gcov_file."\n"); - unlink($gcov_file); - next; - } - @gcov_content = @{$result[0]}; - $gcov_branches = $result[1]; - @gcov_functions = @{$result[2]}; - - # Skip empty files - if (!@gcov_content) - { - warn("WARNING: skipping empty file ".$gcov_file."\n"); - unlink($gcov_file); - next; - } - - if (scalar(@matches) == 1) - { - # Just one match - $source_filename = $matches[0]; - } - else - { - # Try to solve the ambiguity - $source_filename = solve_ambiguous_match($gcov_file, - \@matches, \@gcov_content); - } - - if (@include_patterns) - { - my $keep = 0; - - foreach my $pattern (@include_patterns) - { - $keep ||= ($source_filename =~ (/^$pattern$/)); - } - - if (!$keep) - { - $excluded_files{$source_filename} = (); - unlink($gcov_file); - next; - } - } - - if (@exclude_patterns) - { - my $exclude = 0; - - foreach my $pattern (@exclude_patterns) - { - $exclude ||= ($source_filename =~ (/^$pattern$/)); - } - - if ($exclude) - { - $excluded_files{$source_filename} = (); - unlink($gcov_file); - next; - } - } - - # Skip external files if requested - if (!$opt_external) { - if (is_external($source_filename)) { - info(" ignoring data for external file ". 
- "$source_filename\n"); - unlink($gcov_file); - next; - } - } - - # Write absolute path of source file - printf(INFO_HANDLE "SF:%s\n", $source_filename); - - # If requested, derive function coverage data from - # line coverage data of the first line of a function - if ($opt_derive_func_data) { - @gcov_functions = - derive_data(\@gcov_content, \@gcov_functions, - $graph->{$source_filename}); - } - - # Write function-related information - if (defined($graph->{$source_filename})) - { - my $fn_data = $graph->{$source_filename}; - my $fn; - - foreach $fn (sort - {$fn_data->{$a}->[0] <=> $fn_data->{$b}->[0]} - keys(%{$fn_data})) { - my $ln_data = $fn_data->{$fn}; - my $line = $ln_data->[0]; - - # Skip empty function - if ($fn eq "") { - next; - } - # Remove excluded functions - if (!$no_markers) { - my $gfn; - my $found = 0; - - foreach $gfn (@gcov_functions) { - if ($gfn eq $fn) { - $found = 1; - last; - } - } - if (!$found) { - next; - } - } - - # Normalize function name - $fn = filter_fn_name($fn); - - print(INFO_HANDLE "FN:$line,$fn\n"); - } - } - - #-- - #-- FNDA: , - #-- FNF: overall count of functions - #-- FNH: overall count of functions with non-zero call count - #-- - $funcs_found = 0; - $funcs_hit = 0; - while (@gcov_functions) - { - my $count = shift(@gcov_functions); - my $fn = shift(@gcov_functions); - - $fn = filter_fn_name($fn); - printf(INFO_HANDLE "FNDA:$count,$fn\n"); - $funcs_found++; - $funcs_hit++ if ($count > 0); - } - if ($funcs_found > 0) { - printf(INFO_HANDLE "FNF:%s\n", $funcs_found); - printf(INFO_HANDLE "FNH:%s\n", $funcs_hit); - } - - # Write coverage information for each instrumented branch: - # - # BRDA:,,, - # - # where 'taken' is the number of times the branch was taken - # or '-' if the block to which the branch belongs was never - # executed - $br_found = 0; - $br_hit = 0; - $num = br_gvec_len($gcov_branches); - for ($i = 0; $i < $num; $i++) { - my ($line, $block, $branch, $taken) = - br_gvec_get($gcov_branches, $i); - - $block = $BR_VEC_MAX if ($block < 0); - print(INFO_HANDLE "BRDA:$line,$block,$branch,$taken\n"); - $br_found++; - $br_hit++ if ($taken ne '-' && $taken > 0); - } - if ($br_found > 0) { - printf(INFO_HANDLE "BRF:%s\n", $br_found); - printf(INFO_HANDLE "BRH:%s\n", $br_hit); - } - - # Reset line counters - $line_number = 0; - $lines_found = 0; - $lines_hit = 0; - - # Write coverage information for each instrumented line - # Note: @gcov_content contains a list of (flag, count, source) - # tuple for each source code line - while (@gcov_content) - { - $line_number++; - - # Check for instrumented line - if ($gcov_content[0]) - { - $lines_found++; - printf(INFO_HANDLE "DA:".$line_number.",". - $gcov_content[1].($checksum ? - ",". md5_base64($gcov_content[2]) : ""). 
- "\n"); - - # Increase $lines_hit in case of an execution - # count>0 - if ($gcov_content[1] > 0) { $lines_hit++; } - } - - # Remove already processed data from array - splice(@gcov_content,0,3); - } - - # Write line statistics and section separator - printf(INFO_HANDLE "LF:%s\n", $lines_found); - printf(INFO_HANDLE "LH:%s\n", $lines_hit); - print(INFO_HANDLE "end_of_record\n"); - - # Remove .gcov file after processing - unlink($gcov_file); - } - - if (!($output_filename && ($output_filename eq "-"))) - { - close(INFO_HANDLE); - } - - # Change back to initial directory - chdir($cwd); + my @dirstack; + foreach my $entry (@_) { + next unless (defined($entry)); + # do not apply substitution patterns to the dir name - + # if user really wants that, they can specify a substitution and/or + # include/exclude patterns + my $p = solve_relative_path($cwd, $entry); + push(@dirstack, $p) + if $lcovutil::opt_follow && !grep($p eq $_, @dirstack); + push(@lcovutil::internal_dirs, $p) + unless + grep($p eq $_ || ($lcovutil::case_insensitive && lc($p) eq lc($_)), + @lcovutil::internal_dirs); + if (!file_name_is_absolute($entry) && + $entry ne $p) { + push(@lcovutil::internal_dirs, $entry) + unless grep($entry eq $_ || ($lcovutil::case_insensitive && + lc($entry) eq lc($_)), + @lcovutil::internal_dirs); + } + } + + my %visited; + while (@dirstack) { + my $top = pop(@dirstack); + if (-l $top) { + my $t = Cwd::realpath($top); + die("expected directory found '$t'") unless -d $t; + unless (exists($visited{$t})) { + lcovutil::info(1, + "internal directory: target '$t' of link '$top'\n"); + $visited{$t} = $top; + push(@dirstack, $t); + push(@lcovutil::internal_dirs, $t) + if lcovutil::is_external($t); + } + next; + } + opendir(my $dh, $top) or die("can't open directory: $!"); + while (my $e = readdir($dh)) { + next if $e eq '.' || $e eq '..'; + my $p = File::Spec->catfile($top, $e); + if (-l $p) { + my $l = Cwd::realpath($p); + push(@dirstack, $p) if (-d $l); + } elsif (-d $p) { + push(@dirstack, $p) unless exists($visited{$p}); + $visited{$p} = $top; + } + } # while + closedir($dh); + } + + if (@lcovutil::internal_dirs) { + lcovutil::info("Recording 'internal' directories:\n\t" . + join("\n\t", @lcovutil::internal_dirs) . 
"\n"); + } + + # Function is_external() requires all internal_dirs to end with a slash + foreach my $dir (@lcovutil::internal_dirs) { + $dir =~ s#$lcovutil::dirseparator*$#$lcovutil::dirseparator#; + } } - # # solve_relative_path(path, dir) # @@ -1502,73 +2082,70 @@ sub process_dafile($$) sub solve_relative_path($$) { - my $path = $_[0]; - my $dir = $_[1]; - my $volume; - my $directories; - my $filename; - my @dirs; # holds path elements - my $result; - - # Convert from Windows path to msys path - if( $^O eq "msys" ) - { - # search for a windows drive letter at the beginning - ($volume, $directories, $filename) = File::Spec::Win32->splitpath( $dir ); - if( $volume ne '' ) - { - my $uppercase_volume; - # transform c/d\../e/f\g to Windows style c\d\..\e\f\g - $dir = File::Spec::Win32->canonpath( $dir ); - # use Win32 module to retrieve path components - # $uppercase_volume is not used any further - ( $uppercase_volume, $directories, $filename ) = File::Spec::Win32->splitpath( $dir ); - @dirs = File::Spec::Win32->splitdir( $directories ); - - # prepend volume, since in msys C: is always mounted to /c - $volume =~ s|^([a-zA-Z]+):|/\L$1\E|; - unshift( @dirs, $volume ); - - # transform to Unix style '/' path - $directories = File::Spec->catdir( @dirs ); - $dir = File::Spec->catpath( '', $directories, $filename ); - } else { - # eliminate '\' path separators - $dir = File::Spec->canonpath( $dir ); - } - } - - $result = $dir; - # Prepend path if not absolute - if ($dir =~ /^[^\/]/) - { - $result = "$path/$result"; - } - - # Remove // - $result =~ s/\/\//\//g; - - # Remove . - while ($result =~ s/\/\.\//\//g) - { - } - $result =~ s/\/\.$/\//g; - - # Remove trailing / - $result =~ s/\/$//g; - - # Solve .. - while ($result =~ s/\/[^\/]+\/\.\.\//\//) - { - } - - # Remove preceding .. - $result =~ s/^\/\.\.\//\//g; - - return $result; + my ($path, $dir) = @_; + + # Convert from Windows path to msys path + if ($^O eq "msys") { + # search for a windows drive letter at the beginning + my ($volume, $directories, $filename) = + File::Spec::Win32->splitpath($dir); + if ($volume ne '') { + my $uppercase_volume; + # transform c/d\../e/f\g to Windows style c\d\..\e\f\g + $dir = File::Spec::Win32->canonpath($dir); + # use Win32 module to retrieve path components + # $uppercase_volume is not used any further + ($uppercase_volume, $directories, $filename) = + File::Spec::Win32->splitpath($dir); + my @dirs = File::Spec::Win32->splitdir($directories); + + # prepend volume, since in msys C: is always mounted to /c + $volume =~ s|^([a-zA-Z]+):|/\L$1\E|; + unshift(@dirs, $volume); + + # transform to Unix style '/' path + $directories = File::Spec->catdir(@dirs); + $dir = File::Spec->catpath('', $directories, $filename); + } else { + # eliminate '\' path separators + $dir = File::Spec->canonpath($dir); + } + } + + my $result = $dir; + # Prepend path if not absolute + if (!File::Spec->file_name_is_absolute($dir)) { + $result = File::Spec->catfile($path, $result); + } + # can't just use Cwd::abs_path on the pathname because it understands + # soft links and resolves them - so we end up pointing to the actual file + # and not to the carefully constructed linked-build paths that the user + # wanted. Have to do it the hard way + # return Cwd::abs_path($result); + + # Remove // in favor of / + my $d = $lcovutil::dirseparator; + $result =~ s#$d$d#$d#g; + + # Remove . 
in middle or at end of path + while ($result =~ s#$d\.($d|$)#$d#) { + } + + # Remove trailing / + $result =~ s#$d$##g if ($result ne $d); + + # change "X/dirname/../Y" into "X/Y" + while ($result =~ s#$d[^$d]+$d\.\.($d|$)#$d#) { + } + # change "dirname/../Y" into "./Y" (i.e,, at head of path) + $result =~ s#^[^$d]+\/\.\.$d#.$d#; + + # Remove preceding .. + $result =~ s#^$d\.\.$d#$d#g; + + return $result; } - # # match_filename(gcov_filename, list) # @@ -1578,40 +2155,35 @@ sub solve_relative_path($$) sub match_filename($@) { - my ($filename, @list) = @_; - my ($vol, $dir, $file) = splitpath($filename); - my @comp = splitdir($dir); - my $comps = scalar(@comp); - my $entry; - my @result; - -entry: - foreach $entry (@list) { - my ($evol, $edir, $efile) = splitpath($entry); - my @ecomp; - my $ecomps; - my $i; - - # Filename component must match - if ($efile ne $file) { - next; - } - # Check directory components last to first for match - @ecomp = splitdir($edir); - $ecomps = scalar(@ecomp); - if ($ecomps < $comps) { - next; - } - for ($i = 0; $i < $comps; $i++) { - if ($comp[$comps - $i - 1] ne - $ecomp[$ecomps - $i - 1]) { - next entry; - } - } - push(@result, $entry), - } - - return @result; + my ($filename, @list) = @_; + my ($vol, $dir, $file) = splitpath($filename); + my @comp = splitdir($dir); + my $comps = scalar(@comp); + my $entry; + my @result; + + entry: + foreach $entry (@list) { + my ($evol, $edir, $efile) = splitpath($entry); + # Filename component must match + if ($efile ne $file) { + next; + } + # Check directory components last to first for match + my @ecomp = splitdir($edir); + my $ecomps = scalar(@ecomp); + if ($ecomps < $comps) { + next; + } + for (my $i = 0; $i < $comps; $i++) { + if ($comp[$comps - $i - 1] ne $ecomp[$ecomps - $i - 1]) { + next entry; + } + } + push(@result, $entry); + } + + return @result; } # @@ -1621,55 +2193,47 @@ entry: # by comparing source code provided in the GCOV file with that of the files # in MATCHES. REL_FILENAME identifies the relative filename of the gcov # file. -# +# # Return the one real match or die if there is none. 
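# (The comparison below walks every third element of the gcov content
#  array - the stored source text of each line - and returns the first
#  candidate whose on-disk text matches throughout.)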
# sub solve_ambiguous_match($$$) { - my $rel_name = $_[0]; - my $matches = $_[1]; - my $content = $_[2]; - my $filename; - my $index; - my $no_match; - local *SOURCE; - - # Check the list of matches - foreach $filename (@$matches) - { - - # Compare file contents - open(SOURCE, "<", $filename) - or die("ERROR: cannot read $filename!\n"); - - $no_match = 0; - for ($index = 2; ; $index += 3) - { - chomp; - - # Also remove CR from line-end - s/\015$//; - - if ($_ ne @$content[$index]) - { - $no_match = 1; - last; - } - } - - close(SOURCE); - - if (!$no_match) - { - info("Solved source file ambiguity for $rel_name\n"); - return $filename; - } - } - - die("ERROR: could not match gcov data for $rel_name!\n"); -} + my $rel_name = $_[0]; + my $matches = $_[1]; + my $content = $_[2]; + local *SOURCE; + + # Check the list of matches + foreach my $filename (@$matches) { + + # Compare file contents + open(SOURCE, "<", $filename) or + die("cannot read $filename: $!\n"); + my $no_match = 0; + for (my $index = 2; ; $index += 3) { + chomp; + + # Also remove CR from line-end + s/\015$//; + + if ($_ ne @$content[$index]) { + $no_match = 1; + last; + } + } + + close(SOURCE) or die("unable to close $filename: $!\n"); + + if (!$no_match) { + info("Solved source file ambiguity for $rel_name\n"); + return $filename; + } + } + + die("could not match gcov data for $rel_name!\n"); +} # # split_filename(filename) @@ -1679,14 +2243,13 @@ sub solve_ambiguous_match($$$) sub split_filename($) { - my @path_components = split('/', $_[0]); - my @file_components = split('\.', pop(@path_components)); - my $extension = pop(@file_components); + my ($vol, $dir, $name) = File::Spec->splitpath($_[0]); - return (join("/",@path_components), join(".",@file_components), - $extension); -} + my @file_components = split('\.', $name); + my $extension = pop(@file_components); + return ($vol . $dir, join(".", @file_components), $extension); +} # # read_gcov_header(gcov_filename) @@ -1706,131 +2269,39 @@ sub split_filename($) sub read_gcov_header($) { - my $source; - my $object; - local *INPUT; - - if (!open(INPUT, "<", $_[0])) - { - if ($ignore_errors[$ERROR_GCOV]) - { - warn("WARNING: cannot read $_[0]!\n"); - return (undef,undef); - } - die("ERROR: cannot read $_[0]!\n"); - } - - while () - { - chomp($_); - - # Also remove CR from line-end - s/\015$//; - - if (/^\s+-:\s+0:Source:(.*)$/) - { - # Source: header entry - $source = $1; - } - elsif (/^\s+-:\s+0:Object:(.*)$/) - { - # Object: header entry - $object = $1; - } - else - { - last; - } - } - - close(INPUT); - - return ($source, $object); -} - - -# -# br_gvec_len(vector) -# -# Return the number of entries in the branch coverage vector. -# - -sub br_gvec_len($) -{ - my ($vec) = @_; - - return 0 if (!defined($vec)); - return (length($vec) * 8 / $BR_VEC_WIDTH) / $BR_VEC_ENTRIES; -} - - -# -# br_gvec_get(vector, number) -# -# Return an entry from the branch coverage vector. 
-# - -sub br_gvec_get($$) -{ - my ($vec, $num) = @_; - my $line; - my $block; - my $branch; - my $taken; - my $offset = $num * $BR_VEC_ENTRIES; - - # Retrieve data from vector - $line = vec($vec, $offset + $BR_LINE, $BR_VEC_WIDTH); - $block = vec($vec, $offset + $BR_BLOCK, $BR_VEC_WIDTH); - $block = -1 if ($block == $BR_VEC_MAX); - $branch = vec($vec, $offset + $BR_BRANCH, $BR_VEC_WIDTH); - $taken = vec($vec, $offset + $BR_TAKEN, $BR_VEC_WIDTH); - - # Decode taken value from an integer - if ($taken == 0) { - $taken = "-"; - } else { - $taken--; - } - - return ($line, $block, $branch, $taken); -} - - -# -# br_gvec_push(vector, line, block, branch, taken) -# -# Add an entry to the branch coverage vector. -# - -sub br_gvec_push($$$$$) -{ - my ($vec, $line, $block, $branch, $taken) = @_; - my $offset; - - $vec = "" if (!defined($vec)); - $offset = br_gvec_len($vec) * $BR_VEC_ENTRIES; - $block = $BR_VEC_MAX if $block < 0; - - # Encode taken value into an integer - if ($taken eq "-") { - $taken = 0; - } else { - $taken++; - } - - # Add to vector - vec($vec, $offset + $BR_LINE, $BR_VEC_WIDTH) = $line; - vec($vec, $offset + $BR_BLOCK, $BR_VEC_WIDTH) = $block; - vec($vec, $offset + $BR_BRANCH, $BR_VEC_WIDTH) = $branch; - vec($vec, $offset + $BR_TAKEN, $BR_VEC_WIDTH) = $taken; - - return $vec; + my $source; + my $object; + local *INPUT; + + if (!open(INPUT, "<", $_[0])) { + ignorable_error($ERROR_GCOV, "cannot read $_[0]: $!"); + return (undef, undef); + } + + while () { + chomp($_); + + # Also remove CR from line-end + s/\015$//; + + if (/^\s+-:\s+0:Source:(.*)$/) { + # Source: header entry + $source = $1; + } elsif (/^\s+-:\s+0:Object:(.*)$/) { + # Object: header entry + $object = $1; + } else { + last; + } + } + + close(INPUT) or die("unable to close $_[0]: $!\n"); + + return ($source, $object); } - # -# read_gcov_file(gcov_filename) +# read_gcov_file(gcov_filename, gcda_filename, source_filename) # # Parse file GCOV_FILENAME (.gcov file format) and return the list: # (reference to gcov_content, reference to gcov_branch, reference to gcov_func) @@ -1842,8 +2313,7 @@ sub br_gvec_push($$$$$) # $result[($line_number-1)*3+1] = execution count for line $line_number # $result[($line_number-1)*3+2] = source code text for line $line_number # -# gcov_branch is a vector of 4 4-byte long elements for each branch: -# line number, block number, branch number, count + 1 or 0 +# gcov_branch is a BranchData instance - see lcovutil.pm # # gcov_func is a list of 2 elements # (number of calls, function name) for each function @@ -1851,255 +2321,113 @@ sub br_gvec_push($$$$$) # Die on error. 
# -sub read_gcov_file($) +sub read_gcov_file($$$) { - my $filename = $_[0]; - my @result = (); - my $branches = ""; - my @functions = (); - my $number; - my $exclude_flag = 0; - my $exclude_line = 0; - my $exclude_br_flag = 0; - my $exclude_exception_br_flag = 0; - my $exclude_branch = 0; - my $exclude_exception_branch = 0; - my $last_block = $UNNAMED_BLOCK; - my $last_line = 0; - local *INPUT; - - if (!open(INPUT, "<", $filename)) { - if ($ignore_errors[$ERROR_GCOV]) - { - warn("WARNING: cannot read $filename!\n"); - return (undef, undef, undef); - } - die("ERROR: cannot read $filename!\n"); - } - - if ($gcov_version < $GCOV_VERSION_3_3_0) - { - # Expect gcov format as used in gcc < 3.3 - while () - { - chomp($_); - - # Also remove CR from line-end - s/\015$//; - - if (/^branch\s+(\d+)\s+taken\s+=\s+(\d+)/) { - next if (!$br_coverage); - next if ($exclude_line); - next if ($exclude_branch); - $branches = br_gvec_push($branches, $last_line, - $last_block, $1, $2); - } elsif (/^branch\s+(\d+)\s+never\s+executed/) { - next if (!$br_coverage); - next if ($exclude_line); - next if ($exclude_branch); - $branches = br_gvec_push($branches, $last_line, - $last_block, $1, '-'); - } - elsif (/^call/ || /^function/) - { - # Function call return data - } - else - { - $last_line++; - # Check for exclusion markers - if (!$no_markers) { - if (/$EXCL_STOP/) { - $exclude_flag = 0; - } elsif (/$EXCL_START/) { - $exclude_flag = 1; - } - if (/$excl_line/ || $exclude_flag) { - $exclude_line = 1; - } else { - $exclude_line = 0; - } - } - # Check for exclusion markers (branch exclude) - if (!$no_markers) { - if (/$EXCL_BR_STOP/) { - $exclude_br_flag = 0; - } elsif (/$EXCL_BR_START/) { - $exclude_br_flag = 1; - } - if (/$excl_br_line/ || $exclude_br_flag) { - $exclude_branch = 1; - } else { - $exclude_branch = 0; - } - } - # Check for exclusion markers (exception branch exclude) - if (!$no_markers && - /($EXCL_EXCEPTION_BR_STOP|$EXCL_EXCEPTION_BR_START|$excl_exception_br_line)/) { - warn("WARNING: $1 found at $filename:$last_line but ". - "branch exceptions exclusion is not supported with ". 
- "gcov versions older than 3.3\n"); - } - # Source code execution data - if (/^\t\t(.*)$/) - { - # Uninstrumented line - push(@result, 0); - push(@result, 0); - push(@result, $1); - next; - } - $number = (split(" ",substr($_, 0, 16)))[0]; - - # Check for zero count which is indicated - # by ###### - if ($number eq "######") { $number = 0; } - - if ($exclude_line) { - # Register uninstrumented line instead - push(@result, 0); - push(@result, 0); - } else { - push(@result, 1); - push(@result, $number); - } - push(@result, substr($_, 16)); - } - } - } - else - { - # Expect gcov format as used in gcc >= 3.3 - while () - { - chomp($_); - - # Also remove CR from line-end - s/\015$//; - - if (/^\s*(\d+|\$+|\%+):\s*(\d+)-block\s+(\d+)\s*$/) { - # Block information - used to group related - # branches - $last_line = $2; - $last_block = $3; - } elsif (/^branch\s+(\d+)\s+taken\s+(\d+)(?:\s+\(([^)]*)\))?/) { - next if (!$br_coverage); - next if ($exclude_line); - next if ($exclude_branch); - next if (($exclude_exception_branch || $no_exception_br) && - defined($3) && ($3 eq "throw")); - $branches = br_gvec_push($branches, $last_line, - $last_block, $1, $2); - } elsif (/^branch\s+(\d+)\s+never\s+executed/) { - next if (!$br_coverage); - next if ($exclude_line); - next if ($exclude_branch); - $branches = br_gvec_push($branches, $last_line, - $last_block, $1, '-'); - } - elsif (/^function\s+(.+)\s+called\s+(\d+)\s+/) - { - next if (!$func_coverage); - if ($exclude_line) { - next; - } - push(@functions, $2, $1); - } - elsif (/^call/) - { - # Function call return data - } - elsif (/^\s*([^:]+):\s*([^:]+):(.*)$/) - { - my ($count, $line, $code) = ($1, $2, $3); - - # Skip instance-specific counts - next if ($line <= (scalar(@result) / 3)); - - $last_line = $line; - $last_block = $UNNAMED_BLOCK; - # Check for exclusion markers - if (!$no_markers) { - if (/$EXCL_STOP/) { - $exclude_flag = 0; - } elsif (/$EXCL_START/) { - $exclude_flag = 1; - } - if (/$excl_line/ || $exclude_flag) { - $exclude_line = 1; - } else { - $exclude_line = 0; - } - } - # Check for exclusion markers (branch exclude) - if (!$no_markers) { - if (/$EXCL_BR_STOP/) { - $exclude_br_flag = 0; - } elsif (/$EXCL_BR_START/) { - $exclude_br_flag = 1; - } - if (/$excl_br_line/ || $exclude_br_flag) { - $exclude_branch = 1; - } else { - $exclude_branch = 0; - } - } - # Check for exclusion markers (exception branch exclude) - if (!$no_markers) { - if (/$EXCL_EXCEPTION_BR_STOP/) { - $exclude_exception_br_flag = 0; - } elsif (/$EXCL_EXCEPTION_BR_START/) { - $exclude_exception_br_flag = 1; - } - if (/$excl_exception_br_line/ || $exclude_exception_br_flag) { - $exclude_exception_branch = 1; - } else { - $exclude_exception_branch = 0; - } - } - - # Strip unexecuted basic block marker - $count =~ s/\*$//; - - # :: - if ($line eq "0") - { - # Extra data - } - elsif ($count eq "-") - { - # Uninstrumented line - push(@result, 0); - push(@result, 0); - push(@result, $code); - } - else - { - if ($exclude_line) { - push(@result, 0); - push(@result, 0); - } else { - # Check for zero count - if ($count =~ /^[#=]/) { - $count = 0; - } - push(@result, 1); - push(@result, $count); - } - push(@result, $code); - } - } - } - } - - close(INPUT); - if ($exclude_flag || $exclude_br_flag || $exclude_exception_br_flag) { - warn("WARNING: unterminated exclusion section in $filename\n"); - } - return(\@result, $branches, \@functions); + my ($filename, $da_filename, $source_filename) = @_; + my @result = (); + my $branchData = BranchData->new(); + my @functions = (); + my 
$number;
+    my $last_block = $UNNAMED_BLOCK;
+    my $last_line  = 0;
+    my $branchId;
+    my $unexec;
+
+    my $f     = InOutFile->in($filename, $lcovutil::demangle_cpp_cmd);
+    my $input = $f->hdl();
+
+    my $currentBlock;
+
+    # Expect gcov format as used in gcc >= 3.3
+    debug("reading $filename\n");
+    # line content "/*EOF*/" occurs when gcov knows the
+    #   number of lines in the file but can't find the source code -
+    #   most of the time, that is deliberate because we run gcov in a
+    #   temp directory in order to avoid conflicts from parallel execution
+    # we do a bit of error checking that the content is not inconsistent
+    #   - e.g., if we find the file due to fully qualified paths, and find
+    #     a non-EOF line following an EOF line.
+    my $foundEOF = 0;
+    while (<$input>) {
+        chomp($_);
+        # Also remove CR from line-end
+        s/\015$//;
+
+        if (/^\s*(\d+|\$+|\%+):\s*(\d+)-block\s+(\d+)\s*$/) {
+            # Block information - used to group related branches
+            $branchId   = 0;
+            $last_line  = $2;
+            $last_block = $3;
+        } elsif (/^branch\s+(\d+)\s+taken\s+(\d+)(?:\s+\(([^)]*)\))?/) {
+            next unless $lcovutil::br_coverage;
+            # $1 is block ID, $2 is hit count
+            my $count = $2;
+            my $br    = BranchBlock->new($branchId, $count, undef,
+                                         defined($3) && $3 eq 'throw');
+            $branchData->append($last_line, $last_block, $br, $filename);
+            ++$branchId;
+        } elsif (/^branch\s+(\d+)\s+never\s+executed/) {
+            next unless $lcovutil::br_coverage;
+            # this branch not taken
+            my $br = BranchBlock->new($branchId, '-');
+            $branchData->append($last_line, $last_block, $br, $filename);
+            ++$branchId;
+        } elsif (/^function\s+(.+)\s+called\s+(\d+)\s+/) {
+            next unless $lcovutil::func_coverage;
+            my $name  = $1;
+            my $count = $2;
+            push(@functions, $count, $name);
+        } elsif (/^call/) {
+            # Function call return data
+        } elsif (/^\s*([^:]+):\s*([^:]+):(.*)$/) {
+            my ($count, $line, $code) = ($1, $2, $3);
+            # Skip instance-specific counts
+            next if ($line <= (scalar(@result) / 3));
+            # skip fake line inserted by gcov
+            if ($code eq '/*EOF*/') {
+                $foundEOF = 1;
+            } elsif ($foundEOF) {
+                # data looks inconsistent...we started finding some EOF entries
+                # and now we found a following entry which claims not to be EOF
+                lcovutil::ignorable_error($ERROR_FORMAT,
+                    "non-EOF for $source_filename:$line at $filename:$. while processing $da_filename: '$code'"
+                );
+            }
+            $branchId   = 0;    # if $last_line != $line;
+            $last_line  = $line;
+            $last_block = $UNNAMED_BLOCK;
+            # Strip unexecuted basic block marker
+            if ($count =~ /^([^*]+)\*$/) {
+                # need to do something about lines which have non-zero count
+                # but unexecuted block.  If there are no branches associated
+                # with this line, then we should mark the line as not hit.
+                # Otherwise, the result is misleading because we can see
+                # (for example) a non-zero hit count for the 'if' clause
+                # of an untaken branch.
+                $unexec = 1;
+                $count  = $1;
+            } else {
+                $unexec = 0;
+            }
+
+            # <exec count>:<line number>:<source code>
+            if ($line eq "0") {
+                # Extra data
+            } elsif ($count eq "-") {
+                # Uninstrumented line
+                push(@result, 0, 0, $code);
+            } else {
+                # Check for zero count
+                if ($count =~ /^[#=]/) {
+                    $count = 0;
+                }
+                push(@result, 1, $unexec ? 
[$count, $unexec] : $count, $code); + } + } + } + return (\@result, $branchData, \@functions); } - # # read_intermediate_text(gcov_filename, data) # @@ -2112,24 +2440,22 @@ sub read_gcov_file($) sub read_intermediate_text($$) { - my ($gcov_filename, $data) = @_; - my $fd; - my $filename; - - open($fd, "<", $gcov_filename) or - die("ERROR: Could not read $gcov_filename: $!\n"); - while (my $line = <$fd>) { - if ($line =~ /^file:(.*)$/) { - $filename = $1; - chomp($filename); - } elsif (defined($filename)) { - $data->{$filename} .= $line; - } - } - close($fd); + my ($gcov_filename, $data) = @_; + my $filename; + + my $f = InOutFile->in($gcov_filename, $lcovutil::demangle_cpp_cmd); + my $h = $f->hdl(); + while (my $line = <$h>) { + if ($line =~ /^file:(.*)$/) { + $filename = $1; + $filename =~ s/[\r\n]$//g; + #filename will be simplified/sustituted in 'adjust_source_filenames' + } elsif (defined($filename)) { + $data->{$filename} .= $line; + } + } } - # # read_intermediate_json(gcov_filename, data, basedir_ref) # @@ -2144,501 +2470,569 @@ sub read_intermediate_text($$) sub read_intermediate_json($$$) { - my ($gcov_filename, $data, $basedir_ref) = @_; - my $text; - my $json; - - gunzip($gcov_filename, \$text) or - die("ERROR: Could not read $gcov_filename: $GunzipError\n"); - - $json = decode_json($text); - if (!defined($json) || !exists($json->{"files"}) || - ref($json->{"files"} ne "ARRAY")) { - die("ERROR: Unrecognized JSON output format in ". - "$gcov_filename\n"); - } - - $$basedir_ref = $json->{"current_working_directory"}; - - # Workaround for bug in MSYS GCC 9.x that encodes \ as \n in gcov JSON - # output - if ($^O eq "msys" && $$basedir_ref =~ /\n/) { - $$basedir_ref =~ s#\n#/#g; - } - - for my $file (@{$json->{"files"}}) { - my $filename = $file->{"file"}; - - $data->{$filename} = $file; - } + my ($gcov_filename, $data, $basedir_ref) = @_; + # intermediate JSON contains the demangled name + my $json = JsonSupport::load($gcov_filename); # imported from lcovutil.pm + if (!defined($json) || + !exists($json->{"files"}) || + ref($json->{"files"} ne "ARRAY")) { + die("Unrecognized JSON output format in $gcov_filename\n"); + } + + $$basedir_ref = $json->{"current_working_directory"}; + + # Workaround for bug in MSYS GCC 9.x that encodes \ as \n in gcov JSON + # output + if ($^O eq "msys" && $$basedir_ref =~ /\n/) { + $$basedir_ref =~ s#\n#/#g; + } + + for my $file (@{$json->{"files"}}) { + # decode_json() is decoding UTF-8 strings from the JSON file into + # Perl's internal encoding, but filenames on the filesystem are + # usually UTF-8 encoded, so the filename strings need to be + # converted back to UTF-8 so that they actually match the name + # on the filesystem. + utf8::encode($file->{"file"}); + + my $filename = $file->{"file"}; + $data->{$filename} = $file; + } } - # -# intermediate_text_to_info(fd, data, srcdata) +# intermediate_text_to_info(data) # # Write DATA in info format to file descriptor FD. # # data: filename -> file_data: # file_data: concatenated lines of intermediate text data # -# srcdata: filename -> [ excl, brexcl, checksums ] -# excl: lineno -> 1 for all lines for which to exclude all data -# brexcl: lineno -> 1 for all lines for which to exclude branch data -# 2 for all lines for which to exclude exception branch data -# checksums: lineno -> source code checksum -# # Note: To simplify processing, gcov data is not combined here, that is counts # that appear multiple times for the same lines/branches are not added. 
# This is done by lcov/genhtml when reading the data files. # -sub intermediate_text_to_info($$$) +sub intermediate_text_to_info($) { - my ($fd, $data, $srcdata) = @_; - my $branch_num = 0; - my $c; - - return if (!%{$data}); - - print($fd "TN:$test_name\n"); - for my $filename (keys(%{$data})) { - my ($excl, $brexcl, $checksums); - my $lines_found = 0; - my $lines_hit = 0; - my $functions_found = 0; - my $functions_hit = 0; - my $branches_found = 0; - my $branches_hit = 0; - - if (defined($srcdata->{$filename})) { - ($excl, $brexcl, $checksums) = @{$srcdata->{$filename}}; - } - - print($fd "SF:$filename\n"); - for my $line (split(/\n/, $data->{$filename})) { - if ($line =~ /^lcount:(\d+),(\d+),?/) { - # lcount:, - # lcount:,, - if ($checksum && exists($checksums->{$1})) { - $c = ",".$checksums->{$1}; - } else { - $c = ""; - } - print($fd "DA:$1,$2$c\n") if (!$excl->{$1}); - - # Intermediate text format does not provide - # branch numbers, and the same branch may appear - # multiple times on the same line (e.g. in - # template instances). Synthesize a branch - # number based on the assumptions: - # a) the order of branches is fixed across - # instances - # b) an instance starts with an lcount line - $branch_num = 0; - - $lines_found++; - $lines_hit++ if ($2 > 0); - } elsif ($line =~ /^function:(\d+),(\d+),([^,]+)$/) { - next if (!$func_coverage || $excl->{$1}); - - # function:,, - print($fd "FN:$1,$3\n"); - print($fd "FNDA:$2,$3\n"); - - $functions_found++; - $functions_hit++ if ($2 > 0); - } elsif ($line =~ /^function:(\d+),\d+,(\d+),([^,]+)$/) { - next if (!$func_coverage || $excl->{$1}); - - # function:,,, - # - print($fd "FN:$1,$3\n"); - print($fd "FNDA:$2,$3\n"); - - $functions_found++; - $functions_hit++ if ($2 > 0); - } elsif ($line =~ /^branch:(\d+),(taken|nottaken|notexec)/) { - next if (!$br_coverage || $excl->{$1} || - (defined($brexcl->{$1}) && ($brexcl->{$1} == 1))); - - # branch:,taken|nottaken|notexec - if ($2 eq "taken") { - $c = 1; - } elsif ($2 eq "nottaken") { - $c = 0; - } else { - $c = "-"; - } - print($fd "BRDA:$1,0,$branch_num,$c\n"); - $branch_num++; - - $branches_found++; - $branches_hit++ if ($2 eq "taken"); - } - } - - if ($functions_found > 0) { - printf($fd "FNF:%s\n", $functions_found); - printf($fd "FNH:%s\n", $functions_hit); - } - if ($branches_found > 0) { - printf($fd "BRF:%s\n", $branches_found); - printf($fd "BRH:%s\n", $branches_hit); - } - printf($fd "LF:%s\n", $lines_found); - printf($fd "LH:%s\n", $lines_hit); - print($fd "end_of_record\n"); - } + my $data = shift; + my $branch_num = 0; + + return if (!%{$data}); + + my $traceFile = TraceFile->new(); + + while (my ($filename, $fileStr) = each(%$data)) { + # note that we already substituted the source file name and handled + # include/exclude directives - so no need to check here + # see 'adjust_source_filenames() and 'filter_source_files() + + lcovutil::info(1, "emit data for $filename\n"); + + # there is no meaningful parse location for this data + my $fileData = $traceFile->data($filename); + my $functionMap = $fileData->testfnc($test_name); + my $branchMap = $fileData->testbr($test_name); + my $lineMap = $fileData->test($test_name); + + if (@lcovutil::extractVersionScript) { + my $version = lcovutil::extractFileVersion($filename); + $fileData->version($version) + if (defined($version) && $version ne ""); + } + for my $line (split(/\n/, $fileStr)) { + if ($line =~ /^lcount:(\d+),(\d+),?/) { + # lcount:, + # lcount:,, + my $lineNo = $1; + my $hit = $2; + $lineMap->append($lineNo, $hit); + + # 
Intermediate text format does not provide + # branch numbers, and the same branch may appear + # multiple times on the same line (e.g. in + # template instances). Synthesize a branch + # number based on the assumptions: + # a) the order of branches is fixed across + # instances + # b) an instance starts with an lcount line + $branch_num = 0; + } elsif ($line =~ /^function:((\d+)(,(\d+))?),(\d+),(.+)$/) { + next unless $lcovutil::func_coverage; + + # function:,?, + my ($lineNo, $endline, $hit, $name) = ($2, $4, $5, $6); + my $func = + $functionMap->define_function($name, $lineNo, $endline); + $func->addAlias($name, $hit); + } elsif ($line =~ /^branch:(\d+),(taken|nottaken|notexec)/) { + my $lineNo = $1; + next + unless $lcovutil::br_coverage; + my $c; + # branch:,taken|nottaken|notexec + if ($2 eq "taken") { + $c = 1; + } elsif ($2 eq "nottaken") { + $c = 0; + } else { + $c = "-"; + } + my $br = BranchBlock->new($branch_num, $c); + # "block" is always zero for intermedaite text + $branchMap->append($lineNo, 0, $br, $filename); + ++$branch_num; + } + } + # now go through lines, functions, branches - append to test_name data + $fileData->sum()->union($lineMap); + $fileData->sumbr()->union($branchMap); + $fileData->func()->union($functionMap); + } + return $traceFile; } - # -# intermediate_json_to_info(fd, data, srcdata) +# intermediate_json_to_info(data) # # Write DATA in info format to file descriptor FD. # # data: filename -> file_data: # file_data: GCOV JSON data for file # -# srcdata: filename -> [ excl, brexcl, checksums ] -# excl: lineno -> 1 for all lines for which to exclude all data -# brexcl: lineno -> 1 for all lines for which to exclude branch data -# 2 for all lines for which to exclude exception branch data -# checksums: lineno -> source code checksum -# # Note: To simplify processing, gcov data is not combined here, that is counts # that appear multiple times for the same lines/branches are not added. # This is done by lcov/genhtml when reading the data files. 
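#
# For illustration (a sketch, not geninfo code): the JSON produced by
# "gcov --json-format" is gzip compressed; its top-level object carries
# "current_working_directory" and a "files" array, and each file entry
# holds the "functions" and "lines" records used below.  A minimal
# standalone reader using only core Perl modules might look like this;
# the subroutine name summarize_gcov_json() is invented for the example.

use strict;
use warnings;
use IO::Uncompress::Gunzip qw(gunzip $GunzipError);
use JSON::PP qw(decode_json);

sub summarize_gcov_json
{
    my $path = shift;    # e.g. "example.gcda.gcov.json.gz"
    my $text;
    gunzip($path, \$text) or
        die("could not decompress $path: $GunzipError\n");

    my $json = decode_json($text);
    foreach my $file (@{$json->{files}}) {
        # count instrumented lines with a non-zero hit count
        my $hit = grep { $_->{count} > 0 } @{$file->{lines}};
        printf("%s: %d of %d instrumented lines hit\n",
               $file->{file}, $hit, scalar(@{$file->{lines}}));
    }
}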
# -sub intermediate_json_to_info($$$) +sub intermediate_json_to_info($) { - my ($fd, $data, $srcdata) = @_; - my $branch_num = 0; - - return if (!%{$data}); - - print($fd "TN:$test_name\n"); - for my $filename (keys(%{$data})) { - my ($excl, $brexcl, $checksums); - my $file_data = $data->{$filename}; - my $lines_found = 0; - my $lines_hit = 0; - my $functions_found = 0; - my $functions_hit = 0; - my $branches_found = 0; - my $branches_hit = 0; - - if (defined($srcdata->{$filename})) { - ($excl, $brexcl, $checksums) = @{$srcdata->{$filename}}; - } - - print($fd "SF:$filename\n"); - - # Function data - if ($func_coverage) { - for my $d (@{$file_data->{"functions"}}) { - my $line = $d->{"start_line"}; - my $count = $d->{"execution_count"}; - my $name = $d->{"name"}; - - next if (!defined($line) || !defined($count) || - !defined($name) || $excl->{$line}); - - print($fd "FN:$line,$name\n"); - print($fd "FNDA:$count,$name\n"); - - $functions_found++; - $functions_hit++ if ($count > 0); - } - } - - if ($functions_found > 0) { - printf($fd "FNF:%s\n", $functions_found); - printf($fd "FNH:%s\n", $functions_hit); - } - - # Line data - for my $d (@{$file_data->{"lines"}}) { - my $line = $d->{"line_number"}; - my $count = $d->{"count"}; - my $c; - my $branches = $d->{"branches"}; - my $unexec = $d->{"unexecuted_block"}; - - next if (!defined($line) || !defined($count) || - $excl->{$line}); - - if (defined($unexec) && $unexec && $count == 0) { - $unexec = 1; - } else { - $unexec = 0; - } - - if ($checksum && exists($checksums->{$line})) { - $c = ",".$checksums->{$line}; - } else { - $c = ""; - } - print($fd "DA:$line,$count$c\n"); - - $lines_found++; - $lines_hit++ if ($count > 0); - - $branch_num = 0; - # Branch data - if ($br_coverage && (!defined($brexcl->{$line}) || - ($brexcl->{$line} != 1))) { - for my $b (@$branches) { - my $brcount = $b->{"count"}; - my $is_exception = $b->{"throw"}; - - if (!$is_exception || ((!defined($brexcl->{$line}) || - ($brexcl->{$line} != 2)) && !$no_exception_br)) { - if (!defined($brcount) || $unexec) { - $brcount = "-"; - } - print($fd "BRDA:$line,0,$branch_num,". - "$brcount\n"); - } - - $branches_found++; - $branches_hit++ if ($brcount ne "-" && $brcount > 0); - $branch_num++; - } - } - } - - if ($branches_found > 0) { - printf($fd "BRF:%s\n", $branches_found); - printf($fd "BRH:%s\n", $branches_hit); - } - printf($fd "LF:%s\n", $lines_found); - printf($fd "LH:%s\n", $lines_hit); - print($fd "end_of_record\n"); - } + my $data = shift; + my $branch_num = 0; + + return if (!%{$data}); + + my $traceFile = TraceFile->new(); + lcovutil::debug(1, + "called intermediate_json_to_info " . join(' ', keys(%$data)) . 
"\n"); + while (my ($filename, $file_data) = each(%$data)) { + # note that we already substituted the source file name and handled + # include/exclude directives - so no need to check here + # see 'adjust_source_filenames() and 'filter_source_files() + # there is no meaningful parse location for this data + my $fileData = $traceFile->data($filename); + my $functionMap = $fileData->testfnc($test_name); + my $branchMap = $fileData->testbr($test_name); + my $lineMap = $fileData->test($test_name); + my $mcdcMap = $fileData->testcase_mcdc($test_name); + lcovutil::debug(1, "parse $filename\n"); + + if (@lcovutil::extractVersionScript) { + my $version = lcovutil::extractFileVersion($filename); + $fileData->version($version) + if (defined($version) && $version ne ""); + } + + # Function data + if ($lcovutil::func_coverage) { + for my $d (@{$file_data->{"functions"}}) { + my $start_line = $d->{"start_line"}; + my $end_line = $d->{"end_line"} + if exists($d->{end_line}); + my $count = $d->{"execution_count"}; + my $name = $lcovutil::demangle_cpp_cmd ? $d->{demangled_name} : + $d->{"name"}; + my $func = + $functionMap->define_function($name, $start_line, + $end_line); + $func->addAlias($name, $count); + } + } + for my $d (@{$file_data->{"lines"}}) { + my $line = $d->{"line_number"}; + my $count = $d->{"count"}; + + my $branches = $d->{"branches"}; + my $unexec = $d->{"unexecuted_block"}; + my $conditions = $d->{'conditions'} + if $lcovutil::mcdc_coverage && exists($d->{'conditions'}); + + next + if (!defined($line) || !defined($count)); + + if (0 == scalar(@$branches) && $unexec && $count != 0) { + lcovutil::debug( + "$filename:$line: unexecuted block on non-branch line with count=$count\n" + ); + if ($lcovutil::opt_adjust_unexecuted_blocks) { + $count = 0; + } elsif ( + lcovutil::warn_once("unexecuted block", + $ERROR_INCONSISTENT_DATA) + ) { + lcovutil::ignorable_warning($ERROR_INCONSISTENT_DATA, + "$filename:$line: unexecuted block on non-branch line with non-zero hit count. Use \"geninfo --rc geninfo_unexecuted_blocks=1 to set count to zero." + ); + } + } + + # just add the line - worry about filtering later + $lineMap->append($line, $count); + + # Branch data + if ($lcovutil::br_coverage) { + + # there may be compiler-generated branch data on + # the closing brace of the function... + $branch_num = 0; + $unexec = (defined($unexec) && $unexec && $count == 0); + + for my $b (@$branches) { + my $brcount = $b->{"count"}; + my $is_exception = $b->{"throw"}; + # need to keep track of whether compiler thinks this + # branch is an exception - so we can skip it later. 
+ + if (!defined($brcount) || $unexec) { + $brcount = "-"; + } + my $entry = + BranchBlock->new($branch_num, $brcount, undef, + defined($is_exception) && $is_exception != 0); + # "block" is always zero for intermediate JSON data + $branchMap->append($line, 0, $entry, $filename); + ++$branch_num; + } + } + if ($conditions && @$conditions) { + #die("unexpected multiple conditions at $line") if scalar(@$conditions) > 1; + #lcovutil::debug(1, "MCDC at $filename:$line\n"); + my $mcdc = $mcdcMap->new_mcdc($fileData, $line); + + foreach my $c (@$conditions) { + my $count = $c->{count}; + my $exprs = $count / 2; + my $coverage = $c->{covered}; + my $false = $c->{not_covered_false}; + my $true = $c->{not_covered_true}; + die("expected even condition count: $count") if $count % 2; + my @true; + my @false; + + foreach my $m (sort @$false) { + $false[$m] = 0; + } + foreach my $m (sort @$true) { + $true[$m] = 0; + } + for (my $i = 0; $i < $exprs; ++$i) { + $mcdc->insertExpr($filename, $exprs, 0, + !defined($false[$i]), + $i, $i); + $mcdc->insertExpr($filename, $exprs, 1, + !defined($true[$i]), + $i, $i); + } + } + $mcdcMap->close_mcdcBlock($mcdc); + } + } + # now go through lines, functions, branches - append to test_name data + $fileData->sum()->union($lineMap); + $fileData->sumbr()->union($branchMap); + $fileData->mcdc()->union($mcdcMap); + $fileData->func()->union($functionMap); + } + return $traceFile; } - -sub get_output_fd($$) +sub which($) { - my ($outfile, $file) = @_; - my $fd; - - if (!defined($outfile)) { - open($fd, ">", "$file.info") or - die("ERROR: Cannot create file $file.info: $!\n"); - } elsif ($outfile eq "-") { - open($fd, ">&STDOUT") or - die("ERROR: Cannot duplicate stdout: $!\n"); - } else { - open($fd, ">>", $outfile) or - die("ERROR: Cannot write to file $outfile: $!\n"); - } - - return $fd; + my $filename = shift; + + return $filename if (file_name_is_absolute($filename)); + foreach my $dir (File::Spec->path()) { + my $p = catfile($dir, $filename); + return $p if (-x $p); + } + return $filename; } +sub check_gcov_fail($$) +{ + my ($msg, $filename) = @_; + + if ($msg =~ /version\s+'([^']+)',\s+prefer\s+'([^']+)'/) { + my $have = $1; + my $want = $2; + foreach my $f (\$have, \$want) { + if ($$f =~ /^(.)0(.)\*$/) { + # version ID numbering in the gcda/gcno file is not entirely + # clear to me - but it appears to be "major.0.minor" - where + # major is integral for versions older than gcc/10, and hex +1 + # for versions after gcc/10. + my $major = + (ord($1) >= ord('A')) ? (ord($1) - ord('A') + 9) : $1; + $$f = sprintf("%d.%d", $major, $2); + } + } + my $path = which($gcov_tool[0]); + lcovutil::ignorable_error($lcovutil::ERROR_VERSION, + "Incompatible GCC/GCOV version found while processing $filename:\n\tYour test was built with '$have'.\n\tYou are trying to capture with gcov tool '$path' which is version '$want'." + ); + return 1; + } + return 0; +} # -# print_gcov_warnings(stderr_file, is_graph, map) +# print_gcov_warnings(type, stderr_file, is_graph, map) # # Print GCOV warnings in file STDERR_FILE to STDERR. If IS_GRAPH is non-zero, # suppress warnings about missing as these are expected. Replace keys found # in MAP with their values. # -sub print_gcov_warnings($$$) +sub print_gcov_warnings($$$$) { - my ($stderr_file, $is_graph, $map) = @_; - my $fd; - - if (!open($fd, "<", $stderr_file)) { - warn("WARNING: Could not open GCOV stderr file ". 
- "$stderr_file: $!\n"); - return; - } - while (my $line = <$fd>) { - next if ($is_graph && $line =~ /cannot open data file/); - - for my $key (keys(%{$map})) { - $line =~ s/\Q$key\E/$map->{$key}/g; - } - - print(STDERR $line); - } - close($fd); -} + my ($type, $data, $is_graph, $map) = @_; + my $leader = "$type:\n "; + foreach my $line (split('\n', $data)) { + next if ($is_graph && $line =~ /cannot open data file/); + + for my $key (keys(%{$map})) { + $line =~ s/\Q$key\E/$map->{$key}/g; + } + + print(STDERR $leader, $line, "\n"); + $leader = ' '; + } +} # -# process_intermediate(file, dir, tempdir) +# process_intermediate(directory, file, gcno_file, tempdir) # # Create output for a single file (either a data file or a graph file) using # gcov's intermediate option. # -sub process_intermediate($$$) +sub process_intermediate($$$$) { - my ($file, $dir, $tempdir) = @_; - my ($fdir, $fbase, $fext); - my $data_file; - my $errmsg; - my %data; - my $fd; - my $base; - my $srcdata; - my $is_graph = 0; - my ($out, $err, $rc); - my $json_basedir; - my $json_format; - - info("Processing %s\n", abs2rel($file, $dir)); - - $file = solve_relative_path($cwd, $file); - ($fdir, $fbase, $fext) = split_filename($file); - - $is_graph = 1 if (".$fext" eq $graph_file_extension); - - if ($is_graph) { - # Process graph file - copy to temp directory to prevent - # accidental processing of associated data file - $data_file = "$tempdir/$fbase$graph_file_extension"; - if (!copy($file, $data_file)) { - $errmsg = "ERROR: Could not copy file $file"; - goto err; - } - } else { - # Process data file in place - $data_file = $file; - } - - # Change directory - if (!chdir($tempdir)) { - $errmsg = "Could not change to directory $tempdir: $!"; - goto err; - } - - # Run gcov on data file - ($out, $err, $rc) = system_no_output(1 + 2 + 4, $gcov_tool, - $data_file, @gcov_options, "-i"); - defined($out) && unlink($out); - if (defined($err)) { - print_gcov_warnings($err, $is_graph, { - $data_file => $file, - }); - unlink($err); - } - if ($rc) { - $errmsg = "GCOV failed for $file"; - goto err; - } - - if ($is_graph) { - # Remove graph file copy - unlink($data_file); - } - - # Parse resulting file(s) - for my $gcov_filename (glob("*.gcov")) { - read_intermediate_text($gcov_filename, \%data); - unlink($gcov_filename); - } - - for my $gcov_filename (glob("*.gcov.json.gz")) { - read_intermediate_json($gcov_filename, \%data, \$json_basedir); - unlink($gcov_filename); - $json_format = 1; - } - - if (!%data) { - warn("WARNING: GCOV did not produce any data for $file\n"); - return; - } - - # Determine base directory - if (defined($base_directory)) { - $base = $base_directory; - } elsif (defined($json_basedir)) { - $base = $json_basedir; - } else { - $base = $fdir; - - if (is_compat($COMPAT_MODE_LIBTOOL)) { - # Avoid files from .libs dirs - $base =~ s/\.libs$//; - } - - # Try to find base directory automatically if requested by user - if ($rc_auto_base) { - $base = find_base_from_source($base, [ keys(%data) ]); - } - } - - # Apply base file name to relative source files - adjust_source_filenames(\%data, $base); - - # Remove excluded source files - filter_source_files(\%data); - - # Get data on exclusion markers and checksums if requested - if (!$no_markers || $checksum) { - $srcdata = get_all_source_data(keys(%data)); - } - - # Generate output - $fd = get_output_fd($output_filename, $file); - if ($json_format) { - intermediate_json_to_info($fd, \%data, $srcdata); - } else { - intermediate_text_to_info($fd, \%data, $srcdata); - } - close($fd); - 
- chdir($cwd); - - return; - -err: - if ($ignore[$ERROR_GCOV]) { - warn("WARNING: $errmsg!\n"); - } else { - die("ERROR: $errmsg!\n") - } + my ($searchdir, $file, $gcno_file, $tempdir) = @_; + my $data_file; + my $errmsg; + my $errorType = $lcovutil::ERROR_GCOV; + my %data; + my $base; + my $json_basedir; + my $json_format; + + my $filename = defined($file) ? $file : $gcno_file; + $file = solve_relative_path($cwd, $filename); + my ($fdir, $fbase, $fext) = split_filename($file); + + my $is_graph = (".$fext" eq $graph_file_extension); + + if ($is_graph) { + # Process graph file - copy to temp directory to prevent + # accidental processing of associated data file + $data_file = + File::Spec->catfile($tempdir, "$fbase$graph_file_extension"); + if (!copy($file, $data_file)) { + $errmsg = "ERROR: Could not copy file $file: $!"; + $errorType = $lcovutil::ERROR_PATH; + goto err; + } + } else { + $gcno_file = solve_relative_path($cwd, $gcno_file); + foreach my $f ($file, $gcno_file) { + my $p = -l $f ? Cwd::abs_path($f) : $f; + if (!-r $p) { + $errmsg = "$f does not exist/is not readable"; + $errorType = $lcovutil::ERROR_PATH; + goto err; + } + } + # if .gcda and .gcno files are in the same directory, then simply + # process in place - otherwise, link the .gcda and .gcno files + # into tempdir and run from here + if (dirname($file) eq dirname($gcno_file)) { + # Process data file in place + $data_file = $file; + } else { + $data_file = basename($file); + foreach my $f ($file, $gcno_file) { + my $l = File::Spec->catfile($tempdir, basename($f)); + debug("create links to process $f in $tempdir\n"); + symlink($f, $l); + # unclear why symlink returns an error code when it actually + # created the link + if ($? && !-l $l) { + $errmsg = "unable to create link for $f: $!"; + goto err; + } + } + } + } + + # Change directory + debug(2, "chdir to tempdir $tempdir\n"); + if (!chdir($tempdir)) { + $errmsg = "Could not change to directory $tempdir: $!"; + goto err; + } + + # Run gcov on data file + debug("gcov: " . join(' ', @gcov_tool) . " $data_file\n"); + my $now = Time::HiRes::gettimeofday(); + my ($out, $err, $rc) = system_no_output(1 + 2 + 4, @gcov_tool, $data_file); + my $then = Time::HiRes::gettimeofday(); + $lcovutil::profileData{exec}{$filename} = $then - $now; + print_gcov_warnings('stdout', $out, $is_graph, {$data_file => $file,}) + if ('' ne $out && + (0 != $rc || + $lcovutil::verbose > 1)); + unlink($out); + debug(2, "chdir back to '$cwd'\n"); + chdir($cwd) or die("can't cd back to $cwd: $!"); + print_gcov_warnings('stderr', $err, $is_graph, {$data_file => $file,}) + if ('' ne $err && + (0 != $rc || + $lcovutil::verbose)); + + my $gcovOutGlobPattern = + "$tempdir/*.gcov $tempdir/.*.gcov $tempdir/*.gcov.json.gz $tempdir/.*gcov.json.gz"; + + if (0 != $rc) { + if (check_gcov_fail($err, $file)) { + return; + } + $errmsg = "GCOV failed for $file"; + # can parse the error log to see if it spaced out - then return + # code so parent can catch it + if ($err =~ /out of memory allocating/) { + lcovutil::info("spaceout calling gcov for '$data_file'\n"); + $errmsg .= ' out of memory'; + $errorType = $lcovutil::ERROR_CHILD + if 1 != $lcovutil::maxParallelism; + } + goto err; + } + + if ($is_graph) { + # Remove graph file copy + unlink($data_file) unless $lcovutil::preserve_intermediates; + } + + # Parse resulting file(s) + # 'meson' build system likes to use "." as leading character in generated + # files. Seems an unfortunate decision. 
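        # For illustration (aside, not part of the change): Perl's glob()
        # follows shell conventions, so a bare "*.gcov" pattern does not
        # match dot-prefixed outputs such as ".example.c.gcov"; that is why
        # $gcovOutGlobPattern above lists the ".*" variants explicitly, e.g.
        #     glob("$tempdir/*.gcov $tempdir/.*.gcov")
        # returns both "example.c.gcov" and ".example.c.gcov" when present.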
+ my $start = Time::HiRes::gettimeofday(); + for my $gcov_filename (glob($gcovOutGlobPattern)) { + eval { + if ($gcov_filename =~ /\.gcov\.json/) { + read_intermediate_json($gcov_filename, \%data, \$json_basedir); + $json_format = 1; + } else { + read_intermediate_text($gcov_filename, \%data); + } + if ($lcovutil::preserve_intermediates) { + File::Copy::move($gcov_filename, $fdir) or + die("cannot rename $gcov_filename: $!"); + } else { + unlink($gcov_filename); + } + }; + if ($@) { + if (1 != $lcovutil::maxParallelism && + $@ =~ /(integrity check failed|cannot start)/) { + # looks like we ran out of memory.. + # maybe need new error type ERROR_MEMORY + #$errorType = $lcovutil::ERROR_GCOV; + $errmsg = $@; + goto err; + } else { + die("read_intermediate failed: $@"); + } + } + } + my $end = Time::HiRes::gettimeofday(); + $lcovutil::profileData{read}{$filename} = $end - $start; + + if (!%data) { + ignorable_warning($ERROR_GCOV, + "GCOV did not produce any data for $file"); + return; + } + + # Determine base directory + if (defined($base_directory)) { + $base = $base_directory; + } elsif (defined($json_basedir)) { + $base = $json_basedir; + } else { + $base = $fdir; + + if (is_compat($COMPAT_MODE_LIBTOOL)) { + # Avoid files from .libs dirs + $base =~ s/\.libs$//; + } + + # Try to find base directory automatically if requested by user + if ($lcovutil::rc_auto_base) { + $base = find_base_from_source($base, [keys(%data)]); + } + } + + # Apply base file name to relative source files + adjust_source_filenames(\%data, $base); + + # Remove excluded source files + filter_source_files(\%data); + + # Generate output + my $trace = + $json_format ? intermediate_json_to_info(\%data) : + intermediate_text_to_info(\%data); + my $done = Time::HiRes::gettimeofday(); + $lcovutil::profileData{translate}{$filename} = $done - $end; + + return $trace; + + err: + unlink(glob($gcovOutGlobPattern)); # clean up - in case gcov died + ignorable_error($errorType, "$errmsg!"); + return undef; } - # Map LLVM versions to the version of GCC gcov which they emulate. sub map_llvm_version($) { - my ($ver) = @_; + my $ver = shift; - return 0x040200 if ($ver >= 0x030400); + return 0x040200 if ($ver >= 0x030400); - warn("WARNING: This version of LLVM's gcov is unknown. ". - "Assuming it emulates GCC gcov version 4.2.\n"); + warn("This version of LLVM's gcov is unknown. " . + "Assuming it emulates GCC gcov version 4.2.\n"); - return 0x040200; + return 0x040200; } - # Return a readable version of encoded gcov version. sub version_to_str($) { - my ($ver) = @_; - my ($a, $b, $c); + my $ver = shift; - $a = $ver >> 16 & 0xff; - $b = $ver >> 8 & 0xff; - $c = $ver & 0xff; + my $a = $ver >> 16 & 0xff; + my $b = $ver >> 8 & 0xff; + my $c = $ver & 0xff; - return "$a.$b.$c"; + return "$a.$b.$c"; } - # # Get the GCOV tool version. Return an integer number which represents the # GCOV version. Version numbers can be compared using standard integer @@ -2647,80 +3041,75 @@ sub version_to_str($) sub get_gcov_version() { - local *HANDLE; - my $version_string; - my $result; - my ($a, $b, $c) = (4, 2, 0); # Fallback version - - # Examples for gcov version output: - # - # gcov (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3) - # - # gcov (crosstool-NG 1.18.0) 4.7.2 - # - # LLVM (http://llvm.org/): - # LLVM version 3.4svn - # - # Apple LLVM version 8.0.0 (clang-800.0.38) - # Optimized build. 
- # Default target: x86_64-apple-darwin16.0.0 - # Host CPU: haswell - - open(GCOV_PIPE, "-|", "$gcov_tool --version") - or die("ERROR: cannot retrieve gcov version!\n"); - local $/; - $version_string = ; - close(GCOV_PIPE); - - # Remove all bracketed information - $version_string =~ s/\([^\)]*\)//g; - - if ($version_string =~ /(\d+)\.(\d+)(\.(\d+))?/) { - ($a, $b, $c) = ($1, $2, $4); - $c = 0 if (!defined($c)); - } else { - warn("WARNING: cannot determine gcov version - ". - "assuming $a.$b.$c\n"); - } - $result = $a << 16 | $b << 8 | $c; - - if ($version_string =~ /LLVM/) { - $result = map_llvm_version($result); - info("Found LLVM gcov version $a.$b.$c, which emulates gcov ". - "version ".version_to_str($result)."\n"); - } else { - info("Found gcov version: ".version_to_str($result)."\n"); - } - - return ($result, $version_string); + local *HANDLE; + my ($a, $b, $c) = (4, 2, 0); # Fallback version + + # Examples for gcov version output: + # + # gcov (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3) + # + # gcov (crosstool-NG 1.18.0) 4.7.2 + # + # LLVM (http://llvm.org/): + # LLVM version 3.4svn + # + # Apple LLVM version 8.0.0 (clang-800.0.38) + # Optimized build. + # Default target: x86_64-apple-darwin16.0.0 + # Host CPU: haswell + + if ($lcovutil::verbose) { + my $which = which($gcov_tool[0]); + lcovutil::info("gcov is '$which'\n"); + } + + open(GCOV_PIPE, "-|", "\"$gcov_tool[0]\" --version") or + die("cannot retrieve gcov version: $!\n"); + local $/; + my $version_string = ; + close(GCOV_PIPE) or die("unable to close gcov pipe: $!\n"); + + # Remove all bracketed information + $version_string =~ s/\([^\)]*\)//g; + + if ($version_string =~ /(\d+)\.(\d+)(\.(\d+))?/) { + ($a, $b, $c) = ($1, $2, $4); + $c = 0 if (!defined($c)); + $version_string = (split('\n', $version_string))[0]; + chomp($version_string); + } else { + warn("cannot determine gcov version - assuming $a.$b.$c\n"); + } + my $result = $a << 16 | $b << 8 | $c; + + if ($version_string =~ /LLVM/) { + $result = map_llvm_version($result); + info("Found LLVM gcov version $a.$b.$c, which emulates gcov " . + "version " . version_to_str($result) . "\n"); + } else { + info("Found gcov version: " . version_to_str($result) . "\n"); + } + + return ($result, $version_string); } - # # info(printf_parameter) # -# Use printf to write PRINTF_PARAMETER to stdout only when the $quiet flag -# is not set. 
+# Use printf to write PRINTF_PARAMETER to stdout only when not --quiet # -sub info(@) +sub my_info(@) { - if (!$quiet) - { - # Print info string - if (defined($output_filename) && ($output_filename eq "-")) - { - # Don't interfere with the .info output to STDOUT - printf(STDERR @_); - } - else - { - printf(@_); - } - } + # Print info string + if (defined($output_filename) && ($output_filename eq "-")) { + # Don't interfere with the .info output to STDOUT + printf(STDERR @_); + } else { + printf(@_); + } } - # # int_handler() # @@ -2729,534 +3118,119 @@ sub info(@) sub int_handler() { - if ($cwd) { chdir($cwd); } - info("Aborted.\n"); - exit(1); -} - - -# -# system_no_output(mode, parameters) -# -# Call an external program using PARAMETERS while suppressing depending on -# the value of MODE: -# -# MODE & 1: suppress STDOUT -# MODE & 2: suppress STDERR -# MODE & 4: redirect to temporary files instead of suppressing -# -# Return (stdout, stderr, rc): -# stdout: path to tempfile containing stdout or undef -# stderr: path to tempfile containing stderr or undef -# 0 on success, non-zero otherwise -# - -sub system_no_output($@) -{ - my $mode = shift; - my $result; - local *OLD_STDERR; - local *OLD_STDOUT; - my $stdout_file; - my $stderr_file; - my $fd; - - # Save old stdout and stderr handles - ($mode & 1) && open(OLD_STDOUT, ">>&", "STDOUT"); - ($mode & 2) && open(OLD_STDERR, ">>&", "STDERR"); - - if ($mode & 4) { - # Redirect to temporary files - if ($mode & 1) { - ($fd, $stdout_file) = tempfile(UNLINK => 1); - open(STDOUT, ">", $stdout_file) || warn("$!\n"); - close($fd); - } - if ($mode & 2) { - ($fd, $stderr_file) = tempfile(UNLINK => 1); - open(STDERR, ">", $stderr_file) || warn("$!\n"); - close($fd); - } - } else { - # Redirect to /dev/null - ($mode & 1) && open(STDOUT, ">", "/dev/null"); - ($mode & 2) && open(STDERR, ">", "/dev/null"); - } - - debug("system(".join(' ', @_).")\n"); - system(@_); - $result = $?; - - # Close redirected handles - ($mode & 1) && close(STDOUT); - ($mode & 2) && close(STDERR); - - # Restore old handles - ($mode & 1) && open(STDOUT, ">>&", "OLD_STDOUT"); - ($mode & 2) && open(STDERR, ">>&", "OLD_STDERR"); - - # Remove empty output files - if (defined($stdout_file) && -z $stdout_file) { - unlink($stdout_file); - $stdout_file = undef; - } - if (defined($stderr_file) && -z $stderr_file) { - unlink($stderr_file); - $stderr_file = undef; - } - - return ($stdout_file, $stderr_file, $result); -} - - -# -# read_config(filename) -# -# Read configuration file FILENAME and return a reference to a hash containing -# all valid key=value pairs found. -# - -sub read_config($) -{ - my $filename = $_[0]; - my %result; - my $key; - my $value; - local *HANDLE; - - if (!open(HANDLE, "<", $filename)) - { - warn("WARNING: cannot read configuration file $filename\n"); - return undef; - } - while () - { - chomp; - # Skip comments - s/#.*//; - # Remove leading blanks - s/^\s+//; - # Remove trailing blanks - s/\s+$//; - next unless length; - ($key, $value) = split(/\s*=\s*/, $_, 2); - if (defined($key) && defined($value)) - { - $result{$key} = $value; - } - else - { - warn("WARNING: malformed statement in line $. ". - "of configuration file $filename\n"); - } - } - close(HANDLE); - return \%result; + if ($cwd) { chdir($cwd); } + info("Aborted.\n"); + exit(1); } - -# -# apply_config(REF) -# -# REF is a reference to a hash containing the following mapping: -# -# key_string => var_ref -# -# where KEY_STRING is a keyword and VAR_REF is a reference to an associated -# variable. 
If the global configuration hashes CONFIG or OPT_RC contain a value -# for keyword KEY_STRING, VAR_REF will be assigned the value for that keyword. -# - -sub apply_config($) -{ - my $ref = $_[0]; - - foreach (keys(%{$ref})) - { - if (defined($opt_rc{$_})) { - ${$ref->{$_}} = $opt_rc{$_}; - } elsif (defined($config->{$_})) { - ${$ref->{$_}} = $config->{$_}; - } - } -} - - -# -# get_source_data(filename) -# -# Scan specified source code file for exclusion markers and checksums. Return -# ( excl, brexcl, checksums ) where -# excl: lineno -> 1 for all lines for which to exclude all data -# brexcl: lineno -> 1 for all lines for which to exclude branch data -# checksums: lineno -> source code checksum -# - -sub get_source_data($) -{ - my ($filename) = @_; - my %list; - my $flag = 0; - my %brdata; - my $brflag = 0; - my $exceptionbrflag = 0; - my %checksums; - local *HANDLE; - - if (!open(HANDLE, "<", $filename)) { - warn("WARNING: could not open $filename\n"); - return; - } - while () { - if (/$EXCL_STOP/) { - $flag = 0; - } elsif (/$EXCL_START/) { - $flag = 1; - } - if (/$excl_line/ || $flag) { - $list{$.} = 1; - } - if (/$EXCL_BR_STOP/) { - $brflag = 0; - } elsif (/$EXCL_BR_START/) { - $brflag = 1; - } - if (/$EXCL_EXCEPTION_BR_STOP/) { - $exceptionbrflag = 0; - } elsif (/$EXCL_EXCEPTION_BR_START/) { - $exceptionbrflag = 1; - } - if (/$excl_br_line/ || $brflag) { - $brdata{$.} = 1; - } elsif (/$excl_exception_br_line/ || $exceptionbrflag) { - $brdata{$.} = 2; - } - if ($checksum) { - chomp(); - $checksums{$.} = md5_base64($_); - } - if ($intermediate && !$gcov_caps->{'json-format'} && - /($EXCL_EXCEPTION_BR_STOP|$EXCL_EXCEPTION_BR_START|$excl_exception_br_line)/) { - warn("WARNING: $1 found at $filename:$. but branch exceptions ". - "exclusion is not supported when using text intermediate ". - "format\n"); - } - } - close(HANDLE); - - if ($flag || $brflag || $exceptionbrflag) { - warn("WARNING: unterminated exclusion section in $filename\n"); - } - - return (\%list, \%brdata, \%checksums); -} - - -# -# get_all_source_data(filenames) -# -# Scan specified source code files for exclusion markers and return -# filename -> [ excl, brexcl, checksums ] -# excl: lineno -> 1 for all lines for which to exclude all data -# brexcl: lineno -> 1 for all lines for which to exclude branch data -# checksums: lineno -> source code checksum -# - -sub get_all_source_data(@) -{ - my @filenames = @_; - my %data; - my $failed = 0; - - for my $filename (@filenames) { - my @d; - next if (exists($data{$filename})); - - @d = get_source_data($filename); - if (@d) { - $data{$filename} = [ @d ]; - } else { - $failed = 1; - } - } - - if ($failed) { - warn("WARNING: some exclusion markers may be ignored\n"); - } - - return \%data; -} - - -# -# apply_exclusion_data(instr, graph) -# -# Remove lines from instr and graph data structures which are marked -# for exclusion in the source code file. -# -# Return adjusted (instr, graph). -# -# graph : file name -> function data -# function data : function name -> line data -# line data : [ line1, line2, ... ] -# -# instr : filename -> line data -# line data : [ line1, line2, ... 
] -# - -sub apply_exclusion_data($$) -{ - my ($instr, $graph) = @_; - my $filename; - my $excl_data; - - ($excl_data) = get_all_source_data(keys(%{$graph}), keys(%{$instr})); - - # Skip if no markers were found - return ($instr, $graph) if (!%$excl_data); - - # Apply exclusion marker data to graph - foreach $filename (keys(%$excl_data)) { - my $function_data = $graph->{$filename}; - my $excl = $excl_data->{$filename}->[0]; - my $function; - - next if (!defined($function_data)); - - foreach $function (keys(%{$function_data})) { - my $line_data = $function_data->{$function}; - my $line; - my @new_data; - - # To be consistent with exclusion parser in non-initial - # case we need to remove a function if the first line - # was excluded - if ($excl->{$line_data->[0]}) { - delete($function_data->{$function}); - next; - } - # Copy only lines which are not excluded - foreach $line (@{$line_data}) { - push(@new_data, $line) if (!$excl->{$line}); - } - - # Store modified list - if (scalar(@new_data) > 0) { - $function_data->{$function} = \@new_data; - } else { - # All of this function was excluded - delete($function_data->{$function}); - } - } - - # Check if all functions of this file were excluded - if (keys(%{$function_data}) == 0) { - delete($graph->{$filename}); - } - } - - # Apply exclusion marker data to instr - foreach $filename (keys(%$excl_data)) { - my $line_data = $instr->{$filename}; - my $excl = $excl_data->{$filename}->[0]; - my $line; - my @new_data; - - next if (!defined($line_data)); - - # Copy only lines which are not excluded - foreach $line (@{$line_data}) { - push(@new_data, $line) if (!$excl->{$line}); - } - - # Store modified list - $instr->{$filename} = \@new_data; - } - - return ($instr, $graph); -} - - sub process_graphfile($$) { - my ($file, $dir) = @_; - my $graph_filename = $file; - my $graph_dir; - my $graph_basename; - my $source_dir; - my $base_dir; - my $graph; - my $instr; - my $filename; - local *INFO_HANDLE; - - info("Processing %s\n", abs2rel($file, $dir)); - - # Get path to data file in absolute and normalized form (begins with /, - # contains no more ../ or ./) - $graph_filename = solve_relative_path($cwd, $graph_filename); - - # Get directory and basename of data file - ($graph_dir, $graph_basename) = split_filename($graph_filename); - - $source_dir = $graph_dir; - if (is_compat($COMPAT_MODE_LIBTOOL)) { - # Avoid files from .libs dirs - $source_dir =~ s/\.libs$//; - } - - # Construct base_dir for current file - if ($base_directory) - { - $base_dir = $base_directory; - } - else - { - $base_dir = $source_dir; - } - - # Ignore empty graph file (e.g. 
source file with no statement) - if (-z $graph_filename) - { - warn("WARNING: empty $graph_filename (skipped)\n"); - return; - } - - if ($gcov_version < $GCOV_VERSION_3_4_0) - { - if (is_compat($COMPAT_MODE_HAMMER)) - { - ($instr, $graph) = read_bbg($graph_filename); - } - else - { - ($instr, $graph) = read_bb($graph_filename); - } - } - else - { - ($instr, $graph) = read_gcno($graph_filename); - } - - # Try to find base directory automatically if requested by user - if ($rc_auto_base) { - $base_dir = find_base_from_source($base_dir, - [ keys(%{$instr}), keys(%{$graph}) ]); - } - - adjust_source_filenames($instr, $base_dir); - adjust_source_filenames($graph, $base_dir); - - if (!$no_markers) { - # Apply exclusion marker data to graph file data - ($instr, $graph) = apply_exclusion_data($instr, $graph); - } - - # Check whether we're writing to a single file - if ($output_filename) - { - if ($output_filename eq "-") - { - *INFO_HANDLE = *STDOUT; - } - else - { - # Append to output file - open(INFO_HANDLE, ">>", $output_filename) - or die("ERROR: cannot write to ". - "$output_filename!\n"); - } - } - else - { - # Open .info file for output - open(INFO_HANDLE, ">", "$graph_filename.info") - or die("ERROR: cannot create $graph_filename.info!\n"); - } - - # Write test name - printf(INFO_HANDLE "TN:%s\n", $test_name); - foreach $filename (sort(keys(%{$instr}))) - { - my $funcdata = $graph->{$filename}; - my $line; - my $linedata; - - # Skip external files if requested - if (!$opt_external) { - if (is_external($filename)) { - info(" ignoring data for external file ". - "$filename\n"); - next; - } - } - - print(INFO_HANDLE "SF:$filename\n"); - - if (defined($funcdata) && $func_coverage) { - my @functions = sort {$funcdata->{$a}->[0] <=> - $funcdata->{$b}->[0]} - keys(%{$funcdata}); - my $func; - - # Gather list of instrumented lines and functions - foreach $func (@functions) { - $linedata = $funcdata->{$func}; - - # Print function name and starting line - print(INFO_HANDLE "FN:".$linedata->[0]. - ",".filter_fn_name($func)."\n"); - } - # Print zero function coverage data - foreach $func (@functions) { - print(INFO_HANDLE "FNDA:0,". 
- filter_fn_name($func)."\n"); - } - # Print function summary - print(INFO_HANDLE "FNF:".scalar(@functions)."\n"); - print(INFO_HANDLE "FNH:0\n"); - } - # Print zero line coverage data - foreach $line (@{$instr->{$filename}}) { - print(INFO_HANDLE "DA:$line,0\n"); - } - # Print line summary - print(INFO_HANDLE "LF:".scalar(@{$instr->{$filename}})."\n"); - print(INFO_HANDLE "LH:0\n"); - - print(INFO_HANDLE "end_of_record\n"); - } - if (!($output_filename && ($output_filename eq "-"))) - { - close(INFO_HANDLE); - } -} - -sub filter_fn_name($) -{ - my ($fn) = @_; - - # Remove characters used internally as function name delimiters - $fn =~ s/[,=]/_/g; - - return $fn; -} - -sub warn_handler($) -{ - my ($msg) = @_; - - warn("$tool_name: $msg"); + my ($dirname, $file) = @_; + my $graph_filename = $file; + my $source_dir; + my $base_dir; + + # Get path to data file in absolute and normalized form (begins with /, + # contains no more ../ or ./) + $graph_filename = solve_relative_path($cwd, $graph_filename); + + # Get directory and basename of data file + my ($graph_dir, $graph_basename) = split_filename($graph_filename); + + $source_dir = $graph_dir; + if (is_compat($COMPAT_MODE_LIBTOOL)) { + # Avoid files from .libs dirs + $source_dir =~ s/\.libs$//; + } + + # Construct base_dir for current file + if ($base_directory) { + $base_dir = $base_directory; + } else { + $base_dir = $source_dir; + } + + # Ignore empty graph file (e.g. source file with no statement) + if (-z $graph_filename) { + lcovutil::ignorable_error($lcovutil::ERROR_EMPTY, + "empty $graph_filename"); + return undef; + } + + my ($instr, $graph) = read_gcno($graph_filename); + + # Try to find base directory automatically if requested by user + if ($lcovutil::rc_auto_base) { + $base_dir = find_base_from_source($base_dir, + [keys(%{$instr}), keys(%{$graph})]); + } + + adjust_source_filenames($instr, $base_dir); + adjust_source_filenames($graph, $base_dir); + + my $traceFile = TraceFile->new(); + foreach my $filename (sort(keys(%{$instr}))) { + my $funcdata = $graph->{$filename}; + my $line; + my $linedata; + + # Skip external files if requested + if (is_external($filename)) { + info(" ignoring data for external file $filename\n"); + next; + } + # there is no meaningful parse location for this data + my $fileData = $traceFile->data($filename); + my $functionMap = $fileData->testfnc($test_name); + my $lineMap = $fileData->test($test_name); + + if (@lcovutil::extractVersionScript) { + my $version = lcovutil::extractFileVersion($filename); + $fileData->version($version) + if (defined($version) && $version ne ""); + } + if (defined($funcdata) && $lcovutil::func_coverage) { + my @functions = + sort({ $funcdata->{$a}->[0] <=> $funcdata->{$b}->[0] } + keys(%{$funcdata})); + # Gather list of instrumented lines and functions + foreach my $func (@functions) { + $linedata = $funcdata->{$func}; + my $lineNo = $linedata->[0]; + my $fnName = + filter_fn_name($func, defined($lcovutil::demangle_cpp_cmd)); + my $func = $functionMap->define_function($fnName, $lineNo); + $func->addAlias($fnName, 0); + } + } + # Print zero line coverage data + foreach $line (@{$instr->{$filename}}) { + $lineMap->append($line, 0); + } + # now go through lines, functions, branches - append to test_name data + $fileData->sum()->union($lineMap); + $fileData->func()->union($functionMap); + } + return $traceFile; } -sub die_handler($) +sub filter_fn_name($$) { - my ($msg) = @_; - - die("$tool_name: $msg"); + my ($fn, $demangle) = @_; + my $f; + if ($demangle) { + $f = 
`$lcovutil::demangle_cpp_cmd $fn`; + chomp($f); + die("unable to demangle '$fn': $!") if ($?); + } else { + $f = $fn; + } + # Remove characters used internally as function name delimiters + $f =~ s/[,=]/_/g; + + return $f; } - # # graph_error(filename, message) # @@ -3266,31 +3240,14 @@ sub die_handler($) sub graph_error($$) { - my ($filename, $msg) = @_; - - if ($ignore[$ERROR_GRAPH]) { - warn("WARNING: $filename: $msg - skipping\n"); - return; - } - die("ERROR: $filename: $msg\n"); -} - -# -# graph_expect(description) -# -# If debug is set to a non-zero value, print the specified description of what -# is expected to be read next from the graph file. -# - -sub graph_expect($) -{ - my ($msg) = @_; - - if (!$debug || !defined($msg)) { - return; - } - - print(STDERR "DEBUG: expecting $msg\n"); + my ($filename, $msg) = @_; + + ignorable_error($ERROR_GRAPH, + "$filename: $msg" + . + (lcovutil::is_ignored($ERROR_GRAPH) ? ' - skipping' : '' + ) . + '.'); } # @@ -3303,51 +3260,54 @@ sub graph_expect($) sub graph_read(*$;$$) { - my ($handle, $length, $desc, $peek) = @_; - my $data; - my $result; - my $pos; - - graph_expect($desc); - if ($peek) { - $pos = tell($handle); - if ($pos == -1) { - warn("Could not get current file position: $!\n"); - return undef; - } - } - $result = read($handle, $data, $length); - if ($debug) { - my $op = $peek ? "peek" : "read"; - my $ascii = ""; - my $hex = ""; - my $i; - - print(STDERR "DEBUG: $op($length)=$result: "); - for ($i = 0; $i < length($data); $i++) { - my $c = substr($data, $i, 1);; - my $n = ord($c); - - $hex .= sprintf("%02x ", $n); - if ($n >= 32 && $n <= 127) { - $ascii .= $c; - } else { - $ascii .= "."; - } - } - print(STDERR "$hex |$ascii|"); - print(STDERR "\n"); - } - if ($peek) { - if (!seek($handle, $pos, 0)) { - warn("Could not set file position: $!\n"); - return undef; - } - } - if ($result != $length) { - return undef; - } - return $data; + my ($handle, $length, $desc, $peek) = @_; + my $data; + my $pos; + + lcovutil::debug(2, $desc); + if ($peek) { + $pos = tell($handle); + if ($pos == -1) { + lcovutil::ignorable_error($lcovutil::ERROR_CORRUPT, + "Could not get current file position: $!"); + return undef; + } + } + my $result = read($handle, $data, $length); + # LCOV_EXCL_START + if ($debug && + $debug >= 2) { + my $op = $peek ? 
"peek" : "read"; + my $ascii = ""; + my $hex = ""; + my $i; + + my $msg = "$op($length)=$result: "; + for ($i = 0; $i < length($data); $i++) { + my $c = substr($data, $i, 1); + my $n = ord($c); + + $hex .= sprintf("%02x ", $n); + if ($n >= 32 && $n <= 127) { + $ascii .= $c; + } else { + $ascii .= "."; + } + } + lcovutil::debug(2, "$msg$hex |$ascii|\n"); + } + # LCOV_EXCL_STOP + if ($peek) { + if (!seek($handle, $pos, 0)) { + lcovutil::ignorable_error($lcovutil::ERROR_CORRUPT, + "Could not set file position: $!"); + return undef; + } + } + if ($result != $length) { + return undef; + } + return $data; } # @@ -3359,12 +3319,9 @@ sub graph_read(*$;$$) sub graph_skip(*$;$) { - my ($handle, $length, $desc) = @_; + my ($handle, $length, $desc) = @_; - if (defined(graph_read($handle, $length, $desc))) { - return 1; - } - return 0; + return defined(graph_read($handle, $length, $desc)); } # @@ -3375,17 +3332,16 @@ sub graph_skip(*$;$) sub uniq(@) { - my (@list) = @_; - my @new_list; - my %known; + my @new_list; + my %known; - foreach my $item (@list) { - next if ($known{$item}); - $known{$item} = 1; - push(@new_list, $item); - } + foreach my $item (@_) { + next if ($known{$item}); + $known{$item} = 1; + push(@new_list, $item); + } - return @new_list; + return @new_list; } # @@ -3396,30 +3352,12 @@ sub uniq(@) sub sort_uniq(@) { - my (@list) = @_; - my %hash; + my %hash; - foreach (@list) { - $hash{$_} = 1; - } - return sort { $a <=> $b } keys(%hash); -} - -# -# sort_uniq_lex(list) -# -# Return list in lexically ascending order and without duplicate entries. -# - -sub sort_uniq_lex(@) -{ - my (@list) = @_; - my %hash; - - foreach (@list) { - $hash{$_} = 1; - } - return sort keys(%hash); + foreach (@_) { + $hash{$_} = 1; + } + return sort { $a <=> $b } keys(%hash); } # @@ -3431,13 +3369,13 @@ sub sort_uniq_lex(@) sub parent_dir($) { - my ($dir) = @_; - my ($v, $d, $f) = splitpath($dir, 1); - my @dirs = splitdir($d); + my $dir = shift; + my ($v, $d, $f) = splitpath($dir, 1); + my @dirs = splitdir($d); - pop(@dirs); + pop(@dirs); - return catpath($v, catdir(@dirs), $f); + return catpath($v, catdir(@dirs), $f); } # @@ -3457,49 +3395,49 @@ sub parent_dir($) sub find_base_from_source($$) { - my ($base_dir, $source_files) = @_; - my $old_base; - my $best_miss; - my $best_base; - my %rel_files; + my ($base_dir, $source_files) = @_; + my $old_base; + my $best_miss; + my $best_base; + my %rel_files; - # Determine list of relative paths - foreach my $filename (@$source_files) { - next if (file_name_is_absolute($filename)); + # Determine list of relative paths + foreach my $filename (@$source_files) { + next if (file_name_is_absolute($filename)); - $rel_files{$filename} = 1; - } + $rel_files{$filename} = 1; + } - # Early exit if there are no relative paths - return $base_dir if (!%rel_files); + # Early exit if there are no relative paths + return $base_dir if (!%rel_files); - do { - my $miss = 0; + do { + my $miss = 0; - foreach my $filename (keys(%rel_files)) { - if (!-e solve_relative_path($base_dir, $filename)) { - $miss++; - } - } + foreach my $filename (keys(%rel_files)) { + if (!-e solve_relative_path($base_dir, $filename)) { + $miss++; + } + } - debug("base_dir=$base_dir miss=$miss\n"); + debug("base_dir=$base_dir miss=$miss\n"); - # Exit if we find an exact match with no misses - return $base_dir if ($miss == 0); + # Exit if we find an exact match with no misses + return $base_dir if ($miss == 0); - # No exact match, aim for the one with the least source file - # misses - if (!defined($best_base) || 
$miss < $best_miss) { - $best_base = $base_dir; - $best_miss = $miss; - } + # No exact match, aim for the one with the least source file + # misses + if (!defined($best_base) || $miss < $best_miss) { + $best_base = $base_dir; + $best_miss = $miss; + } - # Repeat until there's no more parent directory - $old_base = $base_dir; - $base_dir = parent_dir($base_dir); - } while ($old_base ne $base_dir); + # Repeat until there's no more parent directory + $old_base = $base_dir; + $base_dir = parent_dir($base_dir); + } while ($old_base ne $base_dir); - return $best_base; + return $best_base; } # @@ -3511,26 +3449,26 @@ sub find_base_from_source($$) sub adjust_source_filenames($$$) { - my ($hash, $base_dir) = @_; + my ($hash, $base_dir) = @_; - foreach my $filename (keys(%{$hash})) { - my $old_filename = $filename; + foreach my $filename (keys(%{$hash})) { + my $old_filename = $filename; - # Convert to absolute canonical form - $filename = solve_relative_path($base_dir, $filename); + # Convert to absolute canonical form + $filename = solve_relative_path($base_dir, $filename); - # Apply adjustment - if (defined($adjust_src_pattern)) { - $filename =~ s/$adjust_src_pattern/$adjust_src_replace/g; - } + # Apply adjustment + $filename = ReadCurrentSource::resolve_path($filename, 1); + if ($lcovutil::opt_follow && $lcovutil::opt_follow_file_links) { + $filename = Cwd::realpath($filename); + } - if ($filename ne $old_filename) { - $hash->{$filename} = delete($hash->{$old_filename}); - } - } + if ($filename ne $old_filename) { + $hash->{$filename} = delete($hash->{$old_filename}); + } + } } - # # filter_source_files(hash) # @@ -3539,36 +3477,23 @@ sub adjust_source_filenames($$$) sub filter_source_files($) { - my ($hash) = @_; - - foreach my $filename (keys(%{$hash})) { - # Skip external files if requested - goto del if (!$opt_external && is_external($filename)); - - # Apply include patterns - if (@include_patterns) { - my $keep; - - foreach my $pattern (@include_patterns) { - if ($filename =~ (/^$pattern$/)) { - $keep = 1; - last; - } - } - goto del if (!$keep); - } - - # Apply exclude patterns - foreach my $pattern (@exclude_patterns) { - goto del if ($filename =~ (/^$pattern$/)); - } - next; - -del: - # Remove file data - delete($hash->{$filename}); - $excluded_files{$filename} = 1; - } + my $hash = shift; + + foreach my $filename (keys(%{$hash})) { + # Skip external files if requested + my $external = is_external($filename); + if ($external || + TraceFile::skipCurrentFile($filename)) { + if ($external) { + lcovutil::info("Dropping 'external' file '$filename'\n"); + } else { + lcovutil::info("Excluding file '$filename'\n"); + } + # Remove file data + delete($hash->{$filename}); + $lcovutil::excluded_files{$filename} = 1; + } + } } # @@ -3580,29 +3505,29 @@ del: sub graph_cleanup($) { - my ($graph) = @_; - my $filename; - - foreach $filename (keys(%{$graph})) { - my $per_file = $graph->{$filename}; - my $function; - - foreach $function (keys(%{$per_file})) { - my $lines = $per_file->{$function}; - - if (scalar(@$lines) == 0) { - # Remove empty function - delete($per_file->{$function}); - next; - } - # Normalize list - $per_file->{$function} = [ uniq(@$lines) ]; - } - if (scalar(keys(%{$per_file})) == 0) { - # Remove empty file - delete($graph->{$filename}); - } - } + my $graph = shift; + my $filename; + + foreach $filename (keys(%{$graph})) { + my $per_file = $graph->{$filename}; + my $function; + + foreach $function (keys(%{$per_file})) { + my $lines = $per_file->{$function}; + + if (scalar(@$lines) == 0) 
{ + # Remove empty function + delete($per_file->{$function}); + next; + } + # Normalize list + $per_file->{$function} = [uniq(@$lines)]; + } + if (scalar(keys(%{$per_file})) == 0) { + # Remove empty file + delete($graph->{$filename}); + } + } } # @@ -3614,43 +3539,36 @@ sub graph_cleanup($) sub graph_find_base($) { - my ($bb) = @_; - my %file_count; - my $basefile; - my $file; - my $func; - my $filedata; - my $count; - my $num; - - # Identify base name for this bb data. - foreach $func (keys(%{$bb})) { - $filedata = $bb->{$func}; - - foreach $file (keys(%{$filedata})) { - $count = $file_count{$file}; - - # Count file occurrence - $file_count{$file} = defined($count) ? $count + 1 : 1; - } - } - $count = 0; - $num = 0; - foreach $file (keys(%file_count)) { - if ($file_count{$file} > $count) { - # The file that contains code for the most functions - # is likely the base file - $count = $file_count{$file}; - $num = 1; - $basefile = $file; - } elsif ($file_count{$file} == $count) { - # If more than one file could be the basefile, we - # don't have a basefile - $basefile = undef; - } - } - - return $basefile; + my $bb = shift; + my %file_count; + my $basefile; + + # Identify base name for this bb data. + foreach my $func (keys(%{$bb})) { + my $filedata = $bb->{$func}; + + foreach my $file (keys(%{$filedata})) { + my $count = $file_count{$file}; + + # Count file occurrence + $file_count{$file} = defined($count) ? $count + 1 : 1; + } + } + my $count = 0; + foreach my $file (keys(%file_count)) { + if ($file_count{$file} > $count) { + # The file that contains code for the most functions + # is likely the base file + $count = $file_count{$file}; + $basefile = $file; + } elsif ($file_count{$file} == $count) { + # If more than one file could be the basefile, we + # don't have a basefile + $basefile = undef; + } + } + + return $basefile; } # @@ -3680,48 +3598,43 @@ sub graph_find_base($) sub graph_from_bb($$$$) { - my ($bb, $fileorder, $bb_filename, $fileorder_first) = @_; - my $graph = {}; - my $instr = {}; - my $basefile; - my $file; - my $func; - my $filedata; - my $linedata; - my $order; - - $basefile = graph_find_base($bb); - # Create graph structure - foreach $func (keys(%{$bb})) { - $filedata = $bb->{$func}; - $order = $fileorder->{$func}; - - # Account for lines in functions - if (defined($basefile) && defined($filedata->{$basefile}) && - !$fileorder_first) { - # If the basefile contributes to this function, - # account this function to the basefile. - $graph->{$basefile}->{$func} = $filedata->{$basefile}; - } else { - # If the basefile does not contribute to this function, - # account this function to the first file contributing - # lines. 
- $graph->{$order->[0]}->{$func} = - $filedata->{$order->[0]}; - } - - foreach $file (keys(%{$filedata})) { - # Account for instrumented lines - $linedata = $filedata->{$file}; - push(@{$instr->{$file}}, @$linedata); - } - } - # Clean up array of instrumented lines - foreach $file (keys(%{$instr})) { - $instr->{$file} = [ sort_uniq(@{$instr->{$file}}) ]; - } - - return ($instr, $graph); + my ($bb, $fileorder, $bb_filename, $fileorder_first) = @_; + my $graph = {}; + my $instr = {}; + + my $basefile = graph_find_base($bb); + # Create graph structure + foreach my $func (keys(%{$bb})) { + my $filedata = $bb->{$func}; + my $order = $fileorder->{$func}; + + # Account for lines in functions + if (defined($basefile) && + defined($filedata->{$basefile}) && + !$fileorder_first) { + # If the basefile contributes to this function, + # account this function to the basefile. + $graph->{$basefile}->{$func} = $filedata->{$basefile}; + } else { + # If the basefile does not contribute to this function, + # account this function to the first file contributing + # lines. + $graph->{$order->[0]}->{$func} = + $filedata->{$order->[0]}; + } + + foreach my $file (keys(%{$filedata})) { + # Account for instrumented lines + my $linedata = $filedata->{$file}; + push(@{$instr->{$file}}, @$linedata); + } + } + # Clean up array of instrumented lines + foreach my $file (keys(%{$instr})) { + $instr->{$file} = [sort_uniq(@{$instr->{$file}})]; + } + + return ($instr, $graph); } # @@ -3732,338 +3645,16 @@ sub graph_from_bb($$$$) sub graph_add_order($$$) { - my ($fileorder, $function, $filename) = @_; - my $item; - my $list; - - $list = $fileorder->{$function}; - foreach $item (@$list) { - if ($item eq $filename) { - return; - } - } - push(@$list, $filename); - $fileorder->{$function} = $list; -} - -# -# read_bb_word(handle[, description]) -# -# Read and return a word in .bb format from handle. -# - -sub read_bb_word(*;$) -{ - my ($handle, $desc) = @_; - - return graph_read($handle, 4, $desc); -} - -# -# read_bb_value(handle[, description]) -# -# Read a word in .bb format from handle and return the word and its integer -# value. -# - -sub read_bb_value(*;$) -{ - my ($handle, $desc) = @_; - my $word; - - $word = read_bb_word($handle, $desc); - return undef if (!defined($word)); - - return ($word, unpack("V", $word)); -} - -# -# read_bb_string(handle, delimiter) -# -# Read and return a string in .bb format from handle up to the specified -# delimiter value. -# - -sub read_bb_string(*$) -{ - my ($handle, $delimiter) = @_; - my $word; - my $value; - my $string = ""; - - graph_expect("string"); - do { - ($word, $value) = read_bb_value($handle, "string or delimiter"); - return undef if (!defined($value)); - if ($value != $delimiter) { - $string .= $word; - } - } while ($value != $delimiter); - $string =~ s/\0//g; - - return $string; -} - -# -# read_bb(filename) -# -# Read the contents of the specified .bb file and return (instr, graph), where: -# -# instr : filename -> line data -# line data : [ line1, line2, ... ] -# -# graph : filename -> file_data -# file_data : function name -> line_data -# line_data : [ line1, line2, ... ] -# -# See the gcov info pages of gcc 2.95 for a description of the .bb file format. 
-# - -sub read_bb($) -{ - my ($bb_filename) = @_; - my $minus_one = 0x80000001; - my $minus_two = 0x80000002; - my $value; - my $filename; - my $function; - my $bb = {}; - my $fileorder = {}; - my $instr; - my $graph; - local *HANDLE; - - open(HANDLE, "<", $bb_filename) or goto open_error; - binmode(HANDLE); - while (!eof(HANDLE)) { - $value = read_bb_value(*HANDLE, "data word"); - goto incomplete if (!defined($value)); - if ($value == $minus_one) { - # Source file name - graph_expect("filename"); - $filename = read_bb_string(*HANDLE, $minus_one); - goto incomplete if (!defined($filename)); - } elsif ($value == $minus_two) { - # Function name - graph_expect("function name"); - $function = read_bb_string(*HANDLE, $minus_two); - goto incomplete if (!defined($function)); - } elsif ($value > 0) { - # Line number - if (!defined($filename) || !defined($function)) { - warn("WARNING: unassigned line number ". - "$value\n"); - next; - } - push(@{$bb->{$function}->{$filename}}, $value); - graph_add_order($fileorder, $function, $filename); - } - } - close(HANDLE); - - ($instr, $graph) = graph_from_bb($bb, $fileorder, $bb_filename, 0); - graph_cleanup($graph); - - return ($instr, $graph); - -open_error: - graph_error($bb_filename, "could not open file"); - return undef; -incomplete: - graph_error($bb_filename, "reached unexpected end of file"); - return undef; -} - -# -# read_bbg_word(handle[, description]) -# -# Read and return a word in .bbg format. -# - -sub read_bbg_word(*;$) -{ - my ($handle, $desc) = @_; - - return graph_read($handle, 4, $desc); -} - -# -# read_bbg_value(handle[, description]) -# -# Read a word in .bbg format from handle and return its integer value. -# - -sub read_bbg_value(*;$) -{ - my ($handle, $desc) = @_; - my $word; - - $word = read_bbg_word($handle, $desc); - return undef if (!defined($word)); - - return unpack("N", $word); -} - -# -# read_bbg_string(handle) -# -# Read and return a string in .bbg format. -# - -sub read_bbg_string(*) -{ - my ($handle, $desc) = @_; - my $length; - my $string; - - graph_expect("string"); - # Read string length - $length = read_bbg_value($handle, "string length"); - return undef if (!defined($length)); - if ($length == 0) { - return ""; - } - # Read string - $string = graph_read($handle, $length, "string"); - return undef if (!defined($string)); - # Skip padding - graph_skip($handle, 4 - $length % 4, "string padding") or return undef; - - return $string; -} - -# -# read_bbg_lines_record(handle, bbg_filename, bb, fileorder, filename, -# function) -# -# Read a bbg format lines record from handle and add the relevant data to -# bb and fileorder. Return filename on success, undef on error. -# - -sub read_bbg_lines_record(*$$$$$) -{ - my ($handle, $bbg_filename, $bb, $fileorder, $filename, $function) = @_; - my $string; - my $lineno; - - graph_expect("lines record"); - # Skip basic block index - graph_skip($handle, 4, "basic block index") or return undef; - while (1) { - # Read line number - $lineno = read_bbg_value($handle, "line number"); - return undef if (!defined($lineno)); - if ($lineno == 0) { - # Got a marker for a new filename - graph_expect("filename"); - $string = read_bbg_string($handle); - return undef if (!defined($string)); - # Check for end of record - if ($string eq "") { - return $filename; - } - $filename = $string; - if (!exists($bb->{$function}->{$filename})) { - $bb->{$function}->{$filename} = []; - } - next; - } - # Got an actual line number - if (!defined($filename)) { - warn("WARNING: unassigned line number in ". 
- "$bbg_filename\n"); - next; - } - push(@{$bb->{$function}->{$filename}}, $lineno); - graph_add_order($fileorder, $function, $filename); - } -} - -# -# read_bbg(filename) -# -# Read the contents of the specified .bbg file and return the following mapping: -# graph: filename -> file_data -# file_data: function name -> line_data -# line_data: [ line1, line2, ... ] -# -# See the gcov-io.h file in the SLES 9 gcc 3.3.3 source code for a description -# of the .bbg format. -# - -sub read_bbg($) -{ - my ($bbg_filename) = @_; - my $file_magic = 0x67626267; - my $tag_function = 0x01000000; - my $tag_lines = 0x01450000; - my $word; - my $tag; - my $length; - my $function; - my $filename; - my $bb = {}; - my $fileorder = {}; - my $instr; - my $graph; - local *HANDLE; - - open(HANDLE, "<", $bbg_filename) or goto open_error; - binmode(HANDLE); - # Read magic - $word = read_bbg_value(*HANDLE, "file magic"); - goto incomplete if (!defined($word)); - # Check magic - if ($word != $file_magic) { - goto magic_error; - } - # Skip version - graph_skip(*HANDLE, 4, "version") or goto incomplete; - while (!eof(HANDLE)) { - # Read record tag - $tag = read_bbg_value(*HANDLE, "record tag"); - goto incomplete if (!defined($tag)); - # Read record length - $length = read_bbg_value(*HANDLE, "record length"); - goto incomplete if (!defined($tag)); - if ($tag == $tag_function) { - graph_expect("function record"); - # Read function name - graph_expect("function name"); - $function = read_bbg_string(*HANDLE); - goto incomplete if (!defined($function)); - $filename = undef; - # Skip function checksum - graph_skip(*HANDLE, 4, "function checksum") - or goto incomplete; - } elsif ($tag == $tag_lines) { - # Read lines record - $filename = read_bbg_lines_record(HANDLE, $bbg_filename, - $bb, $fileorder, $filename, - $function); - goto incomplete if (!defined($filename)); - } else { - # Skip record contents - graph_skip(*HANDLE, $length, "unhandled record") - or goto incomplete; - } - } - close(HANDLE); - ($instr, $graph) = graph_from_bb($bb, $fileorder, $bbg_filename, 0); - - graph_cleanup($graph); - - return ($instr, $graph); - -open_error: - graph_error($bbg_filename, "could not open file"); - return undef; -incomplete: - graph_error($bbg_filename, "reached unexpected end of file"); - return undef; -magic_error: - graph_error($bbg_filename, "found unrecognized bbg file magic"); - return undef; + my ($fileorder, $function, $filename) = @_; + + my $list = $fileorder->{$function}; + foreach my $item (@$list) { + if ($item eq $filename) { + return; + } + } + push(@$list, $filename); + $fileorder->{$function} = $list; } # @@ -4074,9 +3665,9 @@ magic_error: sub read_gcno_word(*;$$) { - my ($handle, $desc, $peek) = @_; + my ($handle, $desc, $peek) = @_; - return graph_read($handle, 4, $desc, $peek); + return graph_read($handle, 4, $desc, $peek); } # @@ -4089,17 +3680,11 @@ sub read_gcno_word(*;$$) sub read_gcno_value(*$;$$) { - my ($handle, $big_endian, $desc, $peek) = @_; - my $word; - my $pos; - - $word = read_gcno_word($handle, $desc, $peek); - return undef if (!defined($word)); - if ($big_endian) { - return unpack("N", $word); - } else { - return unpack("V", $word); - } + my ($handle, $big_endian, $desc, $peek) = @_; + + my $word = read_gcno_word($handle, $desc, $peek); + return undef unless defined($word); + return unpack($big_endian ? 
'N' : 'V', $word); } # @@ -4110,24 +3695,22 @@ sub read_gcno_value(*$;$$) sub read_gcno_string(*$) { - my ($handle, $big_endian) = @_; - my $length; - my $string; - - graph_expect("string"); - # Read string length - $length = read_gcno_value($handle, $big_endian, "string length"); - return undef if (!defined($length)); - if ($length == 0) { - return ""; - } - $length *= 4; - # Read string - $string = graph_read($handle, $length, "string and padding"); - return undef if (!defined($string)); - $string =~ s/\0//g; - - return $string; + my ($handle, $big_endian) = @_; + + lcovutil::debug(2, "string"); + # Read string length + my $length = read_gcno_value($handle, $big_endian, "string length"); + return undef if (!defined($length)); + if ($length == 0) { + return ""; + } + $length *= 4; + # Read string + my $string = graph_read($handle, $length, "string and padding"); + return undef if (!defined($string)); + $string =~ s/\0//g; + + return $string; } # @@ -4140,88 +3723,42 @@ sub read_gcno_string(*$) sub read_gcno_lines_record(*$$$$$$) { - my ($handle, $gcno_filename, $bb, $fileorder, $filename, $function, - $big_endian) = @_; - my $string; - my $lineno; - - graph_expect("lines record"); - # Skip basic block index - graph_skip($handle, 4, "basic block index") or return undef; - while (1) { - # Read line number - $lineno = read_gcno_value($handle, $big_endian, "line number"); - return undef if (!defined($lineno)); - if ($lineno == 0) { - # Got a marker for a new filename - graph_expect("filename"); - $string = read_gcno_string($handle, $big_endian); - return undef if (!defined($string)); - # Check for end of record - if ($string eq "") { - return $filename; - } - $filename = $string; - if (!exists($bb->{$function}->{$filename})) { - $bb->{$function}->{$filename} = []; - } - next; - } - # Got an actual line number - if (!defined($filename)) { - warn("WARNING: unassigned line number in ". - "$gcno_filename\n"); - next; - } - # Add to list - push(@{$bb->{$function}->{$filename}}, $lineno); - graph_add_order($fileorder, $function, $filename); - } -} - -# -# determine_gcno_split_crc(handle, big_endian, rec_length, version) -# -# Determine if HANDLE refers to a .gcno file with a split checksum function -# record format. Return non-zero in case of split checksum format, zero -# otherwise, undef in case of read error. -# - -sub determine_gcno_split_crc($$$$) -{ - my ($handle, $big_endian, $rec_length, $version) = @_; - my $strlen; - my $overlong_string; - - return 1 if ($version >= $GCOV_VERSION_4_7_0); - return 1 if (is_compat($COMPAT_MODE_SPLIT_CRC)); - - # Heuristic: - # Decide format based on contents of next word in record: - # - pre-gcc 4.7 - # This is the function name length / 4 which should be - # less than the remaining record length - # - gcc 4.7 - # This is a checksum, likely with high-order bits set, - # resulting in a large number - $strlen = read_gcno_value($handle, $big_endian, undef, 1); - return undef if (!defined($strlen)); - $overlong_string = 1 if ($strlen * 4 >= $rec_length - 12); - - if ($overlong_string) { - if (is_compat_auto($COMPAT_MODE_SPLIT_CRC)) { - info("Auto-detected compatibility mode for split ". - "checksum .gcno file format\n"); - - return 1; - } else { - # Sanity check - warn("Found overlong string in function record: ". 
- "try '--compat split_crc'\n"); - } - } - - return 0; + my ($handle, $gcno_filename, $bb, $fileorder, $filename, $function, + $big_endian) + = @_; + + lcovutil::debug(2, "lines record"); + # Skip basic block index + graph_skip($handle, 4, "basic block index") or return undef; + while (1) { + # Read line number + my $lineno = read_gcno_value($handle, $big_endian, "line number"); + return undef if (!defined($lineno)); + if ($lineno == 0) { + # Got a marker for a new filename + lcovutil::debug(2, "filename"); + my $string = read_gcno_string($handle, $big_endian); + return undef if (!defined($string)); + # Check for end of record + if ($string eq "") { + return $filename; + } + $filename = $string; + if (!exists($bb->{$function}->{$filename})) { + $bb->{$function}->{$filename} = []; + } + next; + } + # Got an actual line number + if (!defined($filename)) { + lcovutil::ignorable_error($lcovutil::ERROR_FORMAT, + "unassigned line number in $gcno_filename"); + next; + } + # Add to list + push(@{$bb->{$function}->{$filename}}, $lineno); + graph_add_order($fileorder, $function, $filename); + } } # @@ -4233,51 +3770,40 @@ sub determine_gcno_split_crc($$$$) sub read_gcno_function_record(*$$$$$) { - my ($handle, $bb, $fileorder, $big_endian, $rec_length, $version) = @_; - my $filename; - my $function; - my $lineno; - my $lines; - my $artificial; - - graph_expect("function record"); - # Skip ident and checksum - graph_skip($handle, 8, "function ident and checksum") or return undef; - # Determine if this is a function record with split checksums - if (!defined($gcno_split_crc)) { - $gcno_split_crc = determine_gcno_split_crc($handle, $big_endian, - $rec_length, - $version); - return undef if (!defined($gcno_split_crc)); - } - # Skip cfg checksum word in case of split checksums - graph_skip($handle, 4, "function cfg checksum") if ($gcno_split_crc); - # Read function name - graph_expect("function name"); - $function = read_gcno_string($handle, $big_endian); - return undef if (!defined($function)); - if ($version >= $GCOV_VERSION_8_0_0) { - $artificial = read_gcno_value($handle, $big_endian, - "compiler-generated entity flag"); - return undef if (!defined($artificial)); - } - # Read filename - graph_expect("filename"); - $filename = read_gcno_string($handle, $big_endian); - return undef if (!defined($filename)); - # Read first line number - $lineno = read_gcno_value($handle, $big_endian, "initial line number"); - return undef if (!defined($lineno)); - # Skip column and ending line number - if ($version >= $GCOV_VERSION_8_0_0) { - graph_skip($handle, 4, "column number") or return undef; - graph_skip($handle, 4, "ending line number") or return undef; - } - # Add to list - push(@{$bb->{$function}->{$filename}}, $lineno); - graph_add_order($fileorder, $function, $filename); - - return ($filename, $function, $artificial); + my ($handle, $bb, $fileorder, $big_endian, $rec_length, $version) = @_; + my $artificial; + + lcovutil::debug(2, "function record"); + # Skip ident and checksum + graph_skip($handle, 8, "function ident and checksum") or return undef; + # must be gcc > 4 - so only split checksum exists + graph_skip($handle, 4, "function cfg checksum"); + # Read function name + lcovutil::debug(2, "function name"); + my $function = read_gcno_string($handle, $big_endian); + return undef if (!defined($function)); + if ($version >= $GCOV_VERSION_8_0_0) { + $artificial = read_gcno_value($handle, $big_endian, + "compiler-generated entity flag"); + return undef if (!defined($artificial)); + } + # Read filename + 
lcovutil::debug(2, "filename"); + my $filename = read_gcno_string($handle, $big_endian); + return undef if (!defined($filename)); + # Read first line number + my $lineno = read_gcno_value($handle, $big_endian, "initial line number"); + return undef if (!defined($lineno)); + # Skip column and ending line number + if ($version >= $GCOV_VERSION_8_0_0) { + graph_skip($handle, 4, "column number") or return undef; + graph_skip($handle, 4, "ending line number") or return undef; + } + # Add to list + push(@{$bb->{$function}->{$filename}}, $lineno); + graph_add_order($fileorder, $function, $filename); + + return ($filename, $function, $artificial); } # @@ -4288,32 +3814,31 @@ sub read_gcno_function_record(*$$$$$) sub map_gcno_version($) { - my ($version) = @_; - my ($a, $b, $c); - my ($major, $minor); - - $a = $version >> 24; - $b = $version >> 16 & 0xff; - $c = $version >> 8 & 0xff; - - if ($a < ord('A')) { - $major = $a - ord('0'); - $minor = ($b - ord('0')) * 10 + $c - ord('0'); - } else { - $major = ($a - ord('A')) * 10 + $b - ord('0'); - $minor = $c - ord('0'); - } - - return $major << 16 | $minor << 8; + my $version = shift; + my ($major, $minor); + + my $a = $version >> 24; + my $b = $version >> 16 & 0xff; + my $c = $version >> 8 & 0xff; + + if ($a < ord('A')) { + $major = $a - ord('0'); + $minor = ($b - ord('0')) * 10 + $c - ord('0'); + } else { + $major = ($a - ord('A')) * 10 + $b - ord('0'); + $minor = $c - ord('0'); + } + + return $major << 16 | $minor << 8; } sub remove_fn_from_hash($$) { - my ($hash, $fns) = @_; + my ($hash, $fns) = @_; - foreach my $fn (@$fns) { - delete($hash->{$fn}); - } + foreach my $fn (@$fns) { + delete($hash->{$fn}); + } } # @@ -4331,137 +3856,127 @@ sub remove_fn_from_hash($$) sub read_gcno($) { - my ($gcno_filename) = @_; - my $file_magic = 0x67636e6f; - my $tag_function = 0x01000000; - my $tag_lines = 0x01450000; - my $big_endian; - my $word; - my $tag; - my $length; - my $filename; - my $function; - my $bb = {}; - my $fileorder = {}; - my $instr; - my $graph; - my $filelength; - my $version; - my $artificial; - my @artificial_fns; - local *HANDLE; - - open(HANDLE, "<", $gcno_filename) or goto open_error; - $filelength = (stat(HANDLE))[7]; - binmode(HANDLE); - # Read magic - $word = read_gcno_word(*HANDLE, "file magic"); - goto incomplete if (!defined($word)); - # Determine file endianness - if (unpack("N", $word) == $file_magic) { - $big_endian = 1; - } elsif (unpack("V", $word) == $file_magic) { - $big_endian = 0; - } else { - goto magic_error; - } - # Read version - $version = read_gcno_value(*HANDLE, $big_endian, "compiler version"); - $version = map_gcno_version($version); - debug(sprintf("found version 0x%08x\n", $version)); - # Skip stamp - graph_skip(*HANDLE, 4, "file timestamp") or goto incomplete; - if ($version >= $GCOV_VERSION_8_0_0) { - graph_skip(*HANDLE, 4, "support unexecuted blocks flag") - or goto incomplete; - } - while (!eof(HANDLE)) { - my $next_pos; - my $curr_pos; - - # Read record tag - $tag = read_gcno_value(*HANDLE, $big_endian, "record tag"); - goto incomplete if (!defined($tag)); - # Read record length - $length = read_gcno_value(*HANDLE, $big_endian, - "record length"); - goto incomplete if (!defined($length)); - # Convert length to bytes - $length *= 4; - # Calculate start of next record - $next_pos = tell(HANDLE); - goto tell_error if ($next_pos == -1); - $next_pos += $length; - # Catch garbage at the end of a gcno file - if ($next_pos > $filelength) { - debug("Overlong record: file_length=$filelength ". 
- "rec_length=$length\n"); - warn("WARNING: $gcno_filename: Overlong record at end ". - "of file!\n"); - last; - } - # Process record - if ($tag == $tag_function) { - ($filename, $function, $artificial) = - read_gcno_function_record( - *HANDLE, $bb, $fileorder, $big_endian, - $length, $version); - goto incomplete if (!defined($function)); - push(@artificial_fns, $function) if ($artificial); - } elsif ($tag == $tag_lines) { - # Read lines record - $filename = read_gcno_lines_record(*HANDLE, - $gcno_filename, $bb, $fileorder, - $filename, $function, $big_endian); - goto incomplete if (!defined($filename)); - } else { - # Skip record contents - graph_skip(*HANDLE, $length, "unhandled record") - or goto incomplete; - } - # Ensure that we are at the start of the next record - $curr_pos = tell(HANDLE); - goto tell_error if ($curr_pos == -1); - next if ($curr_pos == $next_pos); - goto record_error if ($curr_pos > $next_pos); - graph_skip(*HANDLE, $next_pos - $curr_pos, - "unhandled record content") - or goto incomplete; - } - close(HANDLE); - - # Remove artificial functions from result data - remove_fn_from_hash($bb, \@artificial_fns); - remove_fn_from_hash($fileorder, \@artificial_fns); - - ($instr, $graph) = graph_from_bb($bb, $fileorder, $gcno_filename, 1); - graph_cleanup($graph); - - return ($instr, $graph); - -open_error: - graph_error($gcno_filename, "could not open file"); - return undef; -incomplete: - graph_error($gcno_filename, "reached unexpected end of file"); - return undef; -magic_error: - graph_error($gcno_filename, "found unrecognized gcno file magic"); - return undef; -tell_error: - graph_error($gcno_filename, "could not determine file position"); - return undef; -record_error: - graph_error($gcno_filename, "found unrecognized record format"); - return undef; -} - -sub debug($) -{ - my ($msg) = @_; - - return if (!$debug); - print(STDERR "DEBUG: $msg"); + my ($gcno_filename) = @_; + my $file_magic = 0x67636e6f; + my $tag_function = 0x01000000; + my $tag_lines = 0x01450000; + my $big_endian; + my $word; + my $tag; + my $length; + my $filename; + my $function; + my $bb = {}; + my $fileorder = {}; + my $instr; + my $graph; + my $filelength; + my $version; + my $artificial; + my @artificial_fns; + local *HANDLE; + + open(HANDLE, "<", $gcno_filename) or goto open_error; + $filelength = (stat(HANDLE))[7]; + binmode(HANDLE); + # Read magic + $word = read_gcno_word(*HANDLE, "file magic"); + goto incomplete if (!defined($word)); + # Determine file endianness + if (unpack("N", $word) == $file_magic) { + $big_endian = 1; + } elsif (unpack("V", $word) == $file_magic) { + $big_endian = 0; + } else { + goto magic_error; + } + # Read version + $version = read_gcno_value(*HANDLE, $big_endian, "compiler version"); + $version = map_gcno_version($version); + debug(sprintf("found version 0x%08x\n", $version)); + # Skip stamp + graph_skip(*HANDLE, 4, "file timestamp") or goto incomplete; + if ($version >= $GCOV_VERSION_8_0_0) { + graph_skip(*HANDLE, 4, "support unexecuted blocks flag") or + goto incomplete; + } + while (!eof(HANDLE)) { + my $next_pos; + my $curr_pos; + + # Read record tag + $tag = read_gcno_value(*HANDLE, $big_endian, "record tag"); + goto incomplete if (!defined($tag)); + # Read record length + $length = read_gcno_value(*HANDLE, $big_endian, "record length"); + goto incomplete if (!defined($length)); + # Convert length to bytes + $length *= 4; + # Calculate start of next record + $next_pos = tell(HANDLE); + goto tell_error if ($next_pos == -1); + $next_pos += $length; + # Catch 
garbage at the end of a gcno file + if ($next_pos > $filelength) { + debug("Overlong record: file_length=$filelength " . + "rec_length=$length\n"); + lcovutil::ignorable_error($lcovutil::ERROR_FORMAT, + "$gcno_filename: Overlong record at end of file"); + last; + } + # Process record + if ($tag == $tag_function) { + ($filename, $function, $artificial) = + read_gcno_function_record(*HANDLE, $bb, $fileorder, $big_endian, + $length, $version); + goto incomplete if (!defined($function)); + push(@artificial_fns, $function) if ($artificial); + } elsif ($tag == $tag_lines) { + # Read lines record + $filename = + read_gcno_lines_record(*HANDLE, $gcno_filename, $bb, $fileorder, + $filename, $function, $big_endian); + goto incomplete if (!defined($filename)); + } else { + # Skip record contents + graph_skip(*HANDLE, $length, "unhandled record") or + goto incomplete; + } + # Ensure that we are at the start of the next record + $curr_pos = tell(HANDLE); + goto tell_error if ($curr_pos == -1); + next if ($curr_pos == $next_pos); + goto record_error if ($curr_pos > $next_pos); + graph_skip(*HANDLE, $next_pos - $curr_pos, "unhandled record content") + or + goto incomplete; + } + close(HANDLE) or die("unable to close $gcno_filename: $!\n"); + + # Remove artificial functions from result data + remove_fn_from_hash($bb, \@artificial_fns); + remove_fn_from_hash($fileorder, \@artificial_fns); + + ($instr, $graph) = graph_from_bb($bb, $fileorder, $gcno_filename, 1); + graph_cleanup($graph); + + return ($instr, $graph); + + open_error: + graph_error($gcno_filename, "could not open file: $!"); + return undef; + incomplete: + graph_error($gcno_filename, "reached unexpected end of file"); + return undef; + magic_error: + graph_error($gcno_filename, "found unrecognized gcno file magic"); + return undef; + tell_error: + graph_error($gcno_filename, "could not determine file position"); + return undef; + record_error: + graph_error($gcno_filename, "found unrecognized record format"); + return undef; } # @@ -4472,95 +3987,44 @@ sub debug($) sub get_gcov_capabilities() { - my $help = `$gcov_tool --help`; - my %capabilities; - my %short_option_translations = ( - 'a' => 'all-blocks', - 'b' => 'branch-probabilities', - 'c' => 'branch-counts', - 'f' => 'function-summaries', - 'h' => 'help', - 'i' => 'intermediate-format', - 'l' => 'long-file-names', - 'n' => 'no-output', - 'o' => 'object-directory', - 'p' => 'preserve-paths', - 'u' => 'unconditional-branches', - 'v' => 'version', - 'x' => 'hash-filenames', - ); - - foreach (split(/\n/, $help)) { - my $capability; - if (/--(\S+)/) { - $capability = $1; - } else { - # If the line provides a short option, translate it. - next if (!/^\s*-(\S)\s/); - $capability = $short_option_translations{$1}; - next if not defined($capability); - } - next if ($capability eq 'help'); - next if ($capability eq 'version'); - next if ($capability eq 'object-directory'); - - $capabilities{$capability} = 1; - debug("gcov has capability '$capability'\n"); - } - - return \%capabilities; -} - -# -# parse_ignore_errors(@ignore_errors) -# -# Parse user input about which errors to ignore. 
-# - -sub parse_ignore_errors(@) -{ - my (@ignore_errors) = @_; - my @items; - my $item; - - return if (!@ignore_errors); - - foreach $item (@ignore_errors) { - $item =~ s/\s//g; - if ($item =~ /,/) { - # Split and add comma-separated parameters - push(@items, split(/,/, $item)); - } else { - # Add single parameter - push(@items, $item); - } - } - foreach $item (@items) { - my $item_id = $ERROR_ID{lc($item)}; - - if (!defined($item_id)) { - die("ERROR: unknown argument for --ignore-errors: ". - "$item\n"); - } - $ignore[$item_id] = 1; - } -} - -# -# is_external(filename) -# -# Determine if a file is located outside of the specified data directories. -# - -sub is_external($) -{ - my ($filename) = @_; - my $dir; - - foreach $dir (@internal_dirs) { - return 0 if ($filename =~ /^\Q$dir\/\E/); - } - return 1; + my $help = join(' ', @gcov_tool) . ' --help'; + $help = `$help`; + die("return code from '\"$gcov_tool[0]\" --help': $!") + if ($?); + my %capabilities; + my %short_option_translations = ('a' => 'all-blocks', + 'b' => 'branch-probabilities', + 'c' => 'branch-counts', + 'f' => 'function-summaries', + 'h' => 'help', + 'i' => 'intermediate-format', + 'l' => 'long-file-names', + 'n' => 'no-output', + 'o' => 'object-directory', + 'p' => 'preserve-paths', + 'u' => 'unconditional-branches', + 'v' => 'version', + 'x' => 'hash-filenames',); + + foreach (split(/\n/, $help)) { + my $capability; + if (/--(\S+)/) { + $capability = $1; + } else { + # If the line provides a short option, translate it. + next if (!/^\s*-(\S)\s/); + $capability = $short_option_translations{$1}; + next if not defined($capability); + } + next if ($capability eq 'help'); + next if ($capability eq 'version'); + next if ($capability eq 'object-directory'); + + $capabilities{$capability} = 1; + debug("gcov has capability '$capability'\n"); + } + + return \%capabilities; } # @@ -4571,12 +4035,12 @@ sub is_external($) sub compat_name($) { - my ($mode) = @_; - my $name = $COMPAT_MODE_TO_NAME{$mode}; + my $mode = shift; + my $name = $COMPAT_MODE_TO_NAME{$mode}; - return $name if (defined($name)); + return $name if (defined($name)); - return ""; + return ""; } # @@ -4587,99 +4051,82 @@ sub compat_name($) sub parse_compat_modes($) { - my ($opt) = @_; - my @opt_list; - my %specified; - - # Initialize with defaults - %compat_value = %COMPAT_MODE_DEFAULTS; - - # Add old style specifications - if (defined($opt_compat_libtool)) { - $compat_value{$COMPAT_MODE_LIBTOOL} = - $opt_compat_libtool ? $COMPAT_VALUE_ON - : $COMPAT_VALUE_OFF; - } - - # Parse settings - if (defined($opt)) { - @opt_list = split(/\s*,\s*/, $opt); - } - foreach my $directive (@opt_list) { - my ($mode, $value); - - # Either - # mode=off|on|auto or - # mode (implies on) - if ($directive !~ /^(\w+)=(\w+)$/ && - $directive !~ /^(\w+)$/) { - die("ERROR: Unknown compatibility mode specification: ". - "$directive!\n"); - } - # Determine mode - $mode = $COMPAT_NAME_TO_MODE{lc($1)}; - if (!defined($mode)) { - die("ERROR: Unknown compatibility mode '$1'!\n"); - } - $specified{$mode} = 1; - # Determine value - if (defined($2)) { - $value = $COMPAT_NAME_TO_VALUE{lc($2)}; - if (!defined($value)) { - die("ERROR: Unknown compatibility mode ". 
- "value '$2'!\n"); - } - } else { - $value = $COMPAT_VALUE_ON; - } - $compat_value{$mode} = $value; - } - # Perform auto-detection - foreach my $mode (sort(keys(%compat_value))) { - my $value = $compat_value{$mode}; - my $is_autodetect = ""; - my $name = compat_name($mode); - - if ($value == $COMPAT_VALUE_AUTO) { - my $autodetect = $COMPAT_MODE_AUTO{$mode}; - - if (!defined($autodetect)) { - die("ERROR: No auto-detection for ". - "mode '$name' available!\n"); - } - - if (ref($autodetect) eq "CODE") { - $value = &$autodetect(); - $compat_value{$mode} = $value; - $is_autodetect = " (auto-detected)"; - } - } - - if ($specified{$mode}) { - if ($value == $COMPAT_VALUE_ON) { - info("Enabling compatibility mode ". - "'$name'$is_autodetect\n"); - } elsif ($value == $COMPAT_VALUE_OFF) { - info("Disabling compatibility mode ". - "'$name'$is_autodetect\n"); - } else { - info("Using delayed auto-detection for ". - "compatibility mode ". - "'$name'\n"); - } - } - } -} - -sub compat_hammer_autodetect() -{ - if ($gcov_version_string =~ /suse/i && $gcov_version == 0x30303 || - $gcov_version_string =~ /mandrake/i && $gcov_version == 0x30302) - { - info("Auto-detected compatibility mode for GCC 3.3 (hammer)\n"); - return $COMPAT_VALUE_ON; - } - return $COMPAT_VALUE_OFF; + my $opt = shift; + my @opt_list; + my %specified; + + # Initialize with defaults + %compat_value = %COMPAT_MODE_DEFAULTS; + + # Add old style specifications + if (defined($lcovutil::opt_compat_libtool)) { + $compat_value{$COMPAT_MODE_LIBTOOL} = + $lcovutil::opt_compat_libtool ? $COMPAT_VALUE_ON : + $COMPAT_VALUE_OFF; + } + + # Parse settings + if (defined($opt)) { + @opt_list = split(/\s*,\s*/, $opt); + } + foreach my $directive (@opt_list) { + my ($mode, $value); + + # Either + # mode=off|on|auto or + # mode (implies on) + if ($directive !~ /^(\w+)=(\w+)$/ && + $directive !~ /^(\w+)$/) { + die("Unknown compatibility mode specification: " . "$directive!\n"); + } + # Determine mode + $mode = $COMPAT_NAME_TO_MODE{lc($1)}; + if (!defined($mode)) { + die("Unknown compatibility mode '$1'!\n"); + } + $specified{$mode} = 1; + # Determine value + if (defined($2)) { + $value = $COMPAT_NAME_TO_VALUE{lc($2)}; + if (!defined($value)) { + die("Unknown compatibility mode value '$2'!\n"); + } + } else { + $value = $COMPAT_VALUE_ON; + } + $compat_value{$mode} = $value; + } + # Perform auto-detection + foreach my $mode (sort(keys(%compat_value))) { + my $value = $compat_value{$mode}; + my $is_autodetect = ""; + my $name = compat_name($mode); + + if ($value == $COMPAT_VALUE_AUTO) { + my $autodetect = $COMPAT_MODE_AUTO{$mode}; + + if (!defined($autodetect)) { + die("No auto-detection for " . "mode '$name' available!\n"); + } + + if (ref($autodetect) eq "CODE") { + $value = &$autodetect(); + $compat_value{$mode} = $value; + $is_autodetect = " (auto-detected)"; + } + } + + if ($specified{$mode}) { + if ($value == $COMPAT_VALUE_ON) { + info("Enabling compatibility mode '$name'$is_autodetect\n"); + } elsif ($value == $COMPAT_VALUE_OFF) { + info("Disabling compatibility mode '$name'$is_autodetect\n"); + } else { + info("Using delayed auto-detection for " . + "compatibility mode '$name'\n"); + } + } + } } # @@ -4690,22 +4137,7 @@ sub compat_hammer_autodetect() sub is_compat($) { - my ($mode) = @_; - - return 1 if ($compat_value{$mode} == $COMPAT_VALUE_ON); - return 0; -} - -# -# is_compat_auto(mode) -# -# Return non-zero if compatibility mode MODE is set to auto-detect. 
-# - -sub is_compat_auto($) -{ - my ($mode) = @_; + my $mode = shift; - return 1 if ($compat_value{$mode} == $COMPAT_VALUE_AUTO); - return 0; + return $compat_value{$mode} == $COMPAT_VALUE_ON; } diff --git a/bin/genpng b/bin/genpng index bf8e821d..6c96ef3c 100755 --- a/bin/genpng +++ b/bin/genpng @@ -10,7 +10,7 @@ # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. +# General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see @@ -31,101 +31,93 @@ use strict; use warnings; -use File::Basename; +use File::Basename; use Getopt::Long; use Cwd qw/abs_path/; +use FindBin; +use lib "$FindBin::RealBin/../lib"; +use lcovutil qw (%tlaColor %tlaTextColor + $tool_name $tool_dir $lcov_version $lcov_url + die_handler warn_handler); # Constants -our $tool_dir = abs_path(dirname($0)); -our $lcov_version = 'LCOV version '.`$tool_dir/get_version.sh --full`; -our $lcov_url = "http://ltp.sourceforge.net/coverage/lcov.php"; -our $tool_name = basename($0); - +# (now imported from lcovutil.pm) # Prototypes -sub gen_png($$$@); +sub gen_png($$$$$@); sub check_and_load_module($); sub genpng_print_usage(*); -sub genpng_process_file($$$$); -sub genpng_warn_handler($); -sub genpng_die_handler($); - +sub genpng_process_file($$$$$); # # Code entry point # # Check whether required module GD.pm is installed -if (check_and_load_module("GD")) -{ - # Note: cannot use die() to print this message because inserting this - # code into another script via do() would not fail as required! - print(STDERR < \$tab_size, - "width=i" => \$width, - "output-filename=s" => \$out_filename, - "help" => \$help, - "version" => \$version)) - { - print(STDERR "Use $tool_name --help to get usage ". 
- "information\n"); - exit(1); - } - - $filename = $ARGV[0]; - - # Check for help flag - if ($help) - { - genpng_print_usage(*STDOUT); - exit(0); - } - - # Check for version flag - if ($version) - { - print("$tool_name: $lcov_version\n"); - exit(0); - } - - # Check options - if (!$filename) - { - die("No filename specified\n"); - } - - # Check for output filename - if (!$out_filename) - { - $out_filename = "$filename.png"; - } - - genpng_process_file($filename, $out_filename, $width, $tab_size); - exit(0); +if (!caller) { + my $filename; + my $tab_size = 4; + my $width = 80; + my $dark = 0; + my $out_filename; + my $help; + my $version; + + $SIG{__WARN__} = \&warn_handler; + $SIG{__DIE__} = \&die_handler; + + # Parse command line options + if (!GetOptions("tab-size=i" => \$tab_size, + "width=i" => \$width, + "output-filename=s" => \$out_filename, + "dark-mode" => \$dark, + "help" => \$help, + "version" => \$version + )) { + print(STDERR "Use $tool_name --help to get usage information\n"); + exit(1); + } + + $filename = $ARGV[0]; + + # Check for help flag + if ($help) { + genpng_print_usage(*STDOUT); + exit(0); + } + + # Check for version flag + if ($version) { + print("$tool_name: $lcov_version\n"); + exit(0); + } + + # Check options + if (!$filename) { + die("No filename specified\n"); + } + + # Check for output filename + if (!$out_filename) { + $out_filename = "$filename.png"; + } + + genpng_process_file($filename, $out_filename, $width, $tab_size, $dark); + exit(0); } - # # genpng_print_usage(handle) # @@ -134,26 +126,27 @@ if (!caller) sub genpng_print_usage(*) { - local *HANDLE = $_[0]; + local *HANDLE = $_[0]; - print(HANDLE <) - { - if (/^\t\t(.*)$/) - { - # Uninstrumented line - push(@source, ":$1"); - } - elsif (/^ ###### (.*)$/) - { - # Line with zero execution count - push(@source, "0:$1"); - } - elsif (/^( *)(\d*) (.*)$/) - { - # Line with positive execution count - push(@source, "$2:$3"); - } - } - } - else - { - # Plain text file - while () { push(@source, ":$_"); } - } - close(HANDLE); - - gen_png($out_filename, $width, $tab_size, @source); + my $filename = $_[0]; + my $out_filename = $_[1]; + my $width = $_[2]; + my $tab_size = $_[3]; + my $dark = $_[4]; + local *HANDLE; + my @source; + + open(HANDLE, "<", $filename) or + die("cannot open $filename!\n"); + + # Check for .gcov filename extension + if ($filename =~ /^(.*).gcov$/) { + # Assume gcov text format + while () { + if (/^\t\t(.*)$/) { + # Uninstrumented line + push(@source, ":$1"); + } elsif (/^ ###### (.*)$/) { + # Line with zero execution count + push(@source, "0:$1"); + } elsif (/^( *)(\d*) (.*)$/) { + # Line with positive execution count + push(@source, "$2:$3"); + } + } + } else { + # Plain text file + while () { push(@source, ":$_"); } + } + close(HANDLE) or die("unable to close $filename: $!\n"); + + my $show_tla = 1; + gen_png($out_filename, $show_tla, $dark, $width, $tab_size, @source); } - # -# gen_png(filename, width, tab_size, source) +# gen_png(filename, show_tla, dark, width, tab_size, source) # # Write an overview PNG file to FILENAME. Source code is defined by SOURCE # which is a list of lines : per source code line. @@ -232,158 +216,183 @@ sub genpng_process_file($$$$) # Die on error. 
# -sub gen_png($$$@) +sub gen_png($$$$$@) { - my $filename = shift(@_); # Filename for PNG file - my $overview_width = shift(@_); # Imagewidth for image - my $tab_size = shift(@_); # Replacement string for tab signs - my @source = @_; # Source code as passed via argument 2 - my $height; # Height as define by source size - my $overview; # Source code overview image data - my $col_plain_back; # Color for overview background - my $col_plain_text; # Color for uninstrumented text - my $col_cov_back; # Color for background of covered lines - my $col_cov_text; # Color for text of covered lines - my $col_nocov_back; # Color for background of lines which - # were not covered (count == 0) - my $col_nocov_text; # Color for test of lines which were not - # covered (count == 0) - my $col_hi_back; # Color for background of highlighted lines - my $col_hi_text; # Color for text of highlighted lines - my $line; # Current line during iteration - my $row = 0; # Current row number during iteration - my $column; # Current column number during iteration - my $color_text; # Current text color during iteration - my $color_back; # Current background color during iteration - my $last_count; # Count of last processed line - my $count; # Count of current line - my $source; # Source code of current line - my $replacement; # Replacement string for tabulator chars - local *PNG_HANDLE; # Handle for output PNG file - - # Handle empty source files - if (!@source) { - @source = ( "" ); - } - $height = scalar(@source); - # Create image - $overview = new GD::Image($overview_width, $height) - or die("ERROR: cannot allocate overview image!\n"); - - # Define colors - $col_plain_back = $overview->colorAllocate(0xff, 0xff, 0xff); - $col_plain_text = $overview->colorAllocate(0xaa, 0xaa, 0xaa); - $col_cov_back = $overview->colorAllocate(0xaa, 0xa7, 0xef); - $col_cov_text = $overview->colorAllocate(0x5d, 0x5d, 0xea); - $col_nocov_back = $overview->colorAllocate(0xff, 0x00, 0x00); - $col_nocov_text = $overview->colorAllocate(0xaa, 0x00, 0x00); - $col_hi_back = $overview->colorAllocate(0x00, 0xff, 0x00); - $col_hi_text = $overview->colorAllocate(0x00, 0xaa, 0x00); - - # Visualize each line - foreach $line (@source) - { - # Replace tabs with spaces to keep consistent with source - # code view - while ($line =~ /^([^\t]*)(\t)/) - { - $replacement = " "x($tab_size - ((length($1) - 1) % - $tab_size)); - $line =~ s/^([^\t]*)(\t)/$1$replacement/; - } - - # Skip lines which do not follow the : - # specification, otherwise $1 = count, $2 = source code - if (!($line =~ /(\*?)(\d*):(.*)$/)) { next; } - $count = $2; - $source = $3; - - # Decide which color pair to use - - # If this line was not instrumented but the one before was, - # take the color of that line to widen color areas in - # resulting image - if (($count eq "") && defined($last_count) && - ($last_count ne "")) - { - $count = $last_count; - } - - if ($count eq "") - { - # Line was not instrumented - $color_text = $col_plain_text; - $color_back = $col_plain_back; - } - elsif ($count == 0) - { - # Line was instrumented but not executed - $color_text = $col_nocov_text; - $color_back = $col_nocov_back; - } - elsif ($1 eq "*") - { - # Line was highlighted - $color_text = $col_hi_text; - $color_back = $col_hi_back; - } - else - { - # Line was instrumented and executed - $color_text = $col_cov_text; - $color_back = $col_cov_back; - } - - # Write one pixel for each source character - $column = 0; - foreach (split("", $source)) - { - # Check for width - if ($column >= $overview_width) { 
last; } - - if ($_ eq " ") - { - # Space - $overview->setPixel($column++, $row, - $color_back); - } - else - { - # Text - $overview->setPixel($column++, $row, - $color_text); - } - } - - # Fill rest of line - while ($column < $overview_width) - { - $overview->setPixel($column++, $row, $color_back); - } - - $last_count = $2; - - $row++; - } - - # Write PNG file - open (PNG_HANDLE, ">", $filename) - or die("ERROR: cannot write png file $filename!\n"); - binmode(*PNG_HANDLE); - print(PNG_HANDLE $overview->png()); - close(PNG_HANDLE); -} - -sub genpng_warn_handler($) -{ - my ($msg) = @_; - - warn("$tool_name: $msg"); -} - -sub genpng_die_handler($) -{ - my ($msg) = @_; - - die("$tool_name: $msg"); + my $filename = shift(@_); # Filename for PNG file + my $show_tla = shift(@_); # differential categories + my $dark_mode = shift(@_); # dark-on-light, if set + my $overview_width = shift(@_); # Imagewidth for image + my $tab_size = shift(@_); # Replacement string for tab signs + my @source = @_; # Source code as passed via argument 2 + my $height; # Height as define by source size + my $overview; # Source code overview image data + my $col_plain_back; # Color for overview background + my $col_plain_text; # Color for uninstrumented text + my $col_cov_back; # Color for background of covered lines + my $col_cov_text; # Color for text of covered lines + my $col_nocov_back; # Color for background of lines which + # were not covered (count == 0) + my $col_nocov_text; # Color for test of lines which were not + # covered (count == 0) + my $col_hi_back; # Color for background of highlighted lines + my $col_hi_text; # Color for text of highlighted lines + + my %col_tla_back; # Color for background of TLA lines + my %col_tla_text; # Color for text of TLA lines + + my $line; # Current line during iteration + my $row = 0; # Current row number during iteration + my $column; # Current column number during iteration + my $color_text; # Current text color during iteration + my $color_back; # Current background color during iteration + my $last_tag; # Tag of last processed line + my $tag; # Tag of current line + my $last_count; # Count of last processed line + my $count; # Count of current line + my $source; # Source code of current line + my $replacement; # Replacement string for tabulator chars + local *PNG_HANDLE; # Handle for output PNG file + + # Handle empty source files + if (!@source) { + @source = (""); + } + $height = scalar(@source); + # Create image + $overview = new GD::Image($overview_width, $height) or + die("cannot allocate overview image!\n"); + + # Define colors + # overview->colorAllocate(red, green, blue) + if ($dark_mode) { + # just reverse foreground and background + # there is probably a better color scheme than this. 
+ $col_plain_text = + $overview->colorAllocate(0xaa, 0xaa, 0xaa); # light grey + $col_plain_back = $overview->colorAllocate(0x00, 0x00, 0x00); + $col_cov_text = $overview->colorAllocate(0xaa, 0xa7, 0xef); + $col_cov_back = $overview->colorAllocate(0x5d, 0x5d, 0xea); + $col_nocov_text = $overview->colorAllocate(0xff, 0x00, 0x00); + $col_nocov_back = $overview->colorAllocate(0xaa, 0x00, 0x00); + $col_hi_text = $overview->colorAllocate(0x00, 0xff, 0x00); + $col_hi_back = $overview->colorAllocate(0x00, 0xaa, 0x00); + } else { + $col_plain_back = $overview->colorAllocate(0xff, 0xff, 0xff); + $col_plain_text = $overview->colorAllocate(0xaa, 0xaa, 0xaa); + $col_cov_back = $overview->colorAllocate(0xaa, 0xa7, 0xef); + $col_cov_text = $overview->colorAllocate(0x5d, 0x5d, 0xea); + $col_nocov_back = $overview->colorAllocate(0xff, 0x00, 0x00); + $col_nocov_text = $overview->colorAllocate(0xaa, 0x00, 0x00); + $col_hi_back = $overview->colorAllocate(0x00, 0xff, 0x00); + $col_hi_text = $overview->colorAllocate(0x00, 0xaa, 0x00); + } + + foreach my $tla (keys(%lcovutil::tlaColor)) { + next if 'D' eq substr($tla, 0, 1); # skip deleted TLAs..they don't apper + + if ($show_tla) { + my $text = $tlaTextColor{$tla}; + my $back = $tlaColor{$tla}; + $text =~ s/^#/0x/; + $back =~ s/^#/0x/; + $text = hex($text); + $back = hex($back); + + $col_tla_back{$tla} = + $overview->colorAllocate($back >> 16, + ($back >> 8) & 0xFF, + $back & 0xFF); + $col_tla_text{$tla} = + $overview->colorAllocate($text >> 16, + ($text >> 8) & 0xFF, + $text & 0xFF); + } else { + # no differential categories...use vanilla colors + if (grep(/^$tla$/, ("GBC", "CBC", "GIC", "GNC"))) { + $col_tla_back{$tla} = $col_cov_back; + $col_tla_text{$tla} = $col_cov_text; + } elsif (grep(/^$tla$/, ("ECB", "EUB"))) { + $col_tla_back{$tla} = $col_plain_back; + $col_tla_text{$tla} = $col_plain_text; + } else { + $col_tla_back{$tla} = $col_nocov_back; + $col_tla_text{$tla} = $col_nocov_text; + } + } + } + # Visualize each line + foreach $line (@source) { + # Replace tabs with spaces to keep consistent with source + # code view + while ($line =~ /^([^\t]*)(\t)/) { + $replacement = " " x ($tab_size - ((length($1) - 1) % $tab_size)); + $line =~ s/^([^\t]*)(\t)/$1$replacement/; + } + # Skip lines which do not follow the : + # specification, otherwise $1 = count, $2 = source code + if (!($line =~ /([-+<>=]?)(\d*):(.*)$/)) { next; } + $tag = $1; + $count = $2; + $source = $3; + + # Decide which color pair to use + my $tla = undef; + + # If this line was not instrumented but the one before was, + # take the color of that line to widen color areas in + # resulting image + if (($count eq "") && + defined($last_count) && + ($last_count ne "")) { + $tag = $last_tag; + $count = $last_count; + } + + if ($tag eq "" && $count eq "") { + # Line was not instrumented + $color_text = $col_plain_text; + $color_back = $col_plain_back; + } else { + die("unexpected PNG tag '$tag'") + unless exists($lcovutil::pngMap{$tag}); + # index '1' if not covered (count is zero) + $tla = $lcovutil::pngMap{$tag}[$count == 0]; + } + + if (defined($tla)) { + $color_text = $col_tla_text{$tla}; + $color_back = $col_tla_back{$tla}; + } + # Write one pixel for each source character + $column = 0; + foreach (split("", $source)) { + # Check for width + if ($column >= $overview_width) { last; } + + if ($_ eq " ") { + # Space + $overview->setPixel($column++, $row, $color_back); + } else { + # Text + $overview->setPixel($column++, $row, $color_text); + } + } + + # Fill rest of line + while ($column < 
$overview_width) { + $overview->setPixel($column++, $row, $color_back); + } + + $last_tag = $1; + $last_count = $2; + + $row++; + } + + # Write PNG file + open(PNG_HANDLE, ">", $filename) or + die("cannot write png file $filename!\n"); + binmode(*PNG_HANDLE); + print(PNG_HANDLE $overview->png()); + close(PNG_HANDLE) or die("unable to close $filename: $!\n"); } diff --git a/bin/get_changes.sh b/bin/get_changes.sh index ec373b4f..9edd9d76 100755 --- a/bin/get_changes.sh +++ b/bin/get_changes.sh @@ -10,4 +10,4 @@ cd $TOOLDIR if ! git --no-pager log --no-merges --decorate=short --color=never 2>/dev/null ; then cat "$TOOLDIR/../CHANGES" 2>/dev/null -fi +fi diff --git a/bin/get_version.sh b/bin/get_version.sh index ac5a3631..e515231b 100755 --- a/bin/get_version.sh +++ b/bin/get_version.sh @@ -4,30 +4,30 @@ # # Print lcov version or release information as provided by Git, .version # or a fallback. - -TOOLDIR=$(cd $(dirname $0) >/dev/null ; pwd) -GITVER=$(cd $TOOLDIR ; git describe --tags 2>/dev/null) +DIRPATH=$(dirname "$0") +TOOLDIR=$(cd "$DIRPATH" >/dev/null ; pwd) +GITVER=$(cd "$TOOLDIR" ; git describe --tags 2>/dev/null) if [ -z "$GITVER" ] ; then - # Get version information from file - if [ -e "$TOOLDIR/../.version" ] ; then - source "$TOOLDIR/../.version" - fi + # Get version information from file + if [ -e "$TOOLDIR/../.version" ] ; then + source "$TOOLDIR/../.version" + fi else - # Get version information from git - FULL=${GITVER:1} - VERSION=${GITVER%%-*} - VERSION=${VERSION:1} - if [ "${GITVER#*-}" != "$GITVER" ] ; then - RELEASE=${GITVER#*-} - RELEASE=${RELEASE/-/.} - fi + # Get version information from git + FULL=${GITVER#v} + VERSION=${GITVER%%-*} + VERSION=${VERSION#v} + if [ "${GITVER#*-}" != "$GITVER" ] ; then + RELEASE=${GITVER#*-} + RELEASE=${RELEASE/-/.} + fi fi # Fallback -[ -z "$VERSION" ] && VERSION="1.0" -[ -z "$RELEASE" ] && RELEASE="1" -[ -z "$FULL" ] && FULL="$VERSION" +[ -z "$VERSION" ] && VERSION="2.4" +[ -z "$RELEASE" ] && RELEASE="beta" +[ -z "$FULL" ] && FULL="$VERSION-$RELEASE" [ "$1" == "--version" ] && echo -n "$VERSION" [ "$1" == "--release" ] && echo -n "$RELEASE" diff --git a/bin/install.sh b/bin/install.sh deleted file mode 100755 index 2cdef45b..00000000 --- a/bin/install.sh +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env bash -# -# install.sh [--uninstall] sourcefile targetfile [install options] -# - - -# Check for uninstall option -if test "x$1" == "x--uninstall" ; then - UNINSTALL=true - SOURCE=$2 - TARGET=$3 - shift 3 -else - UNINSTALL=false - SOURCE=$1 - TARGET=$2 - shift 2 -fi - -# Check usage -if test -z "$SOURCE" || test -z "$TARGET" ; then - echo Usage: install.sh [--uninstall] source target [install options] >&2 - exit 1 -fi - - -# -# do_install(SOURCE_FILE, TARGET_FILE) -# - -do_install() -{ - local SOURCE=$1 - local TARGET=$2 - local PARAMS=$3 - - install -d $(dirname $TARGET) - install -p $PARAMS $SOURCE $TARGET - if [ -n "$LCOV_PERL_PATH" ] ; then - # Replace Perl interpreter specification - sed -e "1 s%^#\!.*perl.*$%#\!$LCOV_PERL_PATH%" -i $TARGET - fi -} - - -# -# do_uninstall(SOURCE_FILE, TARGET_FILE) -# - -do_uninstall() -{ - local SOURCE=$1 - local TARGET=$2 - - # Does target exist? - if test -r $TARGET ; then - # Is target of the same version as this package? - if diff -I '^our \$lcov_version' -I '^\.TH ' -I '^#!' $SOURCE $TARGET >/dev/null; then - rm -f $TARGET - else - echo WARNING: Skipping uninstall for $TARGET - versions differ! >&2 - fi - else - echo WARNING: Skipping uninstall for $TARGET - not installed! 
>&2 - fi -} - - -# Call sub routine -if $UNINSTALL ; then - do_uninstall $SOURCE $TARGET -else - do_install $SOURCE $TARGET "$*" -fi - -exit 0 diff --git a/bin/lcov b/bin/lcov index f76f9d45..4e1fef23 100755 --- a/bin/lcov +++ b/bin/lcov @@ -10,7 +10,7 @@ # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. +# General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see @@ -31,7 +31,7 @@ # 2002-10-16 / Peter Oberparleiter: implemented --add-tracefile option # 2002-10-17 / Peter Oberparleiter: implemented --extract option # 2002-11-04 / Peter Oberparleiter: implemented --list option -# 2003-03-07 / Paul Larson: Changed to make it work with the latest gcov +# 2003-03-07 / Paul Larson: Changed to make it work with the latest gcov # kernel patch. This will break it with older gcov-kernel # patches unless you change the value of $gcovmod in this script # 2003-04-07 / Peter Oberparleiter: fixed bug which resulted in an error @@ -57,37 +57,58 @@ # 2004-03-30 / Peter Oberparleiter: added --path option # 2004-08-09 / Peter Oberparleiter: added configuration file support # 2008-08-13 / Peter Oberparleiter: added function coverage support +# 2020-09-15 / Henry Cox: refactor to use common utilities. # use strict; use warnings; -use File::Basename; -use File::Path; +use File::Basename qw(basename dirname); +use File::Path qw(mkpath); use File::Find; -use File::Temp qw /tempdir/; use File::Spec::Functions qw /abs2rel canonpath catdir catfile catpath - file_name_is_absolute rootdir splitdir splitpath/; -use Getopt::Long; + file_name_is_absolute rootdir splitdir splitpath/; use Cwd qw /abs_path getcwd/; - - -# Global constants -our $tool_dir = abs_path(dirname($0)); -our $lcov_version = 'LCOV version '.`$tool_dir/get_version.sh --full`; -our $lcov_url = "http://ltp.sourceforge.net/coverage/lcov.php"; -our $tool_name = basename($0); +use POSIX qw (floor); +use FindBin; +use Storable; +use Time::HiRes; # for profiling + +use lib "$FindBin::RealBin/../lib"; +use lcovutil qw ($tool_name $tool_dir $lcov_version $lcov_url + define_errors parse_ignore_errors ignorable_error + info set_info_callback init_verbose_flag $verbose + $br_coverage $func_coverage + debug $debug $devnull $dirseparator + die_handler warn_handler abort_handler + create_temp_dir temp_cleanup + summarize_cov_filters + $FILTER_BRANCH_NO_COND $FILTER_LINE_CLOSE_BRACE @cov_filter + + @exclude_file_patterns @include_file_patterns %excluded_files + warn_file_patterns summarize_messages + %lcovErrors $ERROR_GCOV $ERROR_SOURCE $ERROR_MISMATCH + $ERROR_BRANCH $ERROR_EMPTY $ERROR_FORMAT $ERROR_VERSION + $ERROR_UNUSED $ERROR_PACKAGE $ERROR_CORRUPT + $ERROR_PARALLEL report_parallel_error + system_no_output + rate get_overall_line + + parseOptions + strip_directories transform_pattern + @extractVersionScript $verify_checksum + @comments + + $maxParallelism $maxMemory + ); # Directory containing gcov kernel files our $gcov_dir; -# Where to create temporary directories -our $tmp_dir; - # Internal constants -our $GKV_PROC = 0; # gcov-kernel data in /proc via external patch -our $GKV_SYS = 1; # gcov-kernel data in /sys via vanilla 2.6.31+ -our @GKV_NAME = ( "external", "upstream" ); -our $pkg_gkv_file = ".gcov_kernel_version"; +our $GKV_PROC = 0; # gcov-kernel data in /proc via 
external patch +our $GKV_SYS = 1; # gcov-kernel data in /sys via vanilla 2.6.31+ +our @GKV_NAME = ("external", "upstream"); +our $pkg_gkv_file = ".gcov_kernel_version"; our $pkg_build_file = ".build_directory"; # Branch data combination types @@ -104,387 +125,325 @@ sub kernel_capture(); sub kernel_capture_initial(); sub package_capture(); sub add_traces(); -sub read_info_file($); -sub get_info_entry($); -sub set_info_entry($$$$$$$$$;$$$$$$); -sub add_counts($$); -sub merge_checksums($$$); -sub combine_info_entries($$$); -sub combine_info_files($$); -sub write_info_file(*$); -sub extract(); -sub remove(); +sub merge_traces($$); +sub remove_file_patterns($); sub list(); sub get_common_filename($$); sub read_diff($); sub diff(); sub system_no_output($@); -sub read_config($); -sub apply_config($); -sub info(@); +sub my_info(@); +set_info_callback(\&my_info); sub create_temp_dir(); -sub transform_pattern($); -sub warn_handler($); -sub die_handler($); -sub abort_handler($); -sub temp_cleanup(); sub setup_gkv(); -sub get_overall_line($$$$); -sub print_overall_rate($$$$$$$$$); sub lcov_geninfo(@); sub create_package($$$;$); -sub get_func_found_and_hit($); sub summary(); -sub rate($$;$$$); # Global variables & initialization -our @directory; # Specifies where to get coverage data from -our @kernel_directory; # If set, captures only from specified kernel subdirs -our @add_tracefile; # If set, reads in and combines all files in list -our $list; # If set, list contents of tracefile -our $extract; # If set, extracts parts of tracefile -our $remove; # If set, removes parts of tracefile -our $diff; # If set, modifies tracefile according to diff -our $reset; # If set, reset all coverage data to zero -our $capture; # If set, capture data -our $output_filename; # Name for file to write coverage data to -our $test_name = ""; # Test case name -our $quiet = ""; # If set, suppress information messages -our $help; # Help option flag -our $version; # Version option flag -our $convert_filenames; # If set, convert filenames when applying diff -our $strip; # If set, strip leading directories when applying diff -our $temp_dir_name; # Name of temporary directory -our $cwd = `pwd`; # Current working directory -our $data_stdout; # If set, indicates that data is written to stdout -our $follow; # If set, indicates that find shall follow links -our $diff_path = ""; # Path removed from tracefile when applying diff -our $base_directory; # Base directory (cwd of gcc during compilation) -our $checksum; # If set, calculate a checksum for each line -our $no_checksum; # If set, don't calculate a checksum for each line -our $compat_libtool; # If set, indicates that libtool mode is to be enabled -our $no_compat_libtool; # If set, indicates that libtool mode is to be disabled -our $gcov_tool; -our @opt_ignore_errors; + +our @directory; # Specifies where to get coverage data from +our @kernel_directory; # If set, captures only from specified kernel subdirs +our @add_tracefile; # If set, reads in and combines all files in list +our @intersect; # glob patterns for intersect RHS +our @difference; # glob patterns for difference RHS +our $list; # If set, list contents of tracefile +our $diff; # If set, modifies tracefile according to diff +our $reset; # If set, reset all coverage data to zero +our $prune_testcases; # If set, try to filter out useless tests that do not + # contribute new coverage or new coverpoints +our $output_filename; # Name for file to write coverage data to +our $test_name = ""; # Test case name +our $cwd = 
Cwd::getcwd(); # Current working directory +our $data_stdout; # If set, indicates that data is written to stdout +our $follow; # If set, indicates that find shall follow links +our $base_directory; # Base directory (cwd of gcc during compilation) +our $compat_libtool; # If set, indicates that libtool mode is to be enabled +our $no_compat_libtool; # If set, indicates that libtool mode is to be disabled + +our @gcov_tool; +our @large_files; # handled sequentially in geninfo our $initial; -our @include_patterns; # List of source file patterns to include -our @exclude_patterns; # List of source file patterns to exclude +our $captureAll; our $no_recursion = 0; our $to_package; our $from_package; our $maxdepth; our $no_markers; -our $config; # Configuration file contents chomp($cwd); -our @temp_dirs; -our $gcov_gkv; # gcov kernel support version found on machine +our $gcov_gkv; # gcov kernel support version found on machine our $opt_derive_func_data; -our $opt_debug; our $opt_list_full_path; our $opt_no_list_full_path; -our $opt_list_width = 80; +our $opt_list_width = 80; our $opt_list_truncate_max = 20; our $opt_external; -our $opt_no_external; -our $opt_config_file; -our %opt_rc; our @opt_summary; our $opt_compat; -our $ln_overall_found; -our $ln_overall_hit; -our $fn_overall_found; -our $fn_overall_hit; -our $br_overall_found; -our $br_overall_hit; -our $func_coverage = 1; -our $br_coverage = 0; - # # Code entry point # -$SIG{__WARN__} = \&warn_handler; -$SIG{__DIE__} = \&die_handler; -$SIG{'INT'} = \&abort_handler; -$SIG{'QUIT'} = \&abort_handler; - -# Check command line for a configuration file name -Getopt::Long::Configure("pass_through", "no_auto_abbrev"); -GetOptions("config-file=s" => \$opt_config_file, - "rc=s%" => \%opt_rc); -Getopt::Long::Configure("default"); +$SIG{__WARN__} = \&lcovutil::warn_handler; +$SIG{__DIE__} = \&lcovutil::die_handler; +$SIG{'INT'} = \&lcovutil::abort_handler; +$SIG{'QUIT'} = \&lcovutil::abort_handler; + +lcovutil::save_cmd_line(\@ARGV, "$FindBin::RealBin"); + +our @cmdArgs = @ARGV; + +my %lcov_rc_params = ("lcov_gcov_dir" => \$gcov_dir, + "lcov_list_full_path" => \$opt_list_full_path, + "lcov_list_width" => \$opt_list_width, + "lcov_list_truncate_max" => \$opt_list_truncate_max); + +my %lcov_options = ("directory|d|di=s" => \@directory, + "add-tracefile|a=s" => \@add_tracefile, + "list|l=s" => \$list, + "kernel-directory|k=s" => \@kernel_directory, + "extract|e=s" => \$lcovutil::lcov_extract, + "remove|r=s" => \$lcovutil::lcov_remove, + "diff=s" => \$diff, + "capture|c" => \$lcovutil::lcov_capture, + "output-file|o=s" => \$output_filename, + "test-name|t=s" => \$test_name, + "zerocounters|z" => \$reset, + "follow|f" => \$follow, + "base-directory|b=s" => \$base_directory, + "compat-libtool" => \$compat_libtool, + "no-compat-libtool" => \$no_compat_libtool, + "gcov-tool=s" => \@gcov_tool, + 'large-file=s' => \@large_files, + + "initial|i" => \$initial, + "all" => \$captureAll, + "no-recursion" => \$no_recursion, + "to-package=s" => \$to_package, + "from-package=s" => \$from_package, + "no-markers" => \$no_markers, + "derive-func-data" => \$opt_derive_func_data, + "list-full-path" => \$opt_list_full_path, + "no-list-full-path" => \$opt_no_list_full_path, + "external" => \$opt_external, + "no-external" => \$lcovutil::opt_no_external, + "summary=s" => \@opt_summary, + "compat=s" => \$opt_compat, + "prune-tests" => \$prune_testcases, + "map-functions" => \$AggregateTraces::function_mapping, + + 'intersect=s' => \@intersect, + 'subtract=s' => \@difference,); + +# 
geninfo args might get passed to lcov for --capture mode - so we need to not croak on them +my %mergedRcOpts = (%lcov_rc_params, %lcovutil::geninfo_rc_opts); +# Parse command line options +if (!lcovutil::parseOptions(\%mergedRcOpts, \%lcov_options, \$output_filename)) { - # Remove spaces around rc options - my %new_opt_rc; - - while (my ($key, $value) = each(%opt_rc)) { - $key =~ s/^\s+|\s+$//g; - $value =~ s/^\s+|\s+$//g; - - $new_opt_rc{$key} = $value; - } - %opt_rc = %new_opt_rc; + print(STDERR "Use $tool_name --help to get usage information\n"); + exit(1); } -# Read configuration file if available -if (defined($opt_config_file)) { - $config = read_config($opt_config_file); -} elsif (defined($ENV{"HOME"}) && (-r $ENV{"HOME"}."/.lcovrc")) -{ - $config = read_config($ENV{"HOME"}."/.lcovrc"); -} -elsif (-r "/etc/lcovrc") -{ - $config = read_config("/etc/lcovrc"); -} elsif (-r "/usr/local/etc/lcovrc") -{ - $config = read_config("/usr/local/etc/lcovrc"); +if (defined($no_compat_libtool)) { + $compat_libtool = ($no_compat_libtool ? 0 : 1); + $no_compat_libtool = undef; } -if ($config || %opt_rc) -{ - # Copy configuration file and --rc values to variables - apply_config({ - "lcov_gcov_dir" => \$gcov_dir, - "lcov_tmp_dir" => \$tmp_dir, - "lcov_list_full_path" => \$opt_list_full_path, - "lcov_list_width" => \$opt_list_width, - "lcov_list_truncate_max"=> \$opt_list_truncate_max, - "lcov_branch_coverage" => \$br_coverage, - "lcov_function_coverage"=> \$func_coverage, - }); +if (defined($opt_no_list_full_path)) { + $opt_list_full_path = ($opt_no_list_full_path ? 0 : 1); + $opt_no_list_full_path = undef; } -# Parse command line options -if (!GetOptions("directory|d|di=s" => \@directory, - "add-tracefile|a=s" => \@add_tracefile, - "list|l=s" => \$list, - "kernel-directory|k=s" => \@kernel_directory, - "extract|e=s" => \$extract, - "remove|r=s" => \$remove, - "diff=s" => \$diff, - "convert-filenames" => \$convert_filenames, - "strip=i" => \$strip, - "capture|c" => \$capture, - "output-file|o=s" => \$output_filename, - "test-name|t=s" => \$test_name, - "zerocounters|z" => \$reset, - "quiet|q" => \$quiet, - "help|h|?" => \$help, - "version|v" => \$version, - "follow|f" => \$follow, - "path=s" => \$diff_path, - "base-directory|b=s" => \$base_directory, - "checksum" => \$checksum, - "no-checksum" => \$no_checksum, - "compat-libtool" => \$compat_libtool, - "no-compat-libtool" => \$no_compat_libtool, - "gcov-tool=s" => \$gcov_tool, - "ignore-errors=s" => \@opt_ignore_errors, - "initial|i" => \$initial, - "include=s" => \@include_patterns, - "exclude=s" => \@exclude_patterns, - "no-recursion" => \$no_recursion, - "to-package=s" => \$to_package, - "from-package=s" => \$from_package, - "no-markers" => \$no_markers, - "derive-func-data" => \$opt_derive_func_data, - "debug" => \$opt_debug, - "list-full-path" => \$opt_list_full_path, - "no-list-full-path" => \$opt_no_list_full_path, - "external" => \$opt_external, - "no-external" => \$opt_no_external, - "summary=s" => \@opt_summary, - "compat=s" => \$opt_compat, - "config-file=s" => \$opt_config_file, - "rc=s%" => \%opt_rc, - )) -{ - print(STDERR "Use $tool_name --help to get usage information\n"); - exit(1); +if (defined($base_directory)) { + push(@ReadCurrentSource::source_directories, $base_directory); + push(@lcovutil::internal_dirs, $base_directory); } -else -{ - # Merge options - if (defined($no_checksum)) - { - $checksum = ($no_checksum ? 0 : 1); - $no_checksum = undef; - } - - if (defined($no_compat_libtool)) - { - $compat_libtool = ($no_compat_libtool ? 
0 : 1); - $no_compat_libtool = undef; - } - - if (defined($opt_no_list_full_path)) - { - $opt_list_full_path = ($opt_no_list_full_path ? 0 : 1); - $opt_no_list_full_path = undef; - } - - if (defined($opt_no_external)) { - $opt_external = 0; - $opt_no_external = undef; - } +if (defined($opt_external)) { + $lcovutil::opt_no_external = 0; + $opt_external = undef; } -# Check for help option -if ($help) -{ - print_usage(*STDOUT); - exit(0); +my $begin = Time::HiRes::gettimeofday(); + +if ($initial && !$lcovutil::lcov_capture) { + lcovutil::ignorable_warning($lcovutil::ERROR_USAGE, + "'--initial' is ignored except in '--capture' mode."); + $initial = undef; } -# Check for version option -if ($version) -{ - print("$tool_name: $lcov_version\n"); - exit(0); +if ($captureAll && $initial) { + lcovutil::ignorable_warning($lcovutil::ERROR_USAGE, + "'--all' ignored when '--initial' is used."); + $captureAll = undef; } # Check list width option if ($opt_list_width <= 40) { - die("ERROR: lcov_list_width parameter out of range (needs to be ". - "larger than 40)\n"); + die("lcov_list_width parameter out of range (needs to be " . + "larger than 40)\n"); } -# Normalize --path text -$diff_path =~ s/\/$//; - -if ($follow) -{ - $follow = "-follow"; -} -else -{ - $follow = ""; -} - -if ($no_recursion) -{ - $maxdepth = "-maxdepth 1"; -} -else -{ - $maxdepth = ""; -} +$follow = $follow ? '-follow' : ''; +$maxdepth = $no_recursion ? '-maxdepth 1' : ''; # Check for valid options check_options(); -# Only --extract, --remove and --diff allow unnamed parameters -if (@ARGV && !($extract || $remove || $diff || @opt_summary)) -{ - die("Extra parameter found: '".join(" ", @ARGV)."'\n". - "Use $tool_name --help to get usage information\n"); +# Only --extract, --remove and --diff, --intersect, --subtract allow unnamed parameters +if (@ARGV && + !( $lcovutil::lcov_extract || + $lcovutil::lcov_remove || + $diff || + @intersect || + @difference || + @opt_summary) +) { + die("Extra parameter found: '" . + join(" ", @ARGV) . + "'\n" . "Use $tool_name --help to get usage information\n"); +} + +if (defined($lcovutil::opt_no_external) && + !(defined($lcovutil::lcov_capture) && $lcovutil::lcov_capture != 0)) { + lcovutil::ignorable_warning($lcovutil::ERROR_USAGE, + "'--no-external' is ignored except in 'lcov --capture|-c'\n"); + $lcovutil::opt_no_external = 0; } # Check for output filename $data_stdout = !($output_filename && ($output_filename ne "-")); -if ($capture) -{ - if ($data_stdout) - { - # Option that tells geninfo to write to stdout - $output_filename = "-"; - } +if ($lcovutil::lcov_capture && $data_stdout) { + # Option that tells geninfo to write to stdout + $output_filename = "-"; } # Determine kernel directory for gcov data -if (!$from_package && !@directory && ($capture || $reset)) { - ($gcov_gkv, $gcov_dir) = setup_gkv(); +if (!$from_package && !@directory && ($lcovutil::lcov_capture || $reset)) { + ($gcov_gkv, $gcov_dir) = setup_gkv(); } -# Check for requested functionality -if ($reset) -{ - $data_stdout = 0; - # Differentiate between user space and kernel reset - if (@directory) - { - userspace_reset(); - } - else - { - kernel_reset(); - } -} -elsif ($capture) -{ - # Capture source can be user space, kernel or package - if ($from_package) { - package_capture(); - } elsif (@directory) { - userspace_capture(); - } else { - if ($initial) { - if (defined($to_package)) { - die("ERROR: --initial cannot be used together ". 
- "with --to-package\n"); - } - kernel_capture_initial(); - } else { - kernel_capture(); - } - } -} -elsif (@add_tracefile) -{ - ($ln_overall_found, $ln_overall_hit, - $fn_overall_found, $fn_overall_hit, - $br_overall_found, $br_overall_hit) = add_traces(); -} -elsif ($remove) -{ - ($ln_overall_found, $ln_overall_hit, - $fn_overall_found, $fn_overall_hit, - $br_overall_found, $br_overall_hit) = remove(); -} -elsif ($extract) -{ - ($ln_overall_found, $ln_overall_hit, - $fn_overall_found, $fn_overall_hit, - $br_overall_found, $br_overall_hit) = extract(); -} -elsif ($list) -{ - $data_stdout = 0; - list(); -} -elsif ($diff) -{ - if (scalar(@ARGV) != 1) - { - die("ERROR: option --diff requires one additional argument!\n". - "Use $tool_name --help to get usage information\n"); - } - ($ln_overall_found, $ln_overall_hit, - $fn_overall_found, $fn_overall_hit, - $br_overall_found, $br_overall_hit) = diff(); -} -elsif (@opt_summary) -{ - $data_stdout = 0; - ($ln_overall_found, $ln_overall_hit, - $fn_overall_found, $fn_overall_hit, - $br_overall_found, $br_overall_hit) = summary(); -} +our $exit_code = 0; -temp_cleanup(); +my $trace; -if (defined($ln_overall_found)) { - print_overall_rate(1, $ln_overall_found, $ln_overall_hit, - 1, $fn_overall_found, $fn_overall_hit, - 1, $br_overall_found, $br_overall_hit); -} else { - info("Done.\n") if (!$list && !$capture); +eval { + # Check for requested functionality + if ($reset) { + $data_stdout = 0; + # Differentiate between user space and kernel reset + if (@directory) { + userspace_reset(); + } else { + kernel_reset(); + } + } elsif ($lcovutil::lcov_capture) { + # Capture source can be user space, kernel or package + if ($from_package) { + package_capture(); + } elsif (@directory) { + userspace_capture(); + } else { + if ($initial) { + die("--initial cannot be used together with --to-package\n") + if (defined($to_package)); + kernel_capture_initial(); + } else { + kernel_capture(); + } + } + } elsif (@add_tracefile) { + if ($AggregateTraces::function_mapping) { + $AggregateTraces::function_mapping = {}; + add_traces(); + + my $file = InOutFile->out($output_filename); + my $hdl = $file->hdl(); + while (my ($key, $data) = each(%$AggregateTraces::function_mapping)) + { + print($hdl $data->[0] . ": " . $key . "\n"); + foreach my $f (@{$data->[1]}) { + print($hdl " $f\n"); + } + } + } elsif (defined($prune_testcases)) { + my ($pruned, $merged) = add_traces(); + + info("Pruned result: retained " . + scalar(@$pruned) . " of " . + scalar(@$merged) . " files\n"); + my $file = InOutFile->out($output_filename); + my $hdl = $file->hdl(); + print($hdl join("\n", @$pruned) . "\n"); + } else { + $trace = add_traces(); + } + } elsif ($lcovutil::lcov_remove) { + # remove files matching patterns + $trace = remove_file_patterns($lcovutil::lcov_remove); + } elsif ($lcovutil::lcov_extract) { + # kep only the files matching patterns + $trace = remove_file_patterns($lcovutil::lcov_extract); + } elsif ($list) { + $data_stdout = 0; + list(); + } elsif ($diff) { + die("Deprecated command \"lcov --diff ...\" is no longer supported and had been removed.\nPlease see the \"differential coverage\" section in the genhtml manual for a more flexible alternative,\nor use an older lcov release if you need the feature." 
+ ); + } elsif (@opt_summary) { + $data_stdout = 0; + $trace = summary(); + } elsif (@intersect) { + $trace = merge_traces(\@intersect, TraceInfo::INTERSECT); + } elsif (@difference) { + $trace = merge_traces(\@difference, TraceInfo::DIFFERENCE); + } +}; +if ($@) { + $exit_code = 1; + print(STDERR $@); } -exit(0); + +temp_cleanup(); +chdir($cwd); + +if (0 == $exit_code) { + if (defined($trace)) { + # the numbers do not reflect coverpoints in 'erased' functions. + # the issue is that we filter them out in the write operation - but + # we don't bother to read it back + $trace->print_summary(); + $trace->checkCoverageCriteria(); + $exit_code = 1 if $CoverageCriteria::coverageCriteriaStatus; + CoverageCriteria::summarize(); + } else { + info("Done.\n") if (!$list && !$lcovutil::lcov_capture); + } + if (!defined($lcovutil::lcov_capture)) { + lcovutil::warn_file_patterns() + ; # warn about unused include/exclude directives + ReadCurrentSource::warn_sourcedir_patterns(); + summarize_cov_filters(); + summarize_messages(); + } +} +my $end = Time::HiRes::gettimeofday(); +$lcovutil::profileData{total} = $end - $begin; + +unless ($lcovutil::lcov_capture) { + # if we executed 'geninfo' - then we saved the profile data from that process + lcovutil::cleanup_callbacks(); + lcovutil::save_profile($output_filename ? $output_filename : "lcov"); +} + +# exit with non-zero status if --keep-going and some errors detected +$exit_code = 1 + if (0 == $exit_code && + lcovutil::saw_error()); + +exit($exit_code); # # print_usage(handle) @@ -494,45 +453,66 @@ exit(0); sub print_usage(*) { - local *HANDLE = $_[0]; + local *HANDLE = $_[0]; - print(HANDLE < 1) - { - die("ERROR: only one of -z, -c, -a, -e, -r, -l, ". - "--diff or --summary allowed!\n". - "Use $tool_name --help to get usage information\n"); - } + my $i = 0; + + # Count occurrence of mutually exclusive options + $reset && $i++; + $lcovutil::lcov_capture && $i++; + @add_tracefile && $i++; + $lcovutil::lcov_extract && $i++; + $lcovutil::lcov_remove && $i++; + $list && $i++; + $diff && $i++; + @opt_summary && $i++; + @intersect && $i++; + @difference && $i++; + + if ($i == 0 || + $i > 1) { + die("invalid command line:\n $0 " . + join(' ', @main::cmdArgs) . + "\nNeed " . ($i > 1 ? 'only ' : '') . + "one of options -z, -c, -a, -e, -r, -l, --diff, --intersect, --subtract, or --summary\n" + . "Use $tool_name --help to get usage information\n"); + } + + if ($prune_testcases && 0 == scalar(@add_tracefile)) { + lcovutil::ignorable_error($lcovutil::ERROR_USAGE, + "--prune-tests has effect only when -a/--add-tracefile is specified" + ); + } } - # # userspace_reset() # @@ -597,22 +590,41 @@ sub check_options() sub userspace_reset() { - my $current_dir; - my @file_list; - - foreach $current_dir (@directory) - { - info("Deleting all .da files in $current_dir". 
- ($no_recursion?"\n":" and subdirectories\n")); - @file_list = `find "$current_dir" $maxdepth $follow -name \\*\\.da -type f -o -name \\*\\.gcda -type f 2>/dev/null`; - chomp(@file_list); - foreach (@file_list) - { - unlink($_) or die("ERROR: cannot remove file $_!\n"); - } - } -} + my @file_list; + foreach my $pattern (@directory) { + my @dirs; + if (-d $pattern) { + push(@dirs, $pattern); + } else { + $pattern =~ s/([^\\]) /$1\\ /g # explicitly escape spaces + unless $^O =~ /Win/; + @dirs = glob($pattern); + } + my $count = 0; + foreach my $current_dir (@dirs) { + if (!-d $current_dir) { + lcovutil::ignorable_error($lcovutil::ERROR_USAGE, + "$current_dir is not a directory"); + next; + } + ++$count; + info("Deleting all .da files in $current_dir" . + ($no_recursion ? "\n" : " and subdirectories\n")); + @file_list = + `find "$current_dir" $maxdepth $follow -name \\*\\.da -type f -o -name \\*\\.gcda -type f 2>$lcovutil::devnull`; + die("Error return code from 'find \"$current_dir\" ...': $!") + if ($?); + chomp(@file_list); + foreach (@file_list) { + unlink($_) or die("cannot remove file $_: $!\n"); + } + } + ignorable_error($ERROR_EMPTY, + "$pattern does not match any directory entries") + if 0 == $count; + } +} # # userspace_capture() @@ -625,25 +637,24 @@ sub userspace_reset() sub userspace_capture() { - my $dir; - my $build; - - if (!defined($to_package)) { - lcov_geninfo(@directory); - return; - } - if (scalar(@directory) != 1) { - die("ERROR: -d may be specified only once with --to-package\n"); - } - $dir = $directory[0]; - if (defined($base_directory)) { - $build = $base_directory; - } else { - $build = $dir; - } - create_package($to_package, $dir, $build); -} + my $dir; + my $build; + if (!defined($to_package)) { + lcov_geninfo(@directory); + return; + } + if (scalar(@directory) != 1) { + die("-d may be specified only once with --to-package\n"); + } + $dir = $directory[0]; + if (defined($base_directory)) { + $build = $base_directory; + } else { + $build = $dir; + } + create_package($to_package, $dir, $build); +} # # kernel_reset() @@ -655,27 +666,26 @@ sub userspace_capture() sub kernel_reset() { - local *HANDLE; - my $reset_file; - - info("Resetting kernel execution counters\n"); - if (-e "$gcov_dir/vmlinux") { - $reset_file = "$gcov_dir/vmlinux"; - } elsif (-e "$gcov_dir/reset") { - $reset_file = "$gcov_dir/reset"; - } else { - die("ERROR: no reset control found in $gcov_dir\n"); - } - open(HANDLE, ">", $reset_file) or - die("ERROR: cannot write to $reset_file!\n"); - print(HANDLE "0"); - close(HANDLE); -} + local *HANDLE; + my $reset_file; + info("Resetting kernel execution counters\n"); + if (-e "$gcov_dir/vmlinux") { + $reset_file = "$gcov_dir/vmlinux"; + } elsif (-e "$gcov_dir/reset") { + $reset_file = "$gcov_dir/reset"; + } else { + die("no reset control found in $gcov_dir\n"); + } + open(HANDLE, ">", $reset_file) or + die("cannot write to $reset_file: $!\n"); + print(HANDLE "0"); + close(HANDLE); +} # # lcov_copy_single(from, to) -# +# # Copy single regular file FROM to TO without checking its size. This is # required to work with special files generated by the kernel # seq_file-interface. 
@@ -683,26 +693,26 @@ sub kernel_reset() # sub lcov_copy_single($$) { - my ($from, $to) = @_; - my $content; - local $/; - local *HANDLE; - - open(HANDLE, "<", $from) or die("ERROR: cannot read $from: $!\n"); - $content = <HANDLE>; - close(HANDLE); - open(HANDLE, ">", $to) or die("ERROR: cannot write $from: $!\n"); - if (defined($content)) { - print(HANDLE $content); - } - close(HANDLE); + my ($from, $to) = @_; + my $content; + local $/; + local *HANDLE; + + open(HANDLE, "<", $from) or die("cannot read $from: $!\n"); + $content = <HANDLE>; + close(HANDLE); + open(HANDLE, ">", $to) or die("cannot write $to: $!\n"); + if (defined($content)) { + print(HANDLE $content); + } + close(HANDLE); } # # lcov_find(dir, function, data[, extension, ...]) # # Search DIR for files and directories whose name matches PATTERN and run -# FUNCTION for each match. If not pattern is specified, match all names. +# FUNCTION for each match. If no pattern is specified, match all names. # # FUNCTION has the following prototype: # function(dir, relative_name, data) @@ -714,30 +724,31 @@ sub lcov_copy_single($$) # sub lcov_find($$$;@) { - my ($dir, $fn, $data, @pattern) = @_; - my $result; - my $_fn = sub { - my $filename = $File::Find::name; - - if (defined($result)) { - return; - } - $filename = abs2rel($filename, $dir); - foreach (@pattern) { - if ($filename =~ /$_/) { - goto ok; - } - } - return; - ok: - $result = &$fn($dir, $filename, $data); - }; - if (scalar(@pattern) == 0) { - @pattern = ".*"; - } - find( { wanted => $_fn, no_chdir => 1 }, $dir); - - return $result; + my ($dir, $fn, $data, @pattern) = @_; + my $result; + my $_fn = sub { + my $filename = $File::Find::name; + + if (defined($result)) { + return; + } + $filename = abs2rel($filename, $dir); + foreach (@pattern) { + if (($lcovutil::case_insensitive && $filename =~ /$_/i) || + (!$lcovutil::case_insensitive && $filename =~ /$_/)) { + goto ok; + } + } + return; + ok: + $result = &$fn($dir, $filename, $data); + }; + if (scalar(@pattern) == 0) { + @pattern = ".*"; + } + find({wanted => $_fn, no_chdir => 1}, $dir); + + return $result; } # @@ -748,35 +759,35 @@ sub lcov_find($$$;@) sub lcov_copy_fn($$$) { - my ($from, $rel, $to) = @_; - my $absfrom = canonpath(catfile($from, $rel)); - my $absto = canonpath(catfile($to, $rel)); - - if (-d) { - if (! -d $absto) { - mkpath($absto) or - die("ERROR: cannot create directory $absto\n"); - chmod(0700, $absto); - } - } elsif (-l) { - # Copy symbolic link - my $link = readlink($absfrom); - - if (!defined($link)) { - die("ERROR: cannot read link $absfrom: $!\n"); - } - symlink($link, $absto) or - die("ERROR: cannot create link $absto: $!\n"); - } else { - lcov_copy_single($absfrom, $absto); - chmod(0600, $absto); - } - return undef; + my ($from, $rel, $to) = @_; + my $absfrom = canonpath(catfile($from, $rel)); + my $absto = canonpath(catfile($to, $rel)); + + if (-d) { + if (!-d $absto) { + mkpath($absto) or + die("cannot create directory $absto\n"); + chmod(0700, $absto); + } + } elsif (-l) { + # Copy symbolic link + my $link = readlink($absfrom); + + if (!defined($link)) { + die("cannot read link $absfrom: $!\n"); + } + symlink($link, $absto) or + die("cannot create link $absto: $!\n"); + } else { + lcov_copy_single($absfrom, $absto); + chmod(0600, $absto); + } + return undef; } # # lcov_copy(from, to, subdirs) -# +# # Copy all specified SUBDIRS and files from directory FROM to directory TO. For # regular files, copy file contents without checking its size. This is required # to work with seq_file-generated files.
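Since lcov_find() is the workhorse behind the copy, link and graph-search helpers that follow, a short usage sketch may help; it assumes the lcov_find() defined in the hunk above and mirrors the find_graph()/find_graph_cb() pair further down. The callback receives (dir, relative_name, data), and the walk stops as soon as it returns a defined value, so a callback that only counts matches must return undef (the directory path is made up for illustration):

    # Count coverage data files below a build tree with lcov_find().
    my $count    = 0;
    my $count_cb = sub {
        my ($dir, $rel, $data) = @_;    # $data is the \$count reference passed below
        $$data++;
        return undef;                   # keep walking; a defined value would end the search
    };
    lcov_find('/tmp/build', $count_cb, \$count, '\.gcda$', '\.da$');
    print("found $count coverage data files\n");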
@@ -784,13 +795,13 @@ sub lcov_copy_fn($$$) sub lcov_copy($$;@) { - my ($from, $to, @subdirs) = @_; - my @pattern; + my ($from, $to, @subdirs) = @_; + my @pattern; - foreach (@subdirs) { - push(@pattern, "^$_"); - } - lcov_find($from, \&lcov_copy_fn, $to, @pattern); + foreach (@subdirs) { + push(@pattern, "^$_"); + } + lcov_find($from, \&lcov_copy_fn, $to, @pattern); } # @@ -802,104 +813,149 @@ sub lcov_copy($$;@) sub lcov_geninfo(@) { - my (@dir) = @_; - my @param; - - # Capture data - info("Capturing coverage data from ".join(" ", @dir)."\n"); - @param = ("$tool_dir/geninfo", @dir); - if ($output_filename) - { - @param = (@param, "--output-filename", $output_filename); - } - if ($test_name) - { - @param = (@param, "--test-name", $test_name); - } - if ($follow) - { - @param = (@param, "--follow"); - } - if ($quiet) - { - @param = (@param, "--quiet"); - } - if (defined($checksum)) - { - if ($checksum) - { - @param = (@param, "--checksum"); - } - else - { - @param = (@param, "--no-checksum"); - } - } - if ($base_directory) - { - @param = (@param, "--base-directory", $base_directory); - } - if ($no_compat_libtool) - { - @param = (@param, "--no-compat-libtool"); - } - elsif ($compat_libtool) - { - @param = (@param, "--compat-libtool"); - } - if ($gcov_tool) - { - @param = (@param, "--gcov-tool", $gcov_tool); - } - foreach (@opt_ignore_errors) { - @param = (@param, "--ignore-errors", $_); - } - if ($no_recursion) { - @param = (@param, "--no-recursion"); - } - if ($initial) - { - @param = (@param, "--initial"); - } - if ($no_markers) - { - @param = (@param, "--no-markers"); - } - if ($opt_derive_func_data) - { - @param = (@param, "--derive-func-data"); - } - if ($opt_debug) - { - @param = (@param, "--debug"); - } - if (defined($opt_external) && $opt_external) - { - @param = (@param, "--external"); - } - if (defined($opt_external) && !$opt_external) - { - @param = (@param, "--no-external"); - } - if (defined($opt_compat)) { - @param = (@param, "--compat", $opt_compat); - } - if (%opt_rc) { - foreach my $key (keys(%opt_rc)) { - @param = (@param, "--rc", "$key=".$opt_rc{$key}); - } - } - if (defined($opt_config_file)) { - @param = (@param, "--config-file", $opt_config_file); - } - foreach (@include_patterns) { - @param = (@param, "--include", $_); - } - foreach (@exclude_patterns) { - @param = (@param, "--exclude", $_); - } - - system(@param) and exit($? >> 8); + my (@dir) = @_; + my @param; + + # Capture data + info("Capturing coverage data from " . join(" ", @dir) . "\n"); + @param = (File::Spec->catfile($tool_dir, 'geninfo'), @dir); + # make things less confusing for user, by using the name they actually invoked + push(@param, '--toolname', $lcovutil::tool_name); + if ($output_filename) { + push(@param, "--output-filename", $output_filename); + } + if ($test_name) { + push(@param, "--test-name", $test_name); + } + if ($follow) { + push(@param, "--follow"); + } + push(@param, '--msg-log', $lcovutil::message_filename) + if $lcovutil::message_filename; + if ($lcovutil::verbose != 0) { + if ($lcovutil::verbose < 0) { + for (my $i = $lcovutil::verbose; $i < 0; ++$i) { + push(@param, '--quiet'); + } + } else { + for (my $i = 0; $i < $lcovutil::verbose; ++$i) { + push(@param, '--verbose'); + } + } + } + if (defined($verify_checksum)) { + push(@param, $verify_checksum ? 
'--checksum' : '--no-checksum'); + } + foreach my $s (@ReadCurrentSource::source_directories) { + # a bit of a hack: we pushed the --base-directory argument onto + # the source list - and we need to make sure that we only pass + # it to geninfo once. + push(@param, "--source-directory", $s) + unless (defined($base_directory) && $s eq $base_directory); + } + if ($no_compat_libtool) { + push(@param, "--no-compat-libtool"); + } elsif ($compat_libtool) { + push(@param, "--compat-libtool"); + } + if (defined($lcovutil::stop_on_error) && $lcovutil::stop_on_error == 0) { + push(@param, "--keep-going"); + } + if (defined($lcovutil::preserve_intermediates) && + $lcovutil::preserve_intermediates) { + push(@param, "--preserve"); + } + push(@param, "--base-directory", $base_directory) + if $base_directory; + foreach ( + split($lcovutil::split_char, + join($lcovutil::split_char, @lcovutil::opt_ignore_errors)) + ) { + # pass only the 'ignore' options that geninfo understands + push(@param, "--ignore-errors", $_) + if exists($lcovutil::lcovErrors{$_}); + } + if ($no_recursion) { + push(@param, "--no-recursion"); + } + if ($initial) { + push(@param, "--initial"); + } + if ($captureAll) { + push(@param, "--all"); + } + if ($no_markers) { + push(@param, "--no-markers"); + } + if ($opt_derive_func_data) { + push(@param, "--derive-func-data"); + } + for (my $i = 0; $i < $lcovutil::debug; ++$i) { + push(@param, "--debug"); + } + if (defined($opt_external) && $opt_external) { + push(@param, "--external"); + } elsif (defined($lcovutil::opt_no_external) && $lcovutil::opt_no_external) + { + push(@param, "--no-external"); + } + if (defined($opt_compat)) { + push(@param, "--compat", $opt_compat); + } + + if (defined($lcovutil::profile)) { + push(@param, '--profile'); + push(@param, $lcovutil::profile) + if ('' ne $lcovutil::profile); + } + if (defined($lcovutil::maxParallelism)) { + push(@param, '--parallel', $lcovutil::maxParallelism); + } + # memory has not been multiplied by Mb yet - so just pass the integer value + push(@param, '--memory', $lcovutil::maxMemory) + if defined($lcovutil::maxMemory); + push(@param, "--branch-coverage") if $lcovutil::br_coverage; + push(@param, "--mcdc") if $lcovutil::mcdc_coverage; + push(@param, '--fail-under-lines', $lcovutil::fail_under_lines) + if defined($lcovutil::fail_under_lines); + push(@param, '--tempdir', $lcovutil::tempdirname) + if (defined($lcovutil::tempdirname)); + foreach my $listOpt (['--comment', \@lcovutil::comments], + ['--config-file', \@lcovutil::opt_config_files], + ['--rc', \@lcovutil::opt_rc], + ['--build-directory', \@lcovutil::build_directory], + ['--gcov-tool', \@gcov_tool], + ['--demangle-cpp', \@lcovutil::cpp_demangle], + ['--include', \@lcovutil::include_file_patterns], + ['--exclude', \@lcovutil::exclude_file_patterns], + ['--context-script', \@lcovutil::contextCallback], + ['--criteria-script', + \@CoverageCriteria::coverageCriteriaScript + ], + ['--version-script', \@lcovutil::extractVersionScript], + ['--resolve-script', \@lcovutil::resolveCallback], + ['--substitute', \@lcovutil::file_subst_patterns], + ['--omit-lines', \@lcovutil::omit_line_patterns], + ['--erase-functions', + \@lcovutil::exclude_function_patterns + ], + ['--filter', \@lcovutil::opt_filter], + ['--large-file', \@large_files], + ) { + my ($opt, $l) = @$listOpt; + foreach my $v (@$l) { + push(@param, $opt, $v); + } + } + + # windows + # Kind of hacky to fork another script here. 
+ # Probably better/cleaner to move the 'geninfo' functionality into + # a perl module - then use it here and in the geninfo script. + # Maybe someday. + unshift(@param, $lcovutil::interp) if defined($lcovutil::interp); + info("geninfo cmd: '" . join(' ', @param) . "'\n"); + system(@param) and exit($? >> 8); } # @@ -910,16 +966,16 @@ sub lcov_geninfo(@) sub read_file($) { - my ($filename) = @_; - my $content; - local $\; - local *HANDLE; + my ($filename) = @_; + my $content; + local $\; + local *HANDLE; - open(HANDLE, "<", $filename) || return undef; - $content = <HANDLE>; - close(HANDLE); + open(HANDLE, "<", $filename) || return undef; + $content = <HANDLE>; + close(HANDLE); - return $content; + return $content; } # @@ -932,50 +988,49 @@ sub read_file($) sub get_package($) { - my ($file) = @_; - my $dir = create_temp_dir(); - my $gkv; - my $build; - my $cwd = getcwd(); - my $count; - local *HANDLE; - - info("Reading package $file:\n"); - $file = abs_path($file); - chdir($dir); - open(HANDLE, "-|", "tar xvfz '$file' 2>/dev/null") - or die("ERROR: could not process package $file\n"); - $count = 0; - while (<HANDLE>) { - if (/\.da$/ || /\.gcda$/) { - $count++; - } - } - close(HANDLE); - if ($count == 0) { - die("ERROR: no data file found in package $file\n"); - } - info(" data directory .......: $dir\n"); - $build = read_file("$dir/$pkg_build_file"); - if (defined($build)) { - info(" build directory ......: $build\n"); - } - $gkv = read_file("$dir/$pkg_gkv_file"); - if (defined($gkv)) { - $gkv = int($gkv); - if ($gkv != $GKV_PROC && $gkv != $GKV_SYS) { - die("ERROR: unsupported gcov kernel version found ". - "($gkv)\n"); - } - info(" content type .........: kernel data\n"); - info(" gcov kernel version ..: %s\n", $GKV_NAME[$gkv]); - } else { - info(" content type .........: application data\n"); - } - info(" data files ...........: $count\n"); - chdir($cwd); - - return ($dir, $build, $gkv); + my ($file) = @_; + my $dir = create_temp_dir(); + my $gkv; + my $build; + my $cwd = getcwd(); + my $count; + local *HANDLE; + + info("Reading package $file:\n"); + $file = abs_path($file); + chdir($dir); + open(HANDLE, "-|", "tar xvfz '$file' 2>$lcovutil::devnull") or + die("could not process package $file: $!\n"); + $count = 0; + while (<HANDLE>) { + if (/\.da$/ || /\.gcda$/) { + $count++; + } + } + close(HANDLE); + if ($count == 0) { + die("no data file found in package $file\n"); + } + info(" data directory .......: $dir\n"); + $build = read_file(File::Spec->catfile($dir, $pkg_build_file)); + if (defined($build)) { + info(" build directory ......: $build\n"); + } + $gkv = read_file(File::Spec->catfile($dir, $pkg_gkv_file)); + if (defined($gkv)) { + $gkv = int($gkv); + if ($gkv != $GKV_PROC && $gkv != $GKV_SYS) { + die("unsupported gcov kernel version found ($gkv)\n"); + } + info(" content type .........: kernel data\n"); + info(" gcov kernel version ..: %s\n", $GKV_NAME[$gkv]); + } else { + info(" content type .........: application data\n"); + } + info(" data files ...........: $count\n"); + chdir($cwd); + + return ($dir, $build, $gkv); } # @@ -986,14 +1041,14 @@ sub get_package($) sub write_file($$) { - my ($filename, $content) = @_; - local *HANDLE; + my ($filename, $content) = @_; + local *HANDLE; - open(HANDLE, ">", $filename) || return 0; - print(HANDLE $content); - close(HANDLE) || return 0; + open(HANDLE, ">", $filename) || return 0; + print(HANDLE $content); + close(HANDLE) || return 0; - return 1; + return 1; } # count_package_data(filename) @@ -1003,89 +1058,87 @@ sub write_file($$) sub count_package_data($) { - my
($filename) = @_; - local *HANDLE; - my $count = 0; - - open(HANDLE, "-|", "tar tfz '$filename'") or return undef; - while (<HANDLE>) { - if (/\.da$/ || /\.gcda$/) { - $count++; - } - } - close(HANDLE); - return $count; + my ($filename) = @_; + local *HANDLE; + my $count = 0; + + open(HANDLE, "-|", "tar tfz '$filename'") or return undef; + while (<HANDLE>) { + if (/\.da$/ || /\.gcda$/) { + $count++; + } + } + close(HANDLE); + return $count; } # # create_package(package_file, source_directory, build_directory[, -# kernel_gcov_version]) +# kernel_gcov_version]) # # Store unprocessed coverage data files from source_directory to package_file. # sub create_package($$$;$) { - my ($file, $dir, $build, $gkv) = @_; - my $cwd = getcwd(); - - # Check for availability of tar tool first - system("tar --help > /dev/null") - and die("ERROR: tar command not available\n"); - - # Print information about the package - info("Creating package $file:\n"); - info(" data directory .......: $dir\n"); - - # Handle build directory - if (defined($build)) { - info(" build directory ......: $build\n"); - write_file("$dir/$pkg_build_file", $build) - or die("ERROR: could not write to ". - "$dir/$pkg_build_file\n"); - } - - # Handle gcov kernel version data - if (defined($gkv)) { - info(" content type .........: kernel data\n"); - info(" gcov kernel version ..: %s\n", $GKV_NAME[$gkv]); - write_file("$dir/$pkg_gkv_file", $gkv) - or die("ERROR: could not write to ". - "$dir/$pkg_gkv_file\n"); - } else { - info(" content type .........: application data\n"); - } - - # Create package - $file = abs_path($file); - chdir($dir); - system("tar cfz $file .") - and die("ERROR: could not create package $file\n"); - chdir($cwd); - - # Remove temporary files - unlink("$dir/$pkg_build_file"); - unlink("$dir/$pkg_gkv_file"); - - # Show number of data files - if (!$quiet) { - my $count = count_package_data($file); - - if (defined($count)) { - info(" data files ...........: $count\n"); - } - } + my ($file, $dir, $build, $gkv) = @_; + my $cwd = getcwd(); + + # Check for availability of tar tool first + system("tar --help > $lcovutil::devnull") and + die("tar command not available\n"); + + # Print information about the package + info("Creating package $file:\n"); + info(" data directory .......: $dir\n"); + + # Handle build directory + if (defined($build)) { + info(" build directory ......: $build\n"); + write_file(File::Spec->catfile($dir, $pkg_build_file), $build) or + die("could not write to $dir/$pkg_build_file\n"); + } + + # Handle gcov kernel version data + if (defined($gkv)) { + info(" content type .........: kernel data\n"); + info(" gcov kernel version ..: %s\n", $GKV_NAME[$gkv]); + write_file(File::Spec->catfile($dir, $pkg_gkv_file), $gkv) or + die("could not write to $dir/$pkg_gkv_file\n"); + } else { + info(" content type .........: application data\n"); + } + + # Create package + $file = abs_path($file); + chdir($dir); + system("tar cfz $file .") and + die("could not create package $file\n"); + chdir($cwd); + + # Remove temporary files + unlink(File::Spec->catfile($dir, $pkg_build_file)); + unlink(File::Spec->catfile($dir, $pkg_gkv_file)); + + # Show number of data files + if ($lcovutil::verbose >= 0) { + my $count = count_package_data($file); + + if (defined($count)) { + info(" data files ...........: $count\n"); + } + } } sub find_link_fn($$$) { - my ($from, $rel, $filename) = @_; - my $absfile = catfile($from, $rel, $filename); + my ($from, $rel, $filename) = @_; + my $absfile = catfile($from, $rel, $filename); - if (-l $absfile) { - return
$absfile; - } - return undef; + if (-l $absfile) { + return $absfile; + } + return undef; } # @@ -1098,29 +1151,29 @@ sub find_link_fn($$$) sub get_base($) { - my ($dir) = @_; - my $marker = "kernel/gcov/base.gcno"; - my $markerfile; - my $sys; - my $obj; - my $link; - - $markerfile = lcov_find($dir, \&find_link_fn, $marker); - if (!defined($markerfile)) { - return (undef, undef); - } - - # sys base is parent of parent of markerfile. - $sys = abs2rel(dirname(dirname(dirname($markerfile))), $dir); - - # obj base is parent of parent of markerfile link target. - $link = readlink($markerfile); - if (!defined($link)) { - die("ERROR: could not read $markerfile\n"); - } - $obj = dirname(dirname(dirname($link))); - - return ($sys, $obj); + my ($dir) = @_; + my $marker = "kernel/gcov/base.gcno"; + my $markerfile; + my $sys; + my $obj; + my $link; + + $markerfile = lcov_find($dir, \&find_link_fn, $marker); + if (!defined($markerfile)) { + return (undef, undef); + } + + # sys base is parent of parent of markerfile. + $sys = abs2rel(dirname(dirname(dirname($markerfile))), $dir); + + # obj base is parent of parent of markerfile link target. + $link = readlink($markerfile); + if (!defined($link)) { + die("could not read $markerfile\n"); + } + $obj = dirname(dirname(dirname($link))); + + return ($sys, $obj); } # @@ -1131,51 +1184,51 @@ sub get_base($) sub apply_base_dir($$$@) { - my ($data, $base, $build, @dirs) = @_; - my $dir; - my @result; - - foreach $dir (@dirs) { - # Is directory path relative to data directory? - if (-d catdir($data, $dir)) { - push(@result, $dir); - next; - } - # Relative to the auto-detected base-directory? - if (defined($base)) { - if (-d catdir($data, $base, $dir)) { - push(@result, catdir($base, $dir)); - next; - } - } - # Relative to the specified base-directory? - if (defined($base_directory)) { - if (file_name_is_absolute($base_directory)) { - $base = abs2rel($base_directory, rootdir()); - } else { - $base = $base_directory; - } - if (-d catdir($data, $base, $dir)) { - push(@result, catdir($base, $dir)); - next; - } - } - # Relative to the build directory? - if (defined($build)) { - if (file_name_is_absolute($build)) { - $base = abs2rel($build, rootdir()); - } else { - $base = $build; - } - if (-d catdir($data, $base, $dir)) { - push(@result, catdir($base, $dir)); - next; - } - } - die("ERROR: subdirectory $dir not found\n". - "Please use -b to specify the correct directory\n"); - } - return @result; + my ($data, $base, $build, @dirs) = @_; + my $dir; + my @result; + + foreach $dir (@dirs) { + # Is directory path relative to data directory? + if (-d catdir($data, $dir)) { + push(@result, $dir); + next; + } + # Relative to the auto-detected base-directory? + if (defined($base)) { + if (-d catdir($data, $base, $dir)) { + push(@result, catdir($base, $dir)); + next; + } + } + # Relative to the specified base-directory? + if (defined($base_directory)) { + if (file_name_is_absolute($base_directory)) { + $base = abs2rel($base_directory, rootdir()); + } else { + $base = $base_directory; + } + if (-d catdir($data, $base, $dir)) { + push(@result, catdir($base, $dir)); + next; + } + } + # Relative to the build directory? + if (defined($build)) { + if (file_name_is_absolute($build)) { + $base = abs2rel($build, rootdir()); + } else { + $base = $build; + } + if (-d catdir($data, $base, $dir)) { + push(@result, catdir($base, $dir)); + next; + } + } + die("subdirectory $dir not found\n" . 
+ "Please use -b to specify the correct directory\n"); + } + return @result; } # @@ -1188,13 +1241,13 @@ sub apply_base_dir($$$@) sub copy_gcov_dir($;@) { - my ($data, @dirs) = @_; - my $tempdir = create_temp_dir(); + my ($data, @dirs) = @_; + my $tempdir = create_temp_dir(); - info("Copying data to temporary directory $tempdir\n"); - lcov_copy($data, $tempdir, @dirs); + info("Copying data to temporary directory $tempdir\n"); + lcov_copy($data, $tempdir, @dirs); - return $tempdir; + return $tempdir; } # @@ -1207,32 +1260,32 @@ sub copy_gcov_dir($;@) sub kernel_capture_initial() { - my $build; - my $source; - my @params; - - if (defined($base_directory)) { - $build = $base_directory; - $source = "specified"; - } else { - (undef, $build) = get_base($gcov_dir); - if (!defined($build)) { - die("ERROR: could not auto-detect build directory.\n". - "Please use -b to specify the build directory\n"); - } - $source = "auto-detected"; - } - info("Using $build as kernel build directory ($source)\n"); - # Build directory needs to be passed to geninfo - $base_directory = $build; - if (@kernel_directory) { - foreach my $dir (@kernel_directory) { - push(@params, "$build/$dir"); - } - } else { - push(@params, $build); - } - lcov_geninfo(@params); + my $build; + my $source; + my @params; + + if (defined($base_directory)) { + $build = $base_directory; + $source = "specified"; + } else { + (undef, $build) = get_base($gcov_dir); + if (!defined($build)) { + die("could not auto-detect build directory.\n" . + "Please use -b to specify the build directory\n"); + } + $source = "auto-detected"; + } + info("Using $build as kernel build directory ($source)\n"); + # Build directory needs to be passed to geninfo + $base_directory = $build; + if (@kernel_directory) { + foreach my $dir (@kernel_directory) { + push(@params, File::Scpec->catdir($build, $dir)); + } + } else { + push(@params, $build); + } + lcov_geninfo(@params); } # @@ -1244,16 +1297,16 @@ sub kernel_capture_initial() sub kernel_capture_from_dir($$$) { - my ($dir, $gkv, $build) = @_; - - # Create package or coverage file - if (defined($to_package)) { - create_package($to_package, $dir, $build, $gkv); - } else { - # Build directory needs to be passed to geninfo - $base_directory = $build; - lcov_geninfo($dir); - } + my ($dir, $gkv, $build) = @_; + + # Create package or coverage file + if (defined($to_package)) { + create_package($to_package, $dir, $build, $gkv); + } else { + # Build directory needs to be passed to geninfo + $base_directory = $build; + lcov_geninfo($dir); + } } # @@ -1266,34 +1319,34 @@ sub kernel_capture_from_dir($$$) sub adjust_kernel_dir($$) { - my ($dir, $build) = @_; - my ($sys_base, $build_auto) = get_base($dir); - - if (!defined($build)) { - $build = $build_auto; - } - if (!defined($build)) { - die("ERROR: could not auto-detect build directory.\n". - "Please use -b to specify the build directory\n"); - } - # Make @kernel_directory relative to sysfs base - if (@kernel_directory) { - @kernel_directory = apply_base_dir($dir, $sys_base, $build, - @kernel_directory); - } - return $build; + my ($dir, $build) = @_; + my ($sys_base, $build_auto) = get_base($dir); + + if (!defined($build)) { + $build = $build_auto; + } + if (!defined($build)) { + die("could not auto-detect build directory.\n" . 
+ "Please use -b to specify the build directory\n"); + } + # Make @kernel_directory relative to sysfs base + if (@kernel_directory) { + @kernel_directory = + apply_base_dir($dir, $sys_base, $build, @kernel_directory); + } + return $build; } sub kernel_capture() { - my $data_dir; - my $build = $base_directory; - - if ($gcov_gkv == $GKV_SYS) { - $build = adjust_kernel_dir($gcov_dir, $build); - } - $data_dir = copy_gcov_dir($gcov_dir, @kernel_directory); - kernel_capture_from_dir($data_dir, $gcov_gkv, $build); + my $data_dir; + my $build = $base_directory; + + if ($gcov_gkv == $GKV_SYS) { + $build = adjust_kernel_dir($gcov_dir, $build); + } + $data_dir = copy_gcov_dir($gcov_dir, @kernel_directory); + kernel_capture_from_dir($data_dir, $gcov_gkv, $build); } # @@ -1304,31 +1357,29 @@ sub kernel_capture() sub link_data_cb($$$) { - my ($datadir, $rel, $graphdir) = @_; - my $absfrom = catfile($datadir, $rel); - my $absto = catfile($graphdir, $rel); - my $base; - my $dir; - - if (-e $absto) { - die("ERROR: could not create symlink at $absto: ". - "File already exists!\n"); - } - if (-l $absto) { - # Broken link - possibly from an interrupted earlier run - unlink($absto); - } - - # Check for graph file - $base = $absto; - $base =~ s/\.(gcda|da)$//; - if (! -e $base.".gcno" && ! -e $base.".bbg" && ! -e $base.".bb") { - die("ERROR: No graph file found for $absfrom in ". - dirname($base)."!\n"); - } - - symlink($absfrom, $absto) or - die("ERROR: could not create symlink at $absto: $!\n"); + my ($datadir, $rel, $graphdir) = @_; + my $absfrom = catfile($datadir, $rel); + my $absto = catfile($graphdir, $rel); + my $base; + my $dir; + + if (-e $absto) { + die("could not create symlink at $absto: " . "File already exists!\n"); + } + if (-l $absto) { + # Broken link - possibly from an interrupted earlier run + unlink($absto); + } + + # Check for graph file + $base = $absto; + $base =~ s/\.(gcda|da)$//; + if (!-e $base . ".gcno") { + die("No graph file found for $absfrom in " . dirname($base) . 
"!\n"); + } + + symlink($absfrom, $absto) or + die("could not create symlink at $absto: $!\n"); } # @@ -1339,17 +1390,17 @@ sub link_data_cb($$$) sub unlink_data_cb($$$) { - my ($datadir, $rel, $graphdir) = @_; - my $absfrom = catfile($datadir, $rel); - my $absto = catfile($graphdir, $rel); - my $target; + my ($datadir, $rel, $graphdir) = @_; + my $absfrom = catfile($datadir, $rel); + my $absto = catfile($graphdir, $rel); + my $target; - return if (!-l $absto); - $target = readlink($absto); - return if (!defined($target) || $target ne $absfrom); + return if (!-l $absto); + $target = readlink($absto); + return if (!defined($target) || $target ne $absfrom); - unlink($absto) or - warn("WARNING: could not remove symlink $absto: $!\n"); + unlink($absto) or + warn("could not remove symlink $absto: $!\n"); } # @@ -1361,17 +1412,15 @@ sub unlink_data_cb($$$) sub link_data($$$) { - my ($datadir, $graphdir, $create) = @_; - - $datadir = abs_path($datadir); - $graphdir = abs_path($graphdir); - if ($create) { - lcov_find($datadir, \&link_data_cb, $graphdir, '\.gcda$', - '\.da$'); - } else { - lcov_find($datadir, \&unlink_data_cb, $graphdir, '\.gcda$', - '\.da$'); - } + my ($datadir, $graphdir, $create) = @_; + + $datadir = abs_path($datadir); + $graphdir = abs_path($graphdir); + if ($create) { + lcov_find($datadir, \&link_data_cb, $graphdir, '\.gcda$', '\.da$'); + } else { + lcov_find($datadir, \&unlink_data_cb, $graphdir, '\.gcda$', '\.da$'); + } } # @@ -1382,9 +1431,9 @@ sub link_data($$$) sub find_graph_cb($$$) { - my ($dir, $rel, $count_ref) = @_; + my ($dir, $rel, $count_ref) = @_; - ($$count_ref)++; + ($$count_ref)++; } # @@ -1395,12 +1444,12 @@ sub find_graph_cb($$$) sub find_graph($) { - my ($dir) = @_; - my $count = 0; + my ($dir) = @_; + my $count = 0; - lcov_find($dir, \&find_graph_cb, \$count, '\.gcno$', '\.bb$', '\.bbg$'); + lcov_find($dir, \&find_graph_cb, \$count, '\.gcno$'); - return $count > 0 ? 1 : 0; + return $count > 0 ? 1 : 0; } # @@ -1412,1424 +1461,155 @@ sub find_graph($) sub package_capture() { - my $dir; - my $build; - my $gkv; - - ($dir, $build, $gkv) = get_package($from_package); - - # Check for build directory - if (defined($base_directory)) { - if (defined($build)) { - info("Using build directory specified by -b.\n"); - } - $build = $base_directory; - } - - # Do the actual capture - if (defined($gkv)) { - if ($gkv == $GKV_SYS) { - $build = adjust_kernel_dir($dir, $build); - } - if (@kernel_directory) { - $dir = copy_gcov_dir($dir, @kernel_directory); - } - kernel_capture_from_dir($dir, $gkv, $build); - } else { - # Build directory needs to be passed to geninfo - $base_directory = $build; - if (find_graph($dir)) { - # Package contains graph files - collect from there - lcov_geninfo($dir); - } else { - # No graph files found, link data files next to - # graph files - link_data($dir, $base_directory, 1); - lcov_geninfo($base_directory); - link_data($dir, $base_directory, 0); - } - } -} - - -# -# info(printf_parameter) -# -# Use printf to write PRINTF_PARAMETER to stdout only when the $quiet flag -# is not set. -# - -sub info(@) -{ - if (!$quiet) - { - # Print info string - if (!$data_stdout) - { - printf(@_) - } - else - { - # Don't interfere with the .info output to STDOUT - printf(STDERR @_); - } - } -} - - -# -# create_temp_dir() -# -# Create a temporary directory and return its path. -# -# Die on error. 
-# - -sub create_temp_dir() -{ - my $dir; - - if (defined($tmp_dir)) { - $dir = tempdir(DIR => $tmp_dir, CLEANUP => 1); - } else { - $dir = tempdir(CLEANUP => 1); - } - if (!defined($dir)) { - die("ERROR: cannot create temporary directory\n"); - } - push(@temp_dirs, $dir); - - return $dir; -} - -sub compress_brcount($) -{ - my ($brcount) = @_; - my $db; - - $db = brcount_to_db($brcount); - return db_to_brcount($db, $brcount); -} - -sub get_br_found_and_hit($) -{ - my ($brcount) = @_; - my $db; - - $db = brcount_to_db($brcount); - - return brcount_db_get_found_and_hit($db); -} - - -# -# read_info_file(info_filename) -# -# Read in the contents of the .info file specified by INFO_FILENAME. Data will -# be returned as a reference to a hash containing the following mappings: -# -# %result: for each filename found in file -> \%data -# -# %data: "test" -> \%testdata -# "sum" -> \%sumcount -# "func" -> \%funcdata -# "found" -> $lines_found (number of instrumented lines found in file) -# "hit" -> $lines_hit (number of executed lines in file) -# "f_found" -> $fn_found (number of instrumented functions found in file) -# "f_hit" -> $fn_hit (number of executed functions in file) -# "b_found" -> $br_found (number of instrumented branches found in file) -# "b_hit" -> $br_hit (number of executed branches in file) -# "check" -> \%checkdata -# "testfnc" -> \%testfncdata -# "sumfnc" -> \%sumfnccount -# "testbr" -> \%testbrdata -# "sumbr" -> \%sumbrcount -# -# %testdata : name of test affecting this file -> \%testcount -# %testfncdata: name of test affecting this file -> \%testfnccount -# %testbrdata: name of test affecting this file -> \%testbrcount -# -# %testcount : line number -> execution count for a single test -# %testfnccount: function name -> execution count for a single test -# %testbrcount : line number -> branch coverage data for a single test -# %sumcount : line number -> execution count for all tests -# %sumfnccount : function name -> execution count for all tests -# %sumbrcount : line number -> branch coverage data for all tests -# %funcdata : function name -> line number -# %checkdata : line number -> checksum of source code line -# $brdata : text "block,branch,taken:..." -# -# Note that .info file sections referring to the same file and test name -# will automatically be combined by adding all execution counts. -# -# Note that if INFO_FILENAME ends with ".gz", it is assumed that the file -# is compressed using GZIP. If available, GUNZIP will be used to decompress -# this file. -# -# Die on error. 
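As a concrete reference for the mappings documented above, a single section of such a tracefile looks like the following (test name, path and counts are invented; only the record types that the pre-refactor parser below actually matches are shown):

    TN:example_test
    SF:/home/user/project/src/math.c
    FN:12,add
    FNDA:3,add
    DA:12,3
    DA:13,3
    DA:15,0
    BRDA:13,0,0,2
    BRDA:13,0,1,-
    end_of_record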
-# - -sub read_info_file($) -{ - my $tracefile = $_[0]; # Name of tracefile - my %result; # Resulting hash: file -> data - my $data; # Data handle for current entry - my $testdata; # " " - my $testcount; # " " - my $sumcount; # " " - my $funcdata; # " " - my $checkdata; # " " - my $testfncdata; - my $testfnccount; - my $sumfnccount; - my $testbrdata; - my $testbrcount; - my $sumbrcount; - my $line; # Current line read from .info file - my $testname; # Current test name - my $filename; # Current filename - my $hitcount; # Count for lines hit - my $count; # Execution count of current line - my $negative; # If set, warn about negative counts - my $changed_testname; # If set, warn about changed testname - my $line_checksum; # Checksum of current line - local *INFO_HANDLE; # Filehandle for .info file - - info("Reading tracefile $tracefile\n"); - - # Check if file exists and is readable - stat($_[0]); - if (!(-r _)) - { - die("ERROR: cannot read file $_[0]!\n"); - } - - # Check if this is really a plain file - if (!(-f _)) - { - die("ERROR: not a plain file: $_[0]!\n"); - } - - # Check for .gz extension - if ($_[0] =~ /\.gz$/) - { - # Check for availability of GZIP tool - system_no_output(1, "gunzip" ,"-h") - and die("ERROR: gunzip command not available!\n"); - - # Check integrity of compressed file - system_no_output(1, "gunzip", "-t", $_[0]) - and die("ERROR: integrity check failed for ". - "compressed file $_[0]!\n"); - - # Open compressed file - open(INFO_HANDLE, "-|", "gunzip -c '$_[0]'") - or die("ERROR: cannot start gunzip to decompress ". - "file $_[0]!\n"); - } - else - { - # Open decompressed file - open(INFO_HANDLE, "<", $_[0]) - or die("ERROR: cannot read file $_[0]!\n"); - } - - $testname = ""; - while () - { - chomp($_); - $line = $_; - - # Switch statement - foreach ($line) - { - /^TN:([^,]*)(,diff)?/ && do - { - # Test name information found - $testname = defined($1) ? $1 : ""; - if ($testname =~ s/\W/_/g) - { - $changed_testname = 1; - } - $testname .= $2 if (defined($2)); - last; - }; - - /^[SK]F:(.*)/ && do - { - # Filename information found - # Retrieve data for new entry - $filename = $1; - - $data = $result{$filename}; - ($testdata, $sumcount, $funcdata, $checkdata, - $testfncdata, $sumfnccount, $testbrdata, - $sumbrcount) = - get_info_entry($data); - - if (defined($testname)) - { - $testcount = $testdata->{$testname}; - $testfnccount = $testfncdata->{$testname}; - $testbrcount = $testbrdata->{$testname}; - } - else - { - $testcount = {}; - $testfnccount = {}; - $testbrcount = {}; - } - last; - }; - - /^DA:(\d+),(-?\d+)(,[^,\s]+)?/ && do - { - # Fix negative counts - $count = $2 < 0 ? 0 : $2; - if ($2 < 0) - { - $negative = 1; - } - # Execution count found, add to structure - # Add summary counts - $sumcount->{$1} += $count; - - # Add test-specific counts - if (defined($testname)) - { - $testcount->{$1} += $count; - } - - # Store line checksum if available - if (defined($3)) - { - $line_checksum = substr($3, 1); - - # Does it match a previous definition - if (defined($checkdata->{$1}) && - ($checkdata->{$1} ne - $line_checksum)) - { - die("ERROR: checksum mismatch ". 
- "at $filename:$1\n"); - } - - $checkdata->{$1} = $line_checksum; - } - last; - }; - - /^FN:(\d+),([^,]+)/ && do - { - last if (!$func_coverage); - - # Function data found, add to structure - $funcdata->{$2} = $1; - - # Also initialize function call data - if (!defined($sumfnccount->{$2})) { - $sumfnccount->{$2} = 0; - } - if (defined($testname)) - { - if (!defined($testfnccount->{$2})) { - $testfnccount->{$2} = 0; - } - } - last; - }; - - /^FNDA:(\d+),([^,]+)/ && do - { - last if (!$func_coverage); - - # Function call count found, add to structure - # Add summary counts - $sumfnccount->{$2} += $1; - - # Add test-specific counts - if (defined($testname)) - { - $testfnccount->{$2} += $1; - } - last; - }; - - /^BRDA:(\d+),(\d+),(\d+),(\d+|-)/ && do { - # Branch coverage data found - my ($line, $block, $branch, $taken) = - ($1, $2, $3, $4); - - last if (!$br_coverage); - $sumbrcount->{$line} .= - "$block,$branch,$taken:"; - - # Add test-specific counts - if (defined($testname)) { - $testbrcount->{$line} .= - "$block,$branch,$taken:"; - } - last; - }; - - /^end_of_record/ && do - { - # Found end of section marker - if ($filename) - { - # Store current section data - if (defined($testname)) - { - $testdata->{$testname} = - $testcount; - $testfncdata->{$testname} = - $testfnccount; - $testbrdata->{$testname} = - $testbrcount; - } - - set_info_entry($data, $testdata, - $sumcount, $funcdata, - $checkdata, $testfncdata, - $sumfnccount, - $testbrdata, - $sumbrcount); - $result{$filename} = $data; - last; - } - }; - - # default - last; - } - } - close(INFO_HANDLE); - - # Calculate hit and found values for lines and functions of each file - foreach $filename (keys(%result)) - { - $data = $result{$filename}; - - ($testdata, $sumcount, undef, undef, $testfncdata, - $sumfnccount, $testbrdata, $sumbrcount) = - get_info_entry($data); - - # Filter out empty files - if (scalar(keys(%{$sumcount})) == 0) - { - delete($result{$filename}); - next; - } - # Filter out empty test cases - foreach $testname (keys(%{$testdata})) - { - if (!defined($testdata->{$testname}) || - scalar(keys(%{$testdata->{$testname}})) == 0) - { - delete($testdata->{$testname}); - delete($testfncdata->{$testname}); - } - } - - $data->{"found"} = scalar(keys(%{$sumcount})); - $hitcount = 0; - - foreach (keys(%{$sumcount})) - { - if ($sumcount->{$_} > 0) { $hitcount++; } - } - - $data->{"hit"} = $hitcount; - - # Get found/hit values for function call data - $data->{"f_found"} = scalar(keys(%{$sumfnccount})); - $hitcount = 0; - - foreach (keys(%{$sumfnccount})) { - if ($sumfnccount->{$_} > 0) { - $hitcount++; - } - } - $data->{"f_hit"} = $hitcount; - - # Combine branch data for the same branches - (undef, $data->{"b_found"}, $data->{"b_hit"}) = - compress_brcount($sumbrcount); - foreach $testname (keys(%{$testbrdata})) { - compress_brcount($testbrdata->{$testname}); - } - } - - if (scalar(keys(%result)) == 0) - { - die("ERROR: no valid records found in tracefile $tracefile\n"); - } - if ($negative) - { - warn("WARNING: negative counts found in tracefile ". - "$tracefile\n"); - } - if ($changed_testname) - { - warn("WARNING: invalid characters removed from testname in ". - "tracefile $tracefile\n"); - } - - return(\%result); -} - - -# -# get_info_entry(hash_ref) -# -# Retrieve data from an entry of the structure generated by read_info_file(). 
-# Return a list of references to hashes: -# (test data hash ref, sum count hash ref, funcdata hash ref, checkdata hash -# ref, testfncdata hash ref, sumfnccount hash ref, testbrdata hash ref, -# sumbrcount hash ref, lines found, lines hit, functions found, -# functions hit, branches found, branches hit) -# - -sub get_info_entry($) -{ - my $testdata_ref = $_[0]->{"test"}; - my $sumcount_ref = $_[0]->{"sum"}; - my $funcdata_ref = $_[0]->{"func"}; - my $checkdata_ref = $_[0]->{"check"}; - my $testfncdata = $_[0]->{"testfnc"}; - my $sumfnccount = $_[0]->{"sumfnc"}; - my $testbrdata = $_[0]->{"testbr"}; - my $sumbrcount = $_[0]->{"sumbr"}; - my $lines_found = $_[0]->{"found"}; - my $lines_hit = $_[0]->{"hit"}; - my $f_found = $_[0]->{"f_found"}; - my $f_hit = $_[0]->{"f_hit"}; - my $br_found = $_[0]->{"b_found"}; - my $br_hit = $_[0]->{"b_hit"}; - - return ($testdata_ref, $sumcount_ref, $funcdata_ref, $checkdata_ref, - $testfncdata, $sumfnccount, $testbrdata, $sumbrcount, - $lines_found, $lines_hit, $f_found, $f_hit, - $br_found, $br_hit); -} - - -# -# set_info_entry(hash_ref, testdata_ref, sumcount_ref, funcdata_ref, -# checkdata_ref, testfncdata_ref, sumfcncount_ref, -# testbrdata_ref, sumbrcount_ref[,lines_found, -# lines_hit, f_found, f_hit, $b_found, $b_hit]) -# -# Update the hash referenced by HASH_REF with the provided data references. -# - -sub set_info_entry($$$$$$$$$;$$$$$$) -{ - my $data_ref = $_[0]; - - $data_ref->{"test"} = $_[1]; - $data_ref->{"sum"} = $_[2]; - $data_ref->{"func"} = $_[3]; - $data_ref->{"check"} = $_[4]; - $data_ref->{"testfnc"} = $_[5]; - $data_ref->{"sumfnc"} = $_[6]; - $data_ref->{"testbr"} = $_[7]; - $data_ref->{"sumbr"} = $_[8]; - - if (defined($_[9])) { $data_ref->{"found"} = $_[9]; } - if (defined($_[10])) { $data_ref->{"hit"} = $_[10]; } - if (defined($_[11])) { $data_ref->{"f_found"} = $_[11]; } - if (defined($_[12])) { $data_ref->{"f_hit"} = $_[12]; } - if (defined($_[13])) { $data_ref->{"b_found"} = $_[13]; } - if (defined($_[14])) { $data_ref->{"b_hit"} = $_[14]; } -} - - -# -# add_counts(data1_ref, data2_ref) -# -# DATA1_REF and DATA2_REF are references to hashes containing a mapping -# -# line number -> execution count -# -# Return a list (RESULT_REF, LINES_FOUND, LINES_HIT) where RESULT_REF -# is a reference to a hash containing the combined mapping in which -# execution counts are added. 
-# - -sub add_counts($$) -{ - my $data1_ref = $_[0]; # Hash 1 - my $data2_ref = $_[1]; # Hash 2 - my %result; # Resulting hash - my $line; # Current line iteration scalar - my $data1_count; # Count of line in hash1 - my $data2_count; # Count of line in hash2 - my $found = 0; # Total number of lines found - my $hit = 0; # Number of lines with a count > 0 - - foreach $line (keys(%$data1_ref)) - { - $data1_count = $data1_ref->{$line}; - $data2_count = $data2_ref->{$line}; - - # Add counts if present in both hashes - if (defined($data2_count)) { $data1_count += $data2_count; } - - # Store sum in %result - $result{$line} = $data1_count; - - $found++; - if ($data1_count > 0) { $hit++; } - } - - # Add lines unique to data2_ref - foreach $line (keys(%$data2_ref)) - { - # Skip lines already in data1_ref - if (defined($data1_ref->{$line})) { next; } - - # Copy count from data2_ref - $result{$line} = $data2_ref->{$line}; - - $found++; - if ($result{$line} > 0) { $hit++; } - } - - return (\%result, $found, $hit); -} - - -# -# merge_checksums(ref1, ref2, filename) -# -# REF1 and REF2 are references to hashes containing a mapping -# -# line number -> checksum -# -# Merge checksum lists defined in REF1 and REF2 and return reference to -# resulting hash. Die if a checksum for a line is defined in both hashes -# but does not match. -# - -sub merge_checksums($$$) -{ - my $ref1 = $_[0]; - my $ref2 = $_[1]; - my $filename = $_[2]; - my %result; - my $line; - - foreach $line (keys(%{$ref1})) - { - if (defined($ref2->{$line}) && - ($ref1->{$line} ne $ref2->{$line})) - { - die("ERROR: checksum mismatch at $filename:$line\n"); - } - $result{$line} = $ref1->{$line}; - } - - foreach $line (keys(%{$ref2})) - { - $result{$line} = $ref2->{$line}; - } - - return \%result; -} - - -# -# merge_func_data(funcdata1, funcdata2, filename) -# - -sub merge_func_data($$$) -{ - my ($funcdata1, $funcdata2, $filename) = @_; - my %result; - my $func; - - if (defined($funcdata1)) { - %result = %{$funcdata1}; - } - - foreach $func (keys(%{$funcdata2})) { - my $line1 = $result{$func}; - my $line2 = $funcdata2->{$func}; - - if (defined($line1) && ($line1 != $line2)) { - warn("WARNING: function data mismatch at ". - "$filename:$line2\n"); - next; - } - $result{$func} = $line2; - } - - return \%result; -} - - -# -# add_fnccount(fnccount1, fnccount2) -# -# Add function call count data. Return list (fnccount_added, f_found, f_hit) -# - -sub add_fnccount($$) -{ - my ($fnccount1, $fnccount2) = @_; - my %result; - my $f_found; - my $f_hit; - my $function; - - if (defined($fnccount1)) { - %result = %{$fnccount1}; - } - foreach $function (keys(%{$fnccount2})) { - $result{$function} += $fnccount2->{$function}; - } - $f_found = scalar(keys(%result)); - $f_hit = 0; - foreach $function (keys(%result)) { - if ($result{$function} > 0) { - $f_hit++; - } - } - - return (\%result, $f_found, $f_hit); -} - -# -# add_testfncdata(testfncdata1, testfncdata2) -# -# Add function call count data for several tests. Return reference to -# added_testfncdata. 
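# A runnable sketch of the merge performed by add_counts() above, using two
# invented line-count hashes (line number -> execution count):
use strict;
use warnings;

my %data1  = (1 => 2, 2 => 0);
my %data2  = (2 => 3, 5 => 0);
my %merged = %data1;
$merged{$_} += $data2{$_} for keys %data2;                    # sum shared lines
my $found = scalar(keys %merged);                             # 3 lines found
my $hit   = scalar(grep { $merged{$_} > 0 } keys %merged);    # 2 lines hit
print "line 2: $merged{2}, found=$found, hit=$hit\n";         # "line 2: 3, found=3, hit=2"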
-# - -sub add_testfncdata($$) -{ - my ($testfncdata1, $testfncdata2) = @_; - my %result; - my $testname; - - foreach $testname (keys(%{$testfncdata1})) { - if (defined($testfncdata2->{$testname})) { - my $fnccount; - - # Function call count data for this testname exists - # in both data sets: merge - ($fnccount) = add_fnccount( - $testfncdata1->{$testname}, - $testfncdata2->{$testname}); - $result{$testname} = $fnccount; - next; - } - # Function call count data for this testname is unique to - # data set 1: copy - $result{$testname} = $testfncdata1->{$testname}; - } - - # Add count data for testnames unique to data set 2 - foreach $testname (keys(%{$testfncdata2})) { - if (!defined($result{$testname})) { - $result{$testname} = $testfncdata2->{$testname}; - } - } - return \%result; -} - - -# -# brcount_to_db(brcount) -# -# Convert brcount data to the following format: -# -# db: line number -> block hash -# block hash: block number -> branch hash -# branch hash: branch number -> taken value -# - -sub brcount_to_db($) -{ - my ($brcount) = @_; - my $line; - my $db = {}; - - # Add branches to database - foreach $line (keys(%{$brcount})) { - my $brdata = $brcount->{$line}; - - foreach my $entry (split(/:/, $brdata)) { - my ($block, $branch, $taken) = split(/,/, $entry); - my $old = $db->{$line}->{$block}->{$branch}; - - if (!defined($old) || $old eq "-") { - $old = $taken; - } elsif ($taken ne "-") { - $old += $taken; - } - - $db->{$line}->{$block}->{$branch} = $old; - } - } - - return $db; -} - - -# -# db_to_brcount(db[, brcount]) -# -# Convert branch coverage data back to brcount format. If brcount is specified, -# the converted data is directly inserted in brcount. -# - -sub db_to_brcount($;$) -{ - my ($db, $brcount) = @_; - my $line; - my $br_found = 0; - my $br_hit = 0; - - # Convert database back to brcount format - foreach $line (sort({$a <=> $b} keys(%{$db}))) { - my $ldata = $db->{$line}; - my $brdata; - my $block; - - foreach $block (sort({$a <=> $b} keys(%{$ldata}))) { - my $bdata = $ldata->{$block}; - my $branch; - - foreach $branch (sort({$a <=> $b} keys(%{$bdata}))) { - my $taken = $bdata->{$branch}; - - $br_found++; - $br_hit++ if ($taken ne "-" && $taken > 0); - $brdata .= "$block,$branch,$taken:"; - } - } - $brcount->{$line} = $brdata; - } - - return ($brcount, $br_found, $br_hit); -} - - -# -# brcount_db_combine(db1, db2, op) -# -# db1 := db1 op db2, where -# db1, db2: brcount data as returned by brcount_to_db -# op: one of $BR_ADD and BR_SUB -# -sub brcount_db_combine($$$) -{ - my ($db1, $db2, $op) = @_; - - foreach my $line (keys(%{$db2})) { - my $ldata = $db2->{$line}; - - foreach my $block (keys(%{$ldata})) { - my $bdata = $ldata->{$block}; - - foreach my $branch (keys(%{$bdata})) { - my $taken = $bdata->{$branch}; - my $new = $db1->{$line}->{$block}->{$branch}; - - if (!defined($new) || $new eq "-") { - $new = $taken; - } elsif ($taken ne "-") { - if ($op == $BR_ADD) { - $new += $taken; - } elsif ($op == $BR_SUB) { - $new -= $taken; - $new = 0 if ($new < 0); - } - } - - $db1->{$line}->{$block}->{$branch} = $new; - } - } - } -} - - -# -# brcount_db_get_found_and_hit(db) -# -# Return (br_found, br_hit) for db. 
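# The nested layout produced by brcount_to_db() above, shown for a single
# line whose flat brcount string is "0,0,3:0,1,-:"; the numbers are invented:
use strict;
use warnings;

my $db = {
    12 => {            # line number
        0 => {         # block number
            0 => 3,    # branch 0: taken 3 times
            1 => "-",  # branch 1: block never evaluated
        },
    },
};
# db_to_brcount($db) would turn this back into { 12 => "0,0,3:0,1,-:" }
# and report br_found = 2, br_hit = 1 for this example.
print "branch 0 taken $db->{12}{0}{0} times\n";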
-# - -sub brcount_db_get_found_and_hit($) -{ - my ($db) = @_; - my ($br_found , $br_hit) = (0, 0); - - foreach my $line (keys(%{$db})) { - my $ldata = $db->{$line}; - - foreach my $block (keys(%{$ldata})) { - my $bdata = $ldata->{$block}; - - foreach my $branch (keys(%{$bdata})) { - my $taken = $bdata->{$branch}; + my $dir; + my $build; + my $gkv; - $br_found++; - $br_hit++ if ($taken ne "-" && $taken > 0); - } - } - } + ($dir, $build, $gkv) = get_package($from_package); - return ($br_found, $br_hit); -} - - -# combine_brcount(brcount1, brcount2, type, inplace) -# -# If add is BR_ADD, add branch coverage data and return list brcount_added. -# If add is BR_SUB, subtract the taken values of brcount2 from brcount1 and -# return brcount_sub. If inplace is set, the result is inserted into brcount1. -# - -sub combine_brcount($$$;$) -{ - my ($brcount1, $brcount2, $type, $inplace) = @_; - my ($db1, $db2); - - $db1 = brcount_to_db($brcount1); - $db2 = brcount_to_db($brcount2); - brcount_db_combine($db1, $db2, $type); + # Check for build directory + if (defined($base_directory)) { + if (defined($build)) { + info("Using build directory specified by -b.\n"); + } + $build = $base_directory; + } - return db_to_brcount($db1, $inplace ? $brcount1 : undef); + # Do the actual capture + if (defined($gkv)) { + if ($gkv == $GKV_SYS) { + $build = adjust_kernel_dir($dir, $build); + } + if (@kernel_directory) { + $dir = copy_gcov_dir($dir, @kernel_directory); + } + kernel_capture_from_dir($dir, $gkv, $build); + } else { + # Build directory needs to be passed to geninfo + $base_directory = $build; + if (find_graph($dir)) { + # Package contains graph files - collect from there + lcov_geninfo($dir); + } else { + # No graph files found, link data files next to + # graph files + link_data($dir, $base_directory, 1); + lcov_geninfo($base_directory); + link_data($dir, $base_directory, 0); + } + } } - # -# add_testbrdata(testbrdata1, testbrdata2) +# info(printf_parameter) # -# Add branch coverage data for several tests. Return reference to -# added_testbrdata. +# Use printf to write PRINTF_PARAMETER to stdout only when the $quiet flag +# is not set. # -sub add_testbrdata($$) +sub my_info(@) { - my ($testbrdata1, $testbrdata2) = @_; - my %result; - my $testname; - - foreach $testname (keys(%{$testbrdata1})) { - if (defined($testbrdata2->{$testname})) { - my $brcount; - - # Branch coverage data for this testname exists - # in both data sets: add - ($brcount) = combine_brcount( - $testbrdata1->{$testname}, - $testbrdata2->{$testname}, $BR_ADD); - $result{$testname} = $brcount; - next; - } - # Branch coverage data for this testname is unique to - # data set 1: copy - $result{$testname} = $testbrdata1->{$testname}; - } - - # Add count data for testnames unique to data set 2 - foreach $testname (keys(%{$testbrdata2})) { - if (!defined($result{$testname})) { - $result{$testname} = $testbrdata2->{$testname}; - } - } - return \%result; + # Print info string + if (!$data_stdout) { + printf(@_); + } else { + # Don't interfere with the .info output to STDOUT + printf(STDERR @_); + } } - -# -# combine_info_entries(entry_ref1, entry_ref2, filename) -# -# Combine .info data entry hashes referenced by ENTRY_REF1 and ENTRY_REF2. -# Return reference to resulting hash. 
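# A runnable sketch of the $BR_ADD / $BR_SUB arithmetic that
# brcount_db_combine() above applies to each branch; the taken counts are
# invented:
use strict;
use warnings;
use List::Util qw(max);

my ($base, $delta) = (5, 2);
my $added      = $base + $delta;            # $BR_ADD: 5 + 2 = 7
my $subtracted = max(0, $base - $delta);    # $BR_SUB: clamped at zero
print "add=$added sub=$subtracted\n";       # prints "add=7 sub=3"
# If either side records "-" (block never evaluated), the combined value is
# simply the other side's entry.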
-# - -sub combine_info_entries($$$) +# emit() +# write output to file or stdout - possibly with info message +# returns array of (lines found, lines hit, functions found, functions hit, +# branches found, branches_hit) +sub emit { - my $entry1 = $_[0]; # Reference to hash containing first entry - my $testdata1; - my $sumcount1; - my $funcdata1; - my $checkdata1; - my $testfncdata1; - my $sumfnccount1; - my $testbrdata1; - my $sumbrcount1; - - my $entry2 = $_[1]; # Reference to hash containing second entry - my $testdata2; - my $sumcount2; - my $funcdata2; - my $checkdata2; - my $testfncdata2; - my $sumfnccount2; - my $testbrdata2; - my $sumbrcount2; - - my %result; # Hash containing combined entry - my %result_testdata; - my $result_sumcount = {}; - my $result_funcdata; - my $result_testfncdata; - my $result_sumfnccount; - my $result_testbrdata; - my $result_sumbrcount; - my $lines_found; - my $lines_hit; - my $f_found; - my $f_hit; - my $br_found; - my $br_hit; - - my $testname; - my $filename = $_[2]; - - # Retrieve data - ($testdata1, $sumcount1, $funcdata1, $checkdata1, $testfncdata1, - $sumfnccount1, $testbrdata1, $sumbrcount1) = get_info_entry($entry1); - ($testdata2, $sumcount2, $funcdata2, $checkdata2, $testfncdata2, - $sumfnccount2, $testbrdata2, $sumbrcount2) = get_info_entry($entry2); - - # Merge checksums - $checkdata1 = merge_checksums($checkdata1, $checkdata2, $filename); - - # Combine funcdata - $result_funcdata = merge_func_data($funcdata1, $funcdata2, $filename); - - # Combine function call count data - $result_testfncdata = add_testfncdata($testfncdata1, $testfncdata2); - ($result_sumfnccount, $f_found, $f_hit) = - add_fnccount($sumfnccount1, $sumfnccount2); - - # Combine branch coverage data - $result_testbrdata = add_testbrdata($testbrdata1, $testbrdata2); - ($result_sumbrcount, $br_found, $br_hit) = - combine_brcount($sumbrcount1, $sumbrcount2, $BR_ADD); - - # Combine testdata - foreach $testname (keys(%{$testdata1})) - { - if (defined($testdata2->{$testname})) - { - # testname is present in both entries, requires - # combination - ($result_testdata{$testname}) = - add_counts($testdata1->{$testname}, - $testdata2->{$testname}); - } - else - { - # testname only present in entry1, add to result - $result_testdata{$testname} = $testdata1->{$testname}; - } - - # update sum count hash - ($result_sumcount, $lines_found, $lines_hit) = - add_counts($result_sumcount, - $result_testdata{$testname}); - } - - foreach $testname (keys(%{$testdata2})) - { - # Skip testnames already covered by previous iteration - if (defined($testdata1->{$testname})) { next; } - - # testname only present in entry2, add to result hash - $result_testdata{$testname} = $testdata2->{$testname}; - - # update sum count hash - ($result_sumcount, $lines_found, $lines_hit) = - add_counts($result_sumcount, - $result_testdata{$testname}); - } - - # Calculate resulting sumcount - - # Store result - set_info_entry(\%result, \%result_testdata, $result_sumcount, - $result_funcdata, $checkdata1, $result_testfncdata, - $result_sumfnccount, $result_testbrdata, - $result_sumbrcount, $lines_found, $lines_hit, - $f_found, $f_hit, $br_found, $br_hit); - - return(\%result); + my ($trace, $info_msg) = @_; + my $to = $data_stdout ? 
'-' : $output_filename; + if (!$data_stdout) { + info($info_msg) + if $info_msg; + info("Writing data to $output_filename\n"); + } + $trace->add_comments(@lcovutil::comments); + $trace->write_info_file($to, $lcovutil::verify_checksum); } - -# -# combine_info_files(info_ref1, info_ref2) # -# Combine .info data in hashes referenced by INFO_REF1 and INFO_REF2. Return -# reference to resulting hash. -# - -sub combine_info_files($$) -{ - my %hash1 = %{$_[0]}; - my %hash2 = %{$_[1]}; - my $filename; - - foreach $filename (keys(%hash2)) - { - if ($hash1{$filename}) - { - # Entry already exists in hash1, combine them - $hash1{$filename} = - combine_info_entries($hash1{$filename}, - $hash2{$filename}, - $filename); - } - else - { - # Entry is unique in both hashes, simply add to - # resulting hash - $hash1{$filename} = $hash2{$filename}; - } - } - - return(\%hash1); -} - - # # add_traces() # sub add_traces() { - my $total_trace; - my $current_trace; - my $tracefile; - my @result; - local *INFO_HANDLE; - - info("Combining tracefiles.\n"); - - foreach $tracefile (@add_tracefile) - { - $current_trace = read_info_file($tracefile); - if ($total_trace) - { - $total_trace = combine_info_files($total_trace, - $current_trace); - } - else - { - $total_trace = $current_trace; - } - } - - # Write combined data - if (!$data_stdout) - { - info("Writing data to $output_filename\n"); - open(INFO_HANDLE, ">", $output_filename) - or die("ERROR: cannot write to $output_filename!\n"); - @result = write_info_file(*INFO_HANDLE, $total_trace); - close(*INFO_HANDLE); - } - else - { - @result = write_info_file(*STDOUT, $total_trace); - } - - return @result; -} + info("Combining tracefiles.\n"); + my @merge = AggregateTraces::find_from_glob(@add_tracefile); + info(".. found " . scalar(@merge) . " files to aggregate.\n"); + my ($total_trace, $effective) = + AggregateTraces::merge(ReadCurrentSource->new(), @merge); -# -# write_info_file(filehandle, data) -# - -sub write_info_file(*$) -{ - local *INFO_HANDLE = $_[0]; - my %data = %{$_[1]}; - my $source_file; - my $entry; - my $testdata; - my $sumcount; - my $funcdata; - my $checkdata; - my $testfncdata; - my $sumfnccount; - my $testbrdata; - my $sumbrcount; - my $testname; - my $line; - my $func; - my $testcount; - my $testfnccount; - my $testbrcount; - my $found; - my $hit; - my $f_found; - my $f_hit; - my $br_found; - my $br_hit; - my $ln_total_found = 0; - my $ln_total_hit = 0; - my $fn_total_found = 0; - my $fn_total_hit = 0; - my $br_total_found = 0; - my $br_total_hit = 0; - - foreach $source_file (sort(keys(%data))) - { - $entry = $data{$source_file}; - ($testdata, $sumcount, $funcdata, $checkdata, $testfncdata, - $sumfnccount, $testbrdata, $sumbrcount, $found, $hit, - $f_found, $f_hit, $br_found, $br_hit) = - get_info_entry($entry); - - # Add to totals - $ln_total_found += $found; - $ln_total_hit += $hit; - $fn_total_found += $f_found; - $fn_total_hit += $f_hit; - $br_total_found += $br_found; - $br_total_hit += $br_hit; - - foreach $testname (sort(keys(%{$testdata}))) - { - $testcount = $testdata->{$testname}; - $testfnccount = $testfncdata->{$testname}; - $testbrcount = $testbrdata->{$testname}; - $found = 0; - $hit = 0; - - print(INFO_HANDLE "TN:$testname\n"); - print(INFO_HANDLE "SF:$source_file\n"); - - # Write function related data - foreach $func ( - sort({$funcdata->{$a} <=> $funcdata->{$b}} - keys(%{$funcdata}))) - { - print(INFO_HANDLE "FN:".$funcdata->{$func}. - ",$func\n"); - } - foreach $func (keys(%{$testfnccount})) { - print(INFO_HANDLE "FNDA:". 
- $testfnccount->{$func}. - ",$func\n"); - } - ($f_found, $f_hit) = - get_func_found_and_hit($testfnccount); - print(INFO_HANDLE "FNF:$f_found\n"); - print(INFO_HANDLE "FNH:$f_hit\n"); - - # Write branch related data - $br_found = 0; - $br_hit = 0; - foreach $line (sort({$a <=> $b} - keys(%{$testbrcount}))) { - my $brdata = $testbrcount->{$line}; - - foreach my $brentry (split(/:/, $brdata)) { - my ($block, $branch, $taken) = - split(/,/, $brentry); - - print(INFO_HANDLE "BRDA:$line,$block,". - "$branch,$taken\n"); - $br_found++; - $br_hit++ if ($taken ne '-' && - $taken > 0); - } - } - if ($br_found > 0) { - print(INFO_HANDLE "BRF:$br_found\n"); - print(INFO_HANDLE "BRH:$br_hit\n"); - } - - # Write line related data - foreach $line (sort({$a <=> $b} keys(%{$testcount}))) - { - print(INFO_HANDLE "DA:$line,". - $testcount->{$line}. - (defined($checkdata->{$line}) && - $checksum ? - ",".$checkdata->{$line} : "")."\n"); - $found++; - if ($testcount->{$line} > 0) - { - $hit++; - } - - } - print(INFO_HANDLE "LF:$found\n"); - print(INFO_HANDLE "LH:$hit\n"); - print(INFO_HANDLE "end_of_record\n"); - } - } - - return ($ln_total_found, $ln_total_hit, $fn_total_found, $fn_total_hit, - $br_total_found, $br_total_hit); + if ($AggregateTraces::function_mapping) { + return; + } elsif ($prune_testcases) { + return ($effective, \@merge); + } else { + # write the result + emit($total_trace); + return $total_trace; + } } - # -# transform_pattern(pattern) # -# Transform shell wildcard expression to equivalent Perl regular expression. -# Return transformed pattern. +# merge_traces() # -sub transform_pattern($) +sub merge_traces($$) { - my $pattern = $_[0]; - - # Escape special chars - - $pattern =~ s/\\/\\\\/g; - $pattern =~ s/\//\\\//g; - $pattern =~ s/\^/\\\^/g; - $pattern =~ s/\$/\\\$/g; - $pattern =~ s/\(/\\\(/g; - $pattern =~ s/\)/\\\)/g; - $pattern =~ s/\[/\\\[/g; - $pattern =~ s/\]/\\\]/g; - $pattern =~ s/\{/\\\{/g; - $pattern =~ s/\}/\\\}/g; - $pattern =~ s/\./\\\./g; - $pattern =~ s/\,/\\\,/g; - $pattern =~ s/\|/\\\|/g; - $pattern =~ s/\+/\\\+/g; - $pattern =~ s/\!/\\\!/g; - - # Transform ? => (.) and * => (.*) - - $pattern =~ s/\*/\(\.\*\)/g; - $pattern =~ s/\?/\(\.\)/g; - - return $pattern; -} + my ($list, $op) = @_; + info(($op == TraceInfo::INTERSECT ? 'Intersect' : 'Subtract') . + " tracefiles.\n"); + my @data = AggregateTraces::find_from_glob(@ARGV); + info('.. found: ' . + scalar(@data) . " 'base' trace file" . + (1 == scalar(@data) ? '' : 's') . ".\n"); + die("must specify at lease one 'base' tracefile") + unless @data; + my @merge = AggregateTraces::find_from_glob(@$list); + info(' ' . + scalar(@merge) . + ' file' . (1 == scalar(@merge) ? '' : 's') . ' to ' . + ($op == TraceInfo::INTERSECT ? 'intersect' : 'subtract') . 
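# A minimal tracefile record in the format emitted by the legacy
# write_info_file() above; the source file and all counts are invented:
use strict;
use warnings;

my $record = <<'END_OF_RECORD';
TN:testcase1
SF:/home/user/project/src/example.c
FN:3,main
FNDA:1,main
FNF:1
FNH:1
BRDA:5,0,0,1
BRDA:5,0,1,0
BRF:2
BRH:1
DA:3,1
DA:5,1
DA:6,0
LF:3
LH:2
end_of_record
END_OF_RECORD
print $record;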
".\n"); -# -# extract() -# + my $srcReader = ReadCurrentSource->new(); + my ($base) = AggregateTraces::merge($srcReader, @data); + my ($merge) = AggregateTraces::merge($srcReader, @merge); -sub extract() -{ - my $data = read_info_file($extract); - my $filename; - my $keep; - my $pattern; - my @pattern_list; - my $extracted = 0; - my @result; - local *INFO_HANDLE; - - # Need perlreg expressions instead of shell pattern - @pattern_list = map({ transform_pattern($_); } @ARGV); - - # Filter out files which do not match any pattern - foreach $filename (sort(keys(%{$data}))) - { - $keep = 0; - - foreach $pattern (@pattern_list) - { - $keep ||= ($filename =~ (/^$pattern$/)); - } - - - if (!$keep) - { - delete($data->{$filename}); - } - else - { - info("Extracting $filename\n"), - $extracted++; - } - } - - # Write extracted data - if (!$data_stdout) - { - info("Extracted $extracted files\n"); - info("Writing data to $output_filename\n"); - open(INFO_HANDLE, ">", $output_filename) - or die("ERROR: cannot write to $output_filename!\n"); - @result = write_info_file(*INFO_HANDLE, $data); - close(*INFO_HANDLE); - } - else - { - @result = write_info_file(*STDOUT, $data); - } - - return @result; + my $interesting = $base->merge_tracefile($merge, $op); + # write the result + emit($base); + return $base; } +sub remove_file_patterns($) +{ + my $filename = shift; -# -# remove() -# + my $readSourceFile = ReadCurrentSource->new(); + my $now = Time::HiRes::gettimeofday(); + my $data = + TraceFile->load($filename, $readSourceFile, $lcovutil::verify_checksum); + my $then = Time::HiRes::gettimeofday(); + $lcovutil::profileData{parse} = $then - $now; -sub remove() -{ - my $data = read_info_file($remove); - my $filename; - my $match_found; - my $pattern; - my @pattern_list; - my $removed = 0; - my @result; - local *INFO_HANDLE; - - # Need perlreg expressions instead of shell pattern - @pattern_list = map({ transform_pattern($_); } @ARGV); - - # Filter out files that match the pattern - foreach $filename (sort(keys(%{$data}))) - { - $match_found = 0; - - foreach $pattern (@pattern_list) - { - $match_found ||= ($filename =~ (/^$pattern$/)); - } - - - if ($match_found) - { - delete($data->{$filename}); - info("Removing $filename\n"), - $removed++; - } - } - - # Write data - if (!$data_stdout) - { - info("Deleted $removed files\n"); - info("Writing data to $output_filename\n"); - open(INFO_HANDLE, ">", $output_filename) - or die("ERROR: cannot write to $output_filename!\n"); - @result = write_info_file(*INFO_HANDLE, $data); - close(*INFO_HANDLE); - } - else - { - @result = write_info_file(*STDOUT, $data); - } - - return @result; + # Write extracted data + my $removed = scalar(keys(%lcovutil::excluded_files)); + emit($data, "Removed $removed files\n"); + return $data; } - # get_prefix(max_width, max_percentage_too_long, path_list) # # Return a path prefix that satisfies the following requirements: @@ -2842,53 +1622,55 @@ sub remove() sub get_prefix($$@) { - my ($max_width, $max_long, @path_list) = @_; - my $path; - my $ENTRY_NUM = 0; - my $ENTRY_LONG = 1; - my %prefix; - - # Build prefix hash - foreach $path (@path_list) { - my ($v, $d, $f) = splitpath($path); - my @dirs = splitdir($d); - my $p_len = length($path); - my $i; - - # Remove trailing '/' - pop(@dirs) if ($dirs[scalar(@dirs) - 1] eq ''); - for ($i = 0; $i < scalar(@dirs); $i++) { - my $subpath = catpath($v, catdir(@dirs[0..$i]), ''); - my $entry = $prefix{$subpath}; - - $entry = [ 0, 0 ] if (!defined($entry)); - $entry->[$ENTRY_NUM]++; - if (($p_len - 
length($subpath) - 1) > $max_width) { - $entry->[$ENTRY_LONG]++; - } - $prefix{$subpath} = $entry; - } - } - # Find suitable prefix (sort descending by two keys: 1. number of - # entries covered by a prefix, 2. length of prefix) - foreach $path (sort {($prefix{$a}->[$ENTRY_NUM] == - $prefix{$b}->[$ENTRY_NUM]) ? - length($b) <=> length($a) : - $prefix{$b}->[$ENTRY_NUM] <=> - $prefix{$a}->[$ENTRY_NUM]} - keys(%prefix)) { - my ($num, $long) = @{$prefix{$path}}; - - # Check for additional requirement: number of filenames - # that would be too long may not exceed a certain percentage - if ($long <= $num * $max_long / 100) { - return $path; - } - } - - return ""; -} + my ($max_width, $max_long, @path_list) = @_; + my $path; + my $ENTRY_NUM = 0; + my $ENTRY_LONG = 1; + my %prefix; + + # Build prefix hash + foreach $path (@path_list) { + my ($v, $d, $f) = splitpath($path); + my @dirs = splitdir($d); + my $p_len = length($path); + + # Remove trailing '/' + pop(@dirs) + if (0 != scalar(@dirs) && $dirs[scalar(@dirs) - 1] eq ''); + for (my $i = 0; $i < scalar(@dirs); $i++) { + my $subpath = catpath($v, catdir(@dirs[0 .. $i]), ''); + my $entry = $prefix{$subpath}; + + $entry = [0, 0] if (!defined($entry)); + $entry->[$ENTRY_NUM]++; + if (($p_len - length($subpath) - 1) > $max_width) { + $entry->[$ENTRY_LONG]++; + } + $prefix{$subpath} = $entry; + } + } + # Find suitable prefix (sort descending by two keys: 1. number of + # entries covered by a prefix, 2. length of prefix) + foreach $path (sort { + ($prefix{$a}->[$ENTRY_NUM] == $prefix{$b}->[$ENTRY_NUM]) + ? + length($b) <=> length($a) : + $prefix{$b}->[$ENTRY_NUM] + <=> $prefix{$a}->[$ENTRY_NUM] + } + keys(%prefix) + ) { + my ($num, $long) = @{$prefix{$path}}; + + # Check for additional requirement: number of filenames + # that would be too long may not exceed a certain percentage + if ($long <= $num * $max_long / 100) { + return $path; + } + } + return ""; +} # # shorten_filename(filename, width) @@ -2898,44 +1680,43 @@ sub get_prefix($$@) sub shorten_filename($$) { - my ($filename, $width) = @_; - my $l = length($filename); - my $s; - my $e; + my ($filename, $width) = @_; + my $l = length($filename); + my $s; + my $e; - return $filename if ($l <= $width); - $e = int(($width - 3) / 2); - $s = $width - 3 - $e; + return $filename if ($l <= $width); + $e = int(($width - 3) / 2); + $s = $width - 3 - $e; - return substr($filename, 0, $s).'...'.substr($filename, $l - $e); + return substr($filename, 0, $s) . '...' . 
substr($filename, $l - $e); } - sub shorten_number($$) { - my ($number, $width) = @_; - my $result = sprintf("%*d", $width, $number); - - return $result if (length($result) <= $width); - $number = $number / 1000; - return $result if (length($result) <= $width); - $result = sprintf("%*dk", $width - 1, $number); - return $result if (length($result) <= $width); - $number = $number / 1000; - $result = sprintf("%*dM", $width - 1, $number); - return $result if (length($result) <= $width); - return '#'; + my ($number, $width) = @_; + my $result = sprintf("%*d", $width, $number); + + return $result if (length($result) <= $width); + $number = $number / 1000; + return $result if (length($result) <= $width); + $result = sprintf("%*dk", $width - 1, $number); + return $result if (length($result) <= $width); + $number = $number / 1000; + $result = sprintf("%*dM", $width - 1, $number); + return $result if (length($result) <= $width); + return '#'; } sub shorten_rate($$$) { - my ($hit, $found, $width) = @_; - my $result = rate($hit, $found, "%", 1, $width); + my ($hit, $found, $width) = @_; + my $result = rate($hit, $found, "%", 1, $width); - return $result if (length($result) <= $width); - $result = rate($hit, $found, "%", 0, $width); - return $result if (length($result) <= $width); - return "#"; + return $result if (length($result) <= $width); + $result = rate($hit, $found, "%", 0, $width); + return $result if (length($result) <= $width); + return "#"; } # @@ -2944,257 +1725,193 @@ sub shorten_rate($$$) sub list() { - my $data = read_info_file($list); - my $filename; - my $found; - my $hit; - my $entry; - my $fn_found; - my $fn_hit; - my $br_found; - my $br_hit; - my $total_found = 0; - my $total_hit = 0; - my $fn_total_found = 0; - my $fn_total_hit = 0; - my $br_total_found = 0; - my $br_total_hit = 0; - my $prefix; - my $strlen = length("Filename"); - my $format; - my $heading1; - my $heading2; - my @footer; - my $barlen; - my $rate; - my $fnrate; - my $brrate; - my $lastpath; - my $F_LN_NUM = 0; - my $F_LN_RATE = 1; - my $F_FN_NUM = 2; - my $F_FN_RATE = 3; - my $F_BR_NUM = 4; - my $F_BR_RATE = 5; - my @fwidth_narrow = (5, 5, 3, 5, 4, 5); - my @fwidth_wide = (6, 5, 5, 5, 6, 5); - my @fwidth = @fwidth_wide; - my $w; - my $max_width = $opt_list_width; - my $max_long = $opt_list_truncate_max; - my $fwidth_narrow_length; - my $fwidth_wide_length; - my $got_prefix = 0; - my $root_prefix = 0; - - # Calculate total width of narrow fields - $fwidth_narrow_length = 0; - foreach $w (@fwidth_narrow) { - $fwidth_narrow_length += $w + 1; - } - # Calculate total width of wide fields - $fwidth_wide_length = 0; - foreach $w (@fwidth_wide) { - $fwidth_wide_length += $w + 1; - } - # Get common file path prefix - $prefix = get_prefix($max_width - $fwidth_narrow_length, $max_long, - keys(%{$data})); - $root_prefix = 1 if ($prefix eq rootdir()); - $got_prefix = 1 if (length($prefix) > 0); - $prefix =~ s/\/$//; - # Get longest filename length - foreach $filename (keys(%{$data})) { - if (!$opt_list_full_path) { - if (!$got_prefix || !$root_prefix && - !($filename =~ s/^\Q$prefix\/\E//)) { - my ($v, $d, $f) = splitpath($filename); - - $filename = $f; - } - } - # Determine maximum length of entries - if (length($filename) > $strlen) { - $strlen = length($filename) - } - } - if (!$opt_list_full_path) { - my $blanks; - - $w = $fwidth_wide_length; - # Check if all columns fit into max_width characters - if ($strlen + $fwidth_wide_length > $max_width) { - # Use narrow fields - @fwidth = @fwidth_narrow; - $w = 
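# Two worked examples for the column-width helpers above, with invented
# inputs:
#
#   shorten_filename("abcdefghijklmnop", 12) returns "abcde...mnop"
#       (5 leading characters + "..." + 4 trailing characters = width 12).
#
#   shorten_number(1234567, 5) returns "1234k"
#       (the raw value needs 7 digits, so it is scaled by 1000 and given a
#       "k" suffix to fit the 5-character column).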
$fwidth_narrow_length; - if (($strlen + $fwidth_narrow_length) > $max_width) { - # Truncate filenames at max width - $strlen = $max_width - $fwidth_narrow_length; - } - } - # Add some blanks between filename and fields if possible - $blanks = int($strlen * 0.5); - $blanks = 4 if ($blanks < 4); - $blanks = 8 if ($blanks > 8); - if (($strlen + $w + $blanks) < $max_width) { - $strlen += $blanks; - } else { - $strlen = $max_width - $w; - } - } - # Filename - $w = $strlen; - $format = "%-${w}s|"; - $heading1 = sprintf("%*s|", $w, ""); - $heading2 = sprintf("%-*s|", $w, "Filename"); - $barlen = $w + 1; - # Line coverage rate - $w = $fwidth[$F_LN_RATE]; - $format .= "%${w}s "; - $heading1 .= sprintf("%-*s |", $w + $fwidth[$F_LN_NUM], - "Lines"); - $heading2 .= sprintf("%-*s ", $w, "Rate"); - $barlen += $w + 1; - # Number of lines - $w = $fwidth[$F_LN_NUM]; - $format .= "%${w}s|"; - $heading2 .= sprintf("%*s|", $w, "Num"); - $barlen += $w + 1; - # Function coverage rate - $w = $fwidth[$F_FN_RATE]; - $format .= "%${w}s "; - $heading1 .= sprintf("%-*s|", $w + $fwidth[$F_FN_NUM] + 1, - "Functions"); - $heading2 .= sprintf("%-*s ", $w, "Rate"); - $barlen += $w + 1; - # Number of functions - $w = $fwidth[$F_FN_NUM]; - $format .= "%${w}s|"; - $heading2 .= sprintf("%*s|", $w, "Num"); - $barlen += $w + 1; - # Branch coverage rate - $w = $fwidth[$F_BR_RATE]; - $format .= "%${w}s "; - $heading1 .= sprintf("%-*s", $w + $fwidth[$F_BR_NUM] + 1, - "Branches"); - $heading2 .= sprintf("%-*s ", $w, "Rate"); - $barlen += $w + 1; - # Number of branches - $w = $fwidth[$F_BR_NUM]; - $format .= "%${w}s"; - $heading2 .= sprintf("%*s", $w, "Num"); - $barlen += $w; - # Line end - $format .= "\n"; - $heading1 .= "\n"; - $heading2 .= "\n"; - - # Print heading - print($heading1); - print($heading2); - print(("="x$barlen)."\n"); - - # Print per file information - foreach $filename (sort(keys(%{$data}))) - { - my @file_data; - my $print_filename = $filename; - - $entry = $data->{$filename}; - if (!$opt_list_full_path) { - my $p; - - $print_filename = $filename; - if (!$got_prefix || !$root_prefix && - !($print_filename =~ s/^\Q$prefix\/\E//)) { - my ($v, $d, $f) = splitpath($filename); - - $p = catpath($v, $d, ""); - $p =~ s/\/$//; - $print_filename = $f; - } else { - $p = $prefix; - } - - if (!defined($lastpath) || $lastpath ne $p) { - print("\n") if (defined($lastpath)); - $lastpath = $p; - print("[$lastpath/]\n") if (!$root_prefix); - } - $print_filename = shorten_filename($print_filename, - $strlen); - } - - (undef, undef, undef, undef, undef, undef, undef, undef, - $found, $hit, $fn_found, $fn_hit, $br_found, $br_hit) = - get_info_entry($entry); - - # Assume zero count if there is no function data for this file - if (!defined($fn_found) || !defined($fn_hit)) { - $fn_found = 0; - $fn_hit = 0; - } - # Assume zero count if there is no branch data for this file - if (!defined($br_found) || !defined($br_hit)) { - $br_found = 0; - $br_hit = 0; - } - - # Add line coverage totals - $total_found += $found; - $total_hit += $hit; - # Add function coverage totals - $fn_total_found += $fn_found; - $fn_total_hit += $fn_hit; - # Add branch coverage totals - $br_total_found += $br_found; - $br_total_hit += $br_hit; - - # Determine line coverage rate for this file - $rate = shorten_rate($hit, $found, $fwidth[$F_LN_RATE]); - # Determine function coverage rate for this file - $fnrate = shorten_rate($fn_hit, $fn_found, $fwidth[$F_FN_RATE]); - # Determine branch coverage rate for this file - $brrate = shorten_rate($br_hit, $br_found, 
$fwidth[$F_BR_RATE]); - - # Assemble line parameters - push(@file_data, $print_filename); - push(@file_data, $rate); - push(@file_data, shorten_number($found, $fwidth[$F_LN_NUM])); - push(@file_data, $fnrate); - push(@file_data, shorten_number($fn_found, $fwidth[$F_FN_NUM])); - push(@file_data, $brrate); - push(@file_data, shorten_number($br_found, $fwidth[$F_BR_NUM])); - - # Print assembled line - printf($format, @file_data); - } - - # Determine total line coverage rate - $rate = shorten_rate($total_hit, $total_found, $fwidth[$F_LN_RATE]); - # Determine total function coverage rate - $fnrate = shorten_rate($fn_total_hit, $fn_total_found, - $fwidth[$F_FN_RATE]); - # Determine total branch coverage rate - $brrate = shorten_rate($br_total_hit, $br_total_found, - $fwidth[$F_BR_RATE]); - - # Print separator - print(("="x$barlen)."\n"); - - # Assemble line parameters - push(@footer, sprintf("%*s", $strlen, "Total:")); - push(@footer, $rate); - push(@footer, shorten_number($total_found, $fwidth[$F_LN_NUM])); - push(@footer, $fnrate); - push(@footer, shorten_number($fn_total_found, $fwidth[$F_FN_NUM])); - push(@footer, $brrate); - push(@footer, shorten_number($br_total_found, $fwidth[$F_BR_NUM])); - - # Print assembled line - printf($format, @footer); -} + my $readSourceFile = ReadCurrentSource->new(); + my $data = + TraceFile->load($list, $readSourceFile, $lcovutil::verify_checksum); + my $strlen = length("Filename"); + my $lastpath; + my $F_LN_NUM = 0; + my $F_LN_RATE = 1; + my $F_FN_NUM = 2; + my $F_FN_RATE = 3; + my $F_BR_NUM = 4; + my $F_BR_RATE = 5; + my $F_MCDC_NUM = 6; + my $F_MCDC_RATE = 7; + my @fwidth_narrow = (5, 5, 3, 5, 4, 5, 4, 5); + my @fwidth_wide = (6, 5, 5, 5, 6, 5, 6, 5); + my @fwidth = @fwidth_wide; + my $max_width = $opt_list_width; + my $max_long = $opt_list_truncate_max; + + # Calculate total width of narrow fields + my $fwidth_narrow_length = 0; + foreach my $w (@fwidth_narrow) { + $fwidth_narrow_length += $w + 1; + } + # Calculate total width of wide fields + my $fwidth_wide_length = 0; + foreach my $w (@fwidth_wide) { + $fwidth_wide_length += $w + 1; + } + # Get common file path prefix + my $prefix = get_prefix($max_width - $fwidth_narrow_length, + $max_long, $data->files()); + my $root_prefix = ($prefix eq rootdir()); + my $got_prefix = (length($prefix) > 0); + $prefix =~ s/$lcovutil::dirseparator$//; + # Get longest filename length + foreach my $filename ($data->files()) { + if (!$opt_list_full_path) { + if (!$got_prefix || + !$root_prefix && !(($lcovutil::case_insensitive && + $filename =~ s/^\Q$prefix\/\E//i) || + (!$lcovutil::case_insensitive && + $filename =~ s/^\Q$prefix\/\E//)) + ) { + my ($v, $d, $f) = splitpath($filename); + + $filename = $f; + } + } + # Determine maximum length of entries + if (length($filename) > $strlen) { + $strlen = length($filename); + } + } + if (!$opt_list_full_path) { + + my $w = $fwidth_wide_length; + # Check if all columns fit into max_width characters + if ($strlen + $fwidth_wide_length > $max_width) { + # Use narrow fields + @fwidth = @fwidth_narrow; + $w = $fwidth_narrow_length; + if (($strlen + $fwidth_narrow_length) > $max_width) { + # Truncate filenames at max width + $strlen = $max_width - $fwidth_narrow_length; + } + } + # Add some blanks between filename and fields if possible + my $blanks = int($strlen * 0.5); + $blanks = 4 if ($blanks < 4); + $blanks = 8 if ($blanks > 8); + if (($strlen + $w + $blanks) < $max_width) { + $strlen += $blanks; + } else { + $strlen = $max_width - $w; + } + } + # Filename + my $w = $strlen; + 
my $format = "%-${w}s|"; + my $heading1 = sprintf("%*s|", $w, ""); + my $heading2 = sprintf("%-*s|", $w, "Filename"); + my $barlen = $w + 1; + + # name, total_found, total_hit, total_column_width, rate_column_width + my @types = (['Lines', \&TraceInfo::sum, 0, 0, $F_LN_NUM, $F_LN_RATE], + [$lcovutil::func_coverage ? 'Functions' : undef, + \&TraceInfo::func, 0, 0, $F_FN_NUM, $F_FN_RATE + ], + [$lcovutil::br_coverage ? 'Branches' : undef, + \&TraceInfo::sumbr, 0, 0, $F_BR_NUM, $F_BR_RATE + ], + [$lcovutil::mcdc_coverage ? 'MC/DC' : undef, + \&TraceInfo::mcdc, 0, 0, $F_MCDC_NUM, $F_MCDC_RATE + ],); + + my $sep = ''; + foreach my $d (@types) { + my ($type, $cb, $found, $hit, $n, $r) = @$d; + next unless $type; + $w = $fwidth[$r]; + $format .= "$sep%${w}s "; + $heading1 .= sprintf("$sep%-*s ", $w + $fwidth[$n], $type); + $heading2 .= sprintf("$sep%-*s ", $w, "Rate"); + $barlen += $w + 1; + # Number of coverpoints + $w = $fwidth[$n]; + $format .= "%${w}s"; + $heading2 .= sprintf("%*s", $w, "Num"); + $barlen += $w + 1; + $sep = '|'; + } + --$barlen; # no separator for last column + # Line end + $format .= "\n"; + $heading1 .= "\n"; + $heading2 .= "\n"; + + # Print heading + print($heading1); + print($heading2); + print(("=" x $barlen) . "\n"); + + # Print per file information + foreach my $filename (sort($data->files())) { + my $entry = $data->data($filename); + my $print_filename = $entry->filename(); + if (!$opt_list_full_path) { + my $p; + + $print_filename = $filename; + if (!$got_prefix || + !$root_prefix && !(($lcovutil::case_insensitive && + $print_filename =~ s/^\Q$prefix\/\E//i) || + (!$lcovutil::case_insensitive && + $print_filename =~ s/^\Q$prefix\/\E//)) + ) { + my ($v, $d, $f) = splitpath($filename); + + $p = catpath($v, $d, ""); + $p =~ s/$lcovutil::dirseparator$//; + $print_filename = $f; + } else { + $p = $prefix; + } + + if (!defined($lastpath) || $lastpath ne $p) { + print("\n") if (defined($lastpath)); + $lastpath = $p; + print("[$lastpath/]\n") if (!$root_prefix); + } + $print_filename = shorten_filename($print_filename, $strlen); + } + my @file_data; + push(@file_data, $print_filename); + foreach my $d (@types) { + my ($type, $cb, $total_found, $total_hit, $n, $r) = @$d; + next unless defined($type); + my $data = &{$cb}($entry); + + my ($found, $hit) = $data->get_found_and_hit(); + # add to totals + $d->[2] += $found, $d->[3] += $hit; + + push(@file_data, shorten_rate($hit, $found, $fwidth[$r])); + push(@file_data, shorten_number($found, $fwidth[$n])); + } + # Print assembled line + printf($format, @file_data); + } + # Print separator + print(("=" x $barlen) . "\n"); + + # Assemble line parameters + my @footer; + push(@footer, sprintf("%*s", $strlen, "Total:")); + foreach my $d (@types) { + my ($type, $cb, $total_found, $total_hit, $n, $r) = @$d; + next unless defined($type); + + push(@footer, shorten_rate($total_hit, $total_found, $fwidth[$r])); + push(@footer, shorten_number($total_found, $fwidth[$n])); + } + # Print assembled line + printf($format, @footer); +} # # get_common_filename(filename1, filename2) @@ -3209,757 +1926,33 @@ sub list() sub get_common_filename($$) { - my @list1 = split("/", $_[0]); - my @list2 = split("/", $_[1]); - my @result; - - # Work in reverse order, i.e. beginning with the filename itself - while (@list1 && @list2 && ($list1[$#list1] eq $list2[$#list2])) - { - unshift(@result, pop(@list1)); - pop(@list2); - } - - # Did we find any similarities? 
- if (scalar(@result) > 0) - { - return (join("/", @result), join("/", @list1), - join("/", @list2)); - } - else - { - return undef; - } -} - - -# -# strip_directories($path, $depth) -# -# Remove DEPTH leading directory levels from PATH. -# - -sub strip_directories($$) -{ - my $filename = $_[0]; - my $depth = $_[1]; - my $i; - - if (!defined($depth) || ($depth < 1)) - { - return $filename; - } - for ($i = 0; $i < $depth; $i++) - { - $filename =~ s/^[^\/]*\/+(.*)$/$1/; - } - return $filename; -} - - -# -# read_diff(filename) -# -# Read diff output from FILENAME to memory. The diff file has to follow the -# format generated by 'diff -u'. Returns a list of hash references: -# -# (mapping, path mapping) -# -# mapping: filename -> reference to line hash -# line hash: line number in new file -> corresponding line number in old file -# -# path mapping: filename -> old filename -# -# Die in case of error. -# - -sub read_diff($) -{ - my $diff_file = $_[0]; # Name of diff file - my %diff; # Resulting mapping filename -> line hash - my %paths; # Resulting mapping old path -> new path - my $mapping; # Reference to current line hash - my $line; # Contents of current line - my $num_old; # Current line number in old file - my $num_new; # Current line number in new file - my $file_old; # Name of old file in diff section - my $file_new; # Name of new file in diff section - my $filename; # Name of common filename of diff section - my $in_block = 0; # Non-zero while we are inside a diff block - local *HANDLE; # File handle for reading the diff file - - info("Reading diff $diff_file\n"); - - # Check if file exists and is readable - stat($diff_file); - if (!(-r _)) - { - die("ERROR: cannot read file $diff_file!\n"); - } - - # Check if this is really a plain file - if (!(-f _)) - { - die("ERROR: not a plain file: $diff_file!\n"); - } - - # Check for .gz extension - if ($diff_file =~ /\.gz$/) - { - # Check for availability of GZIP tool - system_no_output(1, "gunzip", "-h") - and die("ERROR: gunzip command not available!\n"); - - # Check integrity of compressed file - system_no_output(1, "gunzip", "-t", $diff_file) - and die("ERROR: integrity check failed for ". - "compressed file $diff_file!\n"); - - # Open compressed file - open(HANDLE, "-|", "gunzip -c '$diff_file'") - or die("ERROR: cannot start gunzip to decompress ". 
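# strip_directories() above drops leading path components (as selected by
# the --strip option); a quick worked example with an invented path:
#
#   strip_directories("a/b/src/main.c", 2) returns "src/main.c"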
- "file $_[0]!\n"); - } - else - { - # Open decompressed file - open(HANDLE, "<", $diff_file) - or die("ERROR: cannot read file $_[0]!\n"); - } - - # Parse diff file line by line - while () - { - chomp($_); - $line = $_; - - foreach ($line) - { - # Filename of old file: - # --- - /^--- (\S+)/ && do - { - $file_old = strip_directories($1, $strip); - last; - }; - # Filename of new file: - # +++ - /^\+\+\+ (\S+)/ && do - { - # Add last file to resulting hash - if ($filename) - { - my %new_hash; - $diff{$filename} = $mapping; - $mapping = \%new_hash; - } - $file_new = strip_directories($1, $strip); - $filename = $file_old; - $paths{$filename} = $file_new; - $num_old = 1; - $num_new = 1; - last; - }; - # Start of diff block: - # @@ -old_start,old_num, +new_start,new_num @@ - /^\@\@\s+-(\d+),(\d+)\s+\+(\d+),(\d+)\s+\@\@$/ && do - { - $in_block = 1; - while ($num_old < $1) - { - $mapping->{$num_new} = $num_old; - $num_old++; - $num_new++; - } - last; - }; - # Unchanged line - # - /^ / && do - { - if ($in_block == 0) - { - last; - } - $mapping->{$num_new} = $num_old; - $num_old++; - $num_new++; - last; - }; - # Line as seen in old file - # - /^-/ && do - { - if ($in_block == 0) - { - last; - } - $num_old++; - last; - }; - # Line as seen in new file - # - /^\+/ && do - { - if ($in_block == 0) - { - last; - } - $num_new++; - last; - }; - # Empty line - /^$/ && do - { - if ($in_block == 0) - { - last; - } - $mapping->{$num_new} = $num_old; - $num_old++; - $num_new++; - last; - }; - } - } - - close(HANDLE); - - # Add final diff file section to resulting hash - if ($filename) - { - $diff{$filename} = $mapping; - } - - if (!%diff) - { - die("ERROR: no valid diff data found in $diff_file!\n". - "Make sure to use 'diff -u' when generating the diff ". - "file.\n"); - } - return (\%diff, \%paths); -} - - -# -# apply_diff($count_data, $line_hash) -# -# Transform count data using a mapping of lines: -# -# $count_data: reference to hash: line number -> data -# $line_hash: reference to hash: line number new -> line number old -# -# Return a reference to transformed count data. -# - -sub apply_diff($$) -{ - my $count_data = $_[0]; # Reference to data hash: line -> hash - my $line_hash = $_[1]; # Reference to line hash: new line -> old line - my %result; # Resulting hash - my $last_new = 0; # Last new line number found in line hash - my $last_old = 0; # Last old line number found in line hash - - # Iterate all new line numbers found in the diff - foreach (sort({$a <=> $b} keys(%{$line_hash}))) - { - $last_new = $_; - $last_old = $line_hash->{$last_new}; - - # Is there data associated with the corresponding old line? - if (defined($count_data->{$line_hash->{$_}})) - { - # Copy data to new hash with a new line number - $result{$_} = $count_data->{$line_hash->{$_}}; - } - } - # Transform all other lines which come after the last diff entry - foreach (sort({$a <=> $b} keys(%{$count_data}))) - { - if ($_ <= $last_old) - { - # Skip lines which were covered by line hash - next; - } - # Copy data to new hash with an offset - $result{$_ + ($last_new - $last_old)} = $count_data->{$_}; - } - - return \%result; -} - - -# -# apply_diff_to_brcount(brcount, linedata) -# -# Adjust line numbers of branch coverage data according to linedata. 
-# - -sub apply_diff_to_brcount($$) -{ - my ($brcount, $linedata) = @_; - my $db; - - # Convert brcount to db format - $db = brcount_to_db($brcount); - # Apply diff to db format - $db = apply_diff($db, $linedata); - # Convert db format back to brcount format - ($brcount) = db_to_brcount($db); - - return $brcount; -} - - -# -# get_hash_max(hash_ref) -# -# Return the highest integer key from hash. -# - -sub get_hash_max($) -{ - my ($hash) = @_; - my $max; - - foreach (keys(%{$hash})) { - if (!defined($max)) { - $max = $_; - } elsif ($hash->{$_} > $max) { - $max = $_; - } - } - return $max; -} - -sub get_hash_reverse($) -{ - my ($hash) = @_; - my %result; - - foreach (keys(%{$hash})) { - $result{$hash->{$_}} = $_; - } - - return \%result; -} - -# -# apply_diff_to_funcdata(funcdata, line_hash) -# - -sub apply_diff_to_funcdata($$) -{ - my ($funcdata, $linedata) = @_; - my $last_new = get_hash_max($linedata); - my $last_old = $linedata->{$last_new}; - my $func; - my %result; - my $line_diff = get_hash_reverse($linedata); - - foreach $func (keys(%{$funcdata})) { - my $line = $funcdata->{$func}; - - if (defined($line_diff->{$line})) { - $result{$func} = $line_diff->{$line}; - } elsif ($line > $last_old) { - $result{$func} = $line + $last_new - $last_old; - } - } - - return \%result; -} - - -# -# get_line_hash($filename, $diff_data, $path_data) -# -# Find line hash in DIFF_DATA which matches FILENAME. On success, return list -# line hash. or undef in case of no match. Die if more than one line hashes in -# DIFF_DATA match. -# - -sub get_line_hash($$$) -{ - my $filename = $_[0]; - my $diff_data = $_[1]; - my $path_data = $_[2]; - my $conversion; - my $old_path; - my $new_path; - my $diff_name; - my $common; - my $old_depth; - my $new_depth; - - # Remove trailing slash from diff path - $diff_path =~ s/\/$//; - foreach (keys(%{$diff_data})) - { - my $sep = ""; - - $sep = '/' if (!/^\//); - - # Try to match diff filename with filename - if ($filename =~ /^\Q$diff_path$sep$_\E$/) - { - if ($diff_name) - { - # Two files match, choose the more specific one - # (the one with more path components) - $old_depth = ($diff_name =~ tr/\///); - $new_depth = (tr/\///); - if ($old_depth == $new_depth) - { - die("ERROR: diff file contains ". - "ambiguous entries for ". - "$filename\n"); - } - elsif ($new_depth > $old_depth) - { - $diff_name = $_; - } - } - else - { - $diff_name = $_; - } - }; - } - if ($diff_name) - { - # Get converted path - if ($filename =~ /^(.*)$diff_name$/) - { - ($common, $old_path, $new_path) = - get_common_filename($filename, - $1.$path_data->{$diff_name}); - } - return ($diff_data->{$diff_name}, $old_path, $new_path); - } - else - { - return undef; - } -} - - -# -# convert_paths(trace_data, path_conversion_data) -# -# Rename all paths in TRACE_DATA which show up in PATH_CONVERSION_DATA. 
-# - -sub convert_paths($$) -{ - my $trace_data = $_[0]; - my $path_conversion_data = $_[1]; - my $filename; - my $new_path; - - if (scalar(keys(%{$path_conversion_data})) == 0) - { - info("No path conversion data available.\n"); - return; - } - - # Expand path conversion list - foreach $filename (keys(%{$path_conversion_data})) - { - $new_path = $path_conversion_data->{$filename}; - while (($filename =~ s/^(.*)\/[^\/]+$/$1/) && - ($new_path =~ s/^(.*)\/[^\/]+$/$1/) && - ($filename ne $new_path)) - { - $path_conversion_data->{$filename} = $new_path; - } - } - - # Adjust paths - FILENAME: foreach $filename (keys(%{$trace_data})) - { - # Find a path in our conversion table that matches, starting - # with the longest path - foreach (sort({length($b) <=> length($a)} - keys(%{$path_conversion_data}))) - { - # Is this path a prefix of our filename? - if (!($filename =~ /^$_(.*)$/)) - { - next; - } - $new_path = $path_conversion_data->{$_}.$1; - - # Make sure not to overwrite an existing entry under - # that path name - if ($trace_data->{$new_path}) - { - # Need to combine entries - $trace_data->{$new_path} = - combine_info_entries( - $trace_data->{$filename}, - $trace_data->{$new_path}, - $filename); - } - else - { - # Simply rename entry - $trace_data->{$new_path} = - $trace_data->{$filename}; - } - delete($trace_data->{$filename}); - next FILENAME; - } - info("No conversion available for filename $filename\n"); - } -} - -# -# sub adjust_fncdata(funcdata, testfncdata, sumfnccount) -# -# Remove function call count data from testfncdata and sumfnccount which -# is no longer present in funcdata. -# - -sub adjust_fncdata($$$) -{ - my ($funcdata, $testfncdata, $sumfnccount) = @_; - my $testname; - my $func; - my $f_found; - my $f_hit; - - # Remove count data in testfncdata for functions which are no longer - # in funcdata - foreach $testname (keys(%{$testfncdata})) { - my $fnccount = $testfncdata->{$testname}; - - foreach $func (keys(%{$fnccount})) { - if (!defined($funcdata->{$func})) { - delete($fnccount->{$func}); - } - } - } - # Remove count data in sumfnccount for functions which are no longer - # in funcdata - foreach $func (keys(%{$sumfnccount})) { - if (!defined($funcdata->{$func})) { - delete($sumfnccount->{$func}); - } - } -} - -# -# get_func_found_and_hit(sumfnccount) -# -# Return (f_found, f_hit) for sumfnccount -# - -sub get_func_found_and_hit($) -{ - my ($sumfnccount) = @_; - my $function; - my $f_found; - my $f_hit; - - $f_found = scalar(keys(%{$sumfnccount})); - $f_hit = 0; - foreach $function (keys(%{$sumfnccount})) { - if ($sumfnccount->{$function} > 0) { - $f_hit++; - } - } - return ($f_found, $f_hit); -} - -# -# diff() -# - -sub diff() -{ - my $trace_data = read_info_file($diff); - my $diff_data; - my $path_data; - my $old_path; - my $new_path; - my %path_conversion_data; - my $filename; - my $line_hash; - my $new_name; - my $entry; - my $testdata; - my $testname; - my $sumcount; - my $funcdata; - my $checkdata; - my $testfncdata; - my $sumfnccount; - my $testbrdata; - my $sumbrcount; - my $found; - my $hit; - my $f_found; - my $f_hit; - my $br_found; - my $br_hit; - my $converted = 0; - my $unchanged = 0; - my @result; - local *INFO_HANDLE; - - ($diff_data, $path_data) = read_diff($ARGV[0]); - - foreach $filename (sort(keys(%{$trace_data}))) - { - # Find a diff section corresponding to this file - ($line_hash, $old_path, $new_path) = - get_line_hash($filename, $diff_data, $path_data); - if (!$line_hash) - { - # There's no diff section for this file - $unchanged++; - next; - } - 
$converted++; - if ($old_path && $new_path && ($old_path ne $new_path)) - { - $path_conversion_data{$old_path} = $new_path; - } - # Check for deleted files - if (scalar(keys(%{$line_hash})) == 0) - { - info("Removing $filename\n"); - delete($trace_data->{$filename}); - next; - } - info("Converting $filename\n"); - $entry = $trace_data->{$filename}; - ($testdata, $sumcount, $funcdata, $checkdata, $testfncdata, - $sumfnccount, $testbrdata, $sumbrcount) = - get_info_entry($entry); - # Convert test data - foreach $testname (keys(%{$testdata})) - { - # Adjust line numbers of line coverage data - $testdata->{$testname} = - apply_diff($testdata->{$testname}, $line_hash); - # Adjust line numbers of branch coverage data - $testbrdata->{$testname} = - apply_diff_to_brcount($testbrdata->{$testname}, - $line_hash); - # Remove empty sets of test data - if (scalar(keys(%{$testdata->{$testname}})) == 0) - { - delete($testdata->{$testname}); - delete($testfncdata->{$testname}); - delete($testbrdata->{$testname}); - } - } - # Rename test data to indicate conversion - foreach $testname (keys(%{$testdata})) - { - # Skip testnames which already contain an extension - if ($testname =~ /,[^,]+$/) - { - next; - } - # Check for name conflict - if (defined($testdata->{$testname.",diff"})) - { - # Add counts - ($testdata->{$testname}) = add_counts( - $testdata->{$testname}, - $testdata->{$testname.",diff"}); - delete($testdata->{$testname.",diff"}); - # Add function call counts - ($testfncdata->{$testname}) = add_fnccount( - $testfncdata->{$testname}, - $testfncdata->{$testname.",diff"}); - delete($testfncdata->{$testname.",diff"}); - # Add branch counts - combine_brcount( - $testbrdata->{$testname}, - $testbrdata->{$testname.",diff"}, - $BR_ADD, 1); - delete($testbrdata->{$testname.",diff"}); - } - # Move test data to new testname - $testdata->{$testname.",diff"} = $testdata->{$testname}; - delete($testdata->{$testname}); - # Move function call count data to new testname - $testfncdata->{$testname.",diff"} = - $testfncdata->{$testname}; - delete($testfncdata->{$testname}); - # Move branch count data to new testname - $testbrdata->{$testname.",diff"} = - $testbrdata->{$testname}; - delete($testbrdata->{$testname}); - } - # Convert summary of test data - $sumcount = apply_diff($sumcount, $line_hash); - # Convert function data - $funcdata = apply_diff_to_funcdata($funcdata, $line_hash); - # Convert branch coverage data - $sumbrcount = apply_diff_to_brcount($sumbrcount, $line_hash); - # Update found/hit numbers - # Convert checksum data - $checkdata = apply_diff($checkdata, $line_hash); - # Convert function call count data - adjust_fncdata($funcdata, $testfncdata, $sumfnccount); - ($f_found, $f_hit) = get_func_found_and_hit($sumfnccount); - ($br_found, $br_hit) = get_br_found_and_hit($sumbrcount); - # Update found/hit numbers - $found = 0; - $hit = 0; - foreach (keys(%{$sumcount})) - { - $found++; - if ($sumcount->{$_} > 0) - { - $hit++; - } - } - if ($found > 0) - { - # Store converted entry - set_info_entry($entry, $testdata, $sumcount, $funcdata, - $checkdata, $testfncdata, $sumfnccount, - $testbrdata, $sumbrcount, $found, $hit, - $f_found, $f_hit, $br_found, $br_hit); - } - else - { - # Remove empty data set - delete($trace_data->{$filename}); - } - } - - # Convert filenames as well if requested - if ($convert_filenames) - { - convert_paths($trace_data, \%path_conversion_data); - } - - info("$converted entr".($converted != 1 ? "ies" : "y")." converted, ". - "$unchanged entr".($unchanged != 1 ? "ies" : "y")." 
left ". - "unchanged.\n"); - - # Write data - if (!$data_stdout) - { - info("Writing data to $output_filename\n"); - open(INFO_HANDLE, ">", $output_filename) - or die("ERROR: cannot write to $output_filename!\n"); - @result = write_info_file(*INFO_HANDLE, $trace_data); - close(*INFO_HANDLE); - } - else - { - @result = write_info_file(*STDOUT, $trace_data); - } - - return @result; + my ($vol1, $dir1, $file1) = File::Spec->splitpath($_[0]); + my ($vol2, $dir2, $file2) = File::Spec->splitpath($_[1]); + my @list1 = ($vol1, File::Spec->splitdir($dir1), $file1); + my @list2 = ($vol2, File::Spec->splitdir($dir2), $file2); + my @result; + + # Work in reverse order, i.e. beginning with the filename itself + while ( + @list1 && + @list2 && + ( (!$lcovutil::case_insensitive && $list1[$#list1] eq $list2[$#list2]) + || + ($lcovutil::case_insensitive && + lc($list1[$#list1]) eq lc($list2[$#list2]))) + ) { + unshift(@result, pop(@list1)); + pop(@list2); + } + + # Did we find any similarities? + if (scalar(@result) > 0) { + return (File::Spec->catfile(@result), + File::Spec->catfile(@list1), + File::Spec->catfile(@list2)); + } else { + return undef; + } } # @@ -3968,362 +1961,91 @@ sub diff() sub summary() { - my $filename; - my $current; - my $total; - my $ln_total_found; - my $ln_total_hit; - my $fn_total_found; - my $fn_total_hit; - my $br_total_found; - my $br_total_hit; - - # Read and combine trace files - foreach $filename (@opt_summary) { - $current = read_info_file($filename); - if (!defined($total)) { - $total = $current; - } else { - $total = combine_info_files($total, $current); - } - } - # Calculate coverage data - foreach $filename (keys(%{$total})) - { - my $entry = $total->{$filename}; - my $ln_found; - my $ln_hit; - my $fn_found; - my $fn_hit; - my $br_found; - my $br_hit; - - (undef, undef, undef, undef, undef, undef, undef, undef, - $ln_found, $ln_hit, $fn_found, $fn_hit, $br_found, - $br_hit) = get_info_entry($entry); - - # Add to totals - $ln_total_found += $ln_found; - $ln_total_hit += $ln_hit; - $fn_total_found += $fn_found; - $fn_total_hit += $fn_hit; - $br_total_found += $br_found; - $br_total_hit += $br_hit; - } - - - return ($ln_total_found, $ln_total_hit, $fn_total_found, $fn_total_hit, - $br_total_found, $br_total_hit); -} + my @merge = AggregateTraces::find_from_glob(@opt_summary); + info(1, "Summarize " . scalar(@merge) . " files...\n"); -# -# system_no_output(mode, parameters) -# -# Call an external program using PARAMETERS while suppressing depending on -# the value of MODE: -# -# MODE & 1: suppress STDOUT -# MODE & 2: suppress STDERR -# -# Return 0 on success, non-zero otherwise. -# - -sub system_no_output($@) -{ - my $mode = shift; - my $result; - local *OLD_STDERR; - local *OLD_STDOUT; - - # Save old stdout and stderr handles - ($mode & 1) && open(OLD_STDOUT, ">>&", "STDOUT"); - ($mode & 2) && open(OLD_STDERR, ">>&", "STDERR"); - - # Redirect to /dev/null - ($mode & 1) && open(STDOUT, ">", "/dev/null"); - ($mode & 2) && open(STDERR, ">", "/dev/null"); - - system(@_); - $result = $?; - - # Close redirected handles - ($mode & 1) && close(STDOUT); - ($mode & 2) && close(STDERR); - - # Restore old handles - ($mode & 1) && open(STDOUT, ">>&", "OLD_STDOUT"); - ($mode & 2) && open(STDERR, ">>&", "OLD_STDERR"); - - return $result; -} - - -# -# read_config(filename) -# -# Read configuration file FILENAME and return a reference to a hash containing -# all valid key=value pairs found. 
-# - -sub read_config($) -{ - my $filename = $_[0]; - my %result; - my $key; - my $value; - local *HANDLE; - - if (!open(HANDLE, "<", $filename)) - { - warn("WARNING: cannot read configuration file $filename\n"); - return undef; - } - while () - { - chomp; - # Skip comments - s/#.*//; - # Remove leading blanks - s/^\s+//; - # Remove trailing blanks - s/\s+$//; - next unless length; - ($key, $value) = split(/\s*=\s*/, $_, 2); - if (defined($key) && defined($value)) - { - $result{$key} = $value; - } - else - { - warn("WARNING: malformed statement in line $. ". - "of configuration file $filename\n"); - } - } - close(HANDLE); - return \%result; -} - - -# -# apply_config(REF) -# -# REF is a reference to a hash containing the following mapping: -# -# key_string => var_ref -# -# where KEY_STRING is a keyword and VAR_REF is a reference to an associated -# variable. If the global configuration hashes CONFIG or OPT_RC contain a value -# for keyword KEY_STRING, VAR_REF will be assigned the value for that keyword. -# - -sub apply_config($) -{ - my $ref = $_[0]; - - foreach (keys(%{$ref})) - { - if (defined($opt_rc{$_})) { - ${$ref->{$_}} = $opt_rc{$_}; - } elsif (defined($config->{$_})) { - ${$ref->{$_}} = $config->{$_}; - } - } -} - -sub warn_handler($) -{ - my ($msg) = @_; - - warn("$tool_name: $msg"); -} - -sub die_handler($) -{ - my ($msg) = @_; - - temp_cleanup(); - die("$tool_name: $msg"); -} - -sub abort_handler($) -{ - temp_cleanup(); - exit(1); -} - -sub temp_cleanup() -{ - # Ensure temp directory is not in use by current process - chdir("/"); - - if (@temp_dirs) { - info("Removing temporary directories.\n"); - foreach (@temp_dirs) { - rmtree($_); - } - @temp_dirs = (); - } + my ($total, $effective) = + AggregateTraces::merge(ReadCurrentSource->new(), @merge); + return $total; } sub setup_gkv_sys() { - system_no_output(3, "mount", "-t", "debugfs", "nodev", - "/sys/kernel/debug"); + system_no_output(3, "mount", "-t", "debugfs", "nodev", "/sys/kernel/debug"); } sub setup_gkv_proc() { - if (system_no_output(3, "modprobe", "gcov_proc")) { - system_no_output(3, "modprobe", "gcov_prof"); - } + if (system_no_output(3, "modprobe", "gcov_proc")) { + system_no_output(3, "modprobe", "gcov_prof"); + } } sub check_gkv_sys($) { - my ($dir) = @_; + my ($dir) = @_; - if (-e "$dir/reset") { - return 1; - } - return 0; + if (-e "$dir/reset") { + return 1; + } + return 0; } sub check_gkv_proc($) { - my ($dir) = @_; + my ($dir) = @_; - if (-e "$dir/vmlinux") { - return 1; - } - return 0; + if (-e "$dir/vmlinux") { + return 1; + } + return 0; } sub setup_gkv() { - my $dir; - my $sys_dir = "/sys/kernel/debug/gcov"; - my $proc_dir = "/proc/gcov"; - my @todo; - - if (!defined($gcov_dir)) { - info("Auto-detecting gcov kernel support.\n"); - @todo = ( "cs", "cp", "ss", "cs", "sp", "cp" ); - } elsif ($gcov_dir =~ /proc/) { - info("Checking gcov kernel support at $gcov_dir ". - "(user-specified).\n"); - @todo = ( "cp", "sp", "cp", "cs", "ss", "cs"); - } else { - info("Checking gcov kernel support at $gcov_dir ". - "(user-specified).\n"); - @todo = ( "cs", "ss", "cs", "cp", "sp", "cp", ); - } - foreach (@todo) { - if ($_ eq "cs") { - # Check /sys - $dir = defined($gcov_dir) ? $gcov_dir : $sys_dir; - if (check_gkv_sys($dir)) { - info("Found ".$GKV_NAME[$GKV_SYS]." gcov ". - "kernel support at $dir\n"); - return ($GKV_SYS, $dir); - } - } elsif ($_ eq "cp") { - # Check /proc - $dir = defined($gcov_dir) ? $gcov_dir : $proc_dir; - if (check_gkv_proc($dir)) { - info("Found ".$GKV_NAME[$GKV_PROC]." gcov ". 
- "kernel support at $dir\n"); - return ($GKV_PROC, $dir); - } - } elsif ($_ eq "ss") { - # Setup /sys - setup_gkv_sys(); - } elsif ($_ eq "sp") { - # Setup /proc - setup_gkv_proc(); - } - } - if (defined($gcov_dir)) { - die("ERROR: could not find gcov kernel data at $gcov_dir\n"); - } else { - die("ERROR: no gcov kernel data found\n"); - } -} - - -# -# get_overall_line(found, hit, name_singular, name_plural) -# -# Return a string containing overall information for the specified -# found/hit data. -# - -sub get_overall_line($$$$) -{ - my ($found, $hit, $name_sn, $name_pl) = @_; - my $name; - - return "no data found" if (!defined($found) || $found == 0); - $name = ($found == 1) ? $name_sn : $name_pl; - - return rate($hit, $found, "% ($hit of $found $name)"); -} - - -# -# print_overall_rate(ln_do, ln_found, ln_hit, fn_do, fn_found, fn_hit, br_do -# br_found, br_hit) -# -# Print overall coverage rates for the specified coverage types. -# - -sub print_overall_rate($$$$$$$$$) -{ - my ($ln_do, $ln_found, $ln_hit, $fn_do, $fn_found, $fn_hit, - $br_do, $br_found, $br_hit) = @_; - - info("Summary coverage rate:\n"); - info(" lines......: %s\n", - get_overall_line($ln_found, $ln_hit, "line", "lines")) - if ($ln_do); - info(" functions..: %s\n", - get_overall_line($fn_found, $fn_hit, "function", "functions")) - if ($fn_do); - info(" branches...: %s\n", - get_overall_line($br_found, $br_hit, "branch", "branches")) - if ($br_do); -} - - -# -# rate(hit, found[, suffix, precision, width]) -# -# Return the coverage rate [0..100] for HIT and FOUND values. 0 is only -# returned when HIT is 0. 100 is only returned when HIT equals FOUND. -# PRECISION specifies the precision of the result. SUFFIX defines a -# string that is appended to the result if FOUND is non-zero. Spaces -# are added to the start of the resulting string until it is at least WIDTH -# characters wide. -# - -sub rate($$;$$$) -{ - my ($hit, $found, $suffix, $precision, $width) = @_; - my $rate; - - # Assign defaults if necessary - $precision = 1 if (!defined($precision)); - $suffix = "" if (!defined($suffix)); - $width = 0 if (!defined($width)); - - return sprintf("%*s", $width, "-") if (!defined($found) || $found == 0); - $rate = sprintf("%.*f", $precision, $hit * 100 / $found); - - # Adjust rates if necessary - if ($rate == 0 && $hit > 0) { - $rate = sprintf("%.*f", $precision, 1 / 10 ** $precision); - } elsif ($rate == 100 && $hit != $found) { - $rate = sprintf("%.*f", $precision, 100 - 1 / 10 ** $precision); - } - - return sprintf("%*s", $width, $rate.$suffix); + my $dir; + my $sys_dir = "/sys/kernel/debug/gcov"; + my $proc_dir = "/proc/gcov"; + my @todo; + + if (!defined($gcov_dir)) { + info("Auto-detecting gcov kernel support.\n"); + @todo = ("cs", "cp", "ss", "cs", "sp", "cp"); + } elsif ($gcov_dir =~ /proc/) { + info("Checking gcov kernel support at $gcov_dir (user-specified).\n"); + @todo = ("cp", "sp", "cp", "cs", "ss", "cs"); + } else { + info("Checking gcov kernel support at $gcov_dir (user-specified).\n"); + @todo = ("cs", "ss", "cs", "cp", "sp", "cp",); + } + foreach (@todo) { + if ($_ eq "cs") { + # Check /sys + $dir = defined($gcov_dir) ? $gcov_dir : $sys_dir; + if (check_gkv_sys($dir)) { + info("Found " . + $GKV_NAME[$GKV_SYS] . " gcov kernel support at $dir\n"); + return ($GKV_SYS, $dir); + } + } elsif ($_ eq "cp") { + # Check /proc + $dir = defined($gcov_dir) ? $gcov_dir : $proc_dir; + if (check_gkv_proc($dir)) { + info("Found " . $GKV_NAME[$GKV_PROC] . 
+ " gcov kernel support at $dir\n"); + return ($GKV_PROC, $dir); + } + } elsif ($_ eq "ss") { + # Setup /sys + setup_gkv_sys(); + } elsif ($_ eq "sp") { + # Setup /proc + setup_gkv_proc(); + } + } + if (defined($gcov_dir)) { + die("could not find gcov kernel data at $gcov_dir\n"); + } else { + die("no gcov kernel data found\n"); + } } diff --git a/bin/llvm2lcov b/bin/llvm2lcov new file mode 100755 index 00000000..7bcd9211 --- /dev/null +++ b/bin/llvm2lcov @@ -0,0 +1,545 @@ +#!/usr/bin/env perl +# +# Copyright (c) MediaTek USA Inc., 2024 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# +# This script traverses C/C++ coverage data in JSON format, generated by +# 'llvm-cov export -format=text ....' +# Coverage data is in one or more JSON data files (generated by llvm-cov) and +# translates it into LCOV .info format. +# +# $ clang[++] -o myExe -fprofile-inst-generate -fcoverage-mapping \ +# [-fcoverage-mcdc] .... +# $ ./myExe ... +# $ llvm-profdata merge -o myExe.profdata --sparse *.profraw +# $ llvm-cov export -format=text -instr-profile=myExe.profdata \ +# ./myExe > myExe.json +# $ llvm2lcov [--output myExe.info] [--test-name name] [options] myExe.json +# +# In order to generate MC/DC data, note that you must: +# - use LLVM/18 or newer +# - enable MC/DC instrumentation in your compile/link steps, and +# - pass the '--mcdc-coverage' flag to llvm2lcov +# +# You can also use LLVM/21 or newer to generate MC/DC data more cleanly. +# +# See 'llvm2lcov --help' for more usage information +# +# See the LLVM documentation for more information on flags and compilation options. + +use strict; +use version; +use warnings; +require Exporter; + +use File::Basename qw(basename dirname fileparse); +use File::Spec::Functions qw /abs2rel catdir file_name_is_absolute splitdir + splitpath catpath catfile/; +use File::Temp; +use File::Copy qw(copy move); +use File::Path; +use Cwd qw/abs_path getcwd realpath/; +use Time::HiRes; # for profiling +use Capture::Tiny; +use FindBin; +use Storable; +use POSIX; + +use lib "$FindBin::RealBin/../lib"; +use lcovutil; + +sub print_usage(*) +{ + local *HANDLE = shift; + print(HANDLE < myExe.json + # use this script to convert to LCOV format + \$ $lcovutil::tool_name --output myExe.info --test-name myTestcase \\ + --mcdc-coverage --branch-coverage myExe.json + # and generate a genhtml-format coverage report: + \$ genhtml -o html_report myExe.info ... 
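For orientation, the parse() routine below expects the JSON produced by 'llvm-cov export -format=text' to look roughly like the following abbreviated, hand-written Python literal; only the fields that the parser actually reads are shown, and all values are made up:

    # Abbreviated shape of the llvm-cov JSON export as consumed by parse().
    example_export = {
        "version": "2.0.1",        # compared against 3.0.1 to pick the MC/DC layout
        "data": [{
            "files": [{
                "filename": "src/foo.cpp",
                "summary": {},     # per-file totals (unused fields omitted)
                "segments": [      # [line, col, count, has_count,
                    [3, 5, 7, True, True, False],   # is_region_entry, is_gap]
                ],
                "branches": [],    # 9-element records: line/col range, true/false
                "expansions": [],  # counts, fileId, expandedId, kind
            }],
            "functions": [],       # per-function regions, branches, mcdc_records
        }],
    }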
+ +EOF +} + +sub parse +{ + my $testname = shift; + + die('JSON file argument required') unless @_; + + my $top = TraceFile->new(); + + $testname = '' unless defined($testname); + + my $srcReader = ReadCurrentSource->new(); + + foreach my $jsonFile (@_) { + die("no such JSON file '$jsonFile'") unless -e $jsonFile; + my $json = JsonSupport::load($jsonFile); + die("unrecognized JSON file format in $jsonFile") + unless (defined($json) && + exists($json->{data}) && + 'ARRAY' eq ref($json->{data})); + my $json_version = version->parse($json->{version}); + + lcovutil::info("read $jsonFile\n"); + + foreach my $k (@{$json->{data}}) { + #lcovutil::info("starting data entry..\n"); + foreach my $f (@{$k->{files}}) { + lcovutil::info('parsing ' . $f->{filename} . " ..\n"); + my $filename = + ReadCurrentSource::resolve_path($f->{filename}, 1); + if (TraceFile::skipCurrentFile($filename)) { + if (!exists($lcovutil::excluded_files{$filename})) { + $lcovutil::excluded_files{$filename} = 1; + lcovutil::info("Excluding $filename\n"); + } + next; + } + $srcReader->open($filename); + + my $fileInfo = $top->data($filename); + + my $version = lcovutil::extractFileVersion($filename); + $fileInfo->version($version) + if (defined($version) && $version ne ""); + + my $lineData = $fileInfo->test($testname); + + my $summary = $f->{summary}; + my $branches = $f->{branches}; + my $segments = $f->{segments}; + my $expansions = $f->{expansions}; + my $mcdc = $f->{mcdc_records} + if $lcovutil::mcdc_coverage && exists($f->{mcdc_records}); + + my $index = 0; + my $currentLine = 0; + + while ($index < $#$segments) { + my $segment = $segments->[$index]; + die("unexpected segment data") + unless scalar(@$segment) == 6; + my ($line, $col, $count, $hasCount, $isRegionEntry, $isGap) + = @$segment; + $currentLine = $line if !$currentLine; + if ($hasCount) { + $segment = $segments->[$index + 1]; + die("unexpected segment data") + unless scalar(@$segment) == 6; + my ($next_line, $next_col, $next_count, $next_hasCount, + $next_isRegionEntry, $next_isGap) + = @$segment; + if ($currentLine == $next_line && !$next_isRegionEntry) + { + while ($next_line == $currentLine && + ++$index < $#$segments) { + $segment = $segments->[$index + 1]; + die("unexpected segment data") + unless scalar(@$segment) == 6; + $next_line = $segment->[0]; + $count = $next_count + if ($count && + $next_count > $count && + $currentLine == $next_line); + $next_count = $segment->[2]; + } + $lineData->append($currentLine, $count); + ++$currentLine; + } else { + my $bound = $next_line; + my $i = $index; + while (!$next_isRegionEntry && + $next_line == $bound && + ++$i < $#$segments) { + $segment = $segments->[$i + 1]; + die("unexpected segment data") + unless scalar(@$segment) == 6; + $next_line = $segment->[0]; + $next_isRegionEntry = $segment->[4]; + } + --$bound + if ($next_isRegionEntry && + $next_line == $bound && + !($isRegionEntry && $line == $next_line)); + $count = $next_count + if $next_count > $count && $line == $next_line; + while ($currentLine <= $bound) { + $lineData->append($currentLine, $count); + ++$currentLine; + } + ++$index; + } + } else { + do { + ++$index; + $segment = $segments->[$index]; + die("unexpected segment data") + unless scalar(@$segment) == 6; + ($line, $col, $count, $hasCount, + $isRegionEntry, $isGap) = @$segment; + } while (!$hasCount && $index < $#$segments); + $currentLine = $isRegionEntry ? 
$line : $line + 1; + } + } + if ($mcdc && $json_version < version->parse("3.0.1")) { + my $mcdcData = $fileInfo->testcase_mcdc($testname); + my @mcdcBranches + ; # array (start line, start column, expression) + foreach my $branch (@$branches) { + die("unexpected branch data") + unless scalar(@$branch) == 9; + # Consider only branches of "MCDCBranchRegion" kind. + next if ($branch->[-1] != 6); + my ($line, $startCol, $endline, + $endcol, $trueCount, $falseCount, + $fileId, $expandedId, $kind) = @$branch; + my $expr = + $srcReader->getExpr($line, $startCol, $endline, + $endcol) + if $srcReader->notEmpty(); + push(@mcdcBranches, [$line, $startCol, $expr]); + } + foreach my $m (@$mcdc) { + die("unexpected MC/DC data") unless scalar(@$m) == 7; + my ($line, $startCol, $endLine, $endCol, $expandedId, + $kind, $cov) + = @$m; + die("unexpected MC/DC cov") + unless 'ARRAY' eq ref($cov); + + # read the source line and extract the expression... + my $expr = + $srcReader->getExpr($line, $startCol, $endLine, + $endCol) + if ($srcReader->notEmpty()); + my @brExprs; + foreach my $branch (@mcdcBranches) { + my ($brLine, $brCol, $brExpr) = @$branch; + if (($brLine > $line || + ($brLine == $line && $brCol >= $startCol)) + && + ( $brLine < $endLine || + ($brLine == $endLine && $brCol <= $endCol)) + ) { + push(@brExprs, [$brLine, $brCol, $brExpr]); + } + } + @brExprs = + sort { $a->[0] <=> $b->[0] || $a->[1] <=> $b->[1] } + @brExprs; + my $current_mcdc = + $mcdcData->new_mcdc($mcdcData, $line); + my $groupSize = scalar(@$cov); + my $idx = 0; + foreach my $c (@$cov) { + my $branchExpr = $brExprs[$idx]->[2] + if $groupSize == scalar(@brExprs); + my $fullExpr = + defined($branchExpr) && + defined($expr) ? "'$branchExpr' in '$expr'" : + $idx; + $current_mcdc->insertExpr($filename, $groupSize, 0, + $c, $idx, $fullExpr); + $current_mcdc->insertExpr($filename, $groupSize, 1, + $c, $idx, $fullExpr); + ++$idx; + } + $mcdcData->close_mcdcBlock($current_mcdc); + } + } + lcovutil::info(2, "finished parsing $filename\n"); + } + + foreach my $f (@{$k->{functions}}) { + my $name = $f->{name}; + my $filenames = $f->{filenames}; # array + my $filename = + ReadCurrentSource::resolve_path($filenames->[0], 1); + next if (TraceFile::skipCurrentFile($filename)); + die('unexpected unknown file \'' . $filenames->[0] . '\'') + unless $top->file_exists($filename); + $srcReader->open($filename); + + my $info = $top->data($filename); + my $count = $f->{count}; + my $regions = $f->{regions}; # startline/col, endline/col/ + my $branches = $f->{branches}; + # The version "3.0.1" adds fileId to mcdc. + # This allows using MC/DC branches from expansions for placing MC/DC entries defined in expansions to expansions call sites. 
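The segment walk near the top of parse() collapses those six-element segment records into a single execution count per source line. The following deliberately simplified Python sketch shows only the basic idea; the real loop above additionally handles gap regions, region entries and multiple segments on the same line:

    # Simplified: a counted segment applies to every line up to the next segment.
    def line_counts(segments):
        counts = {}
        for seg, nxt in zip(segments, segments[1:]):
            line, _col, count, has_count, _entry, _gap = seg
            if not has_count:
                continue
            for ln in range(line, nxt[0] + 1):
                counts[ln] = max(counts.get(ln, 0), count)
        return counts

    # line_counts([[3, 5, 7, True, True, False],
    #              [6, 2, 0, False, False, False]])  ->  {3: 7, 4: 7, 5: 7, 6: 7}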
+ my $mcdc = $f->{mcdc_records} + if ($lcovutil::mcdc_coverage && + $json_version >= version->parse("3.0.1") && + exists($f->{mcdc_records})); + + my $functionMap = $info->testfnc($testname); + # use branch data to derive MC/DC expression - so need + # it, even if user didn't ask + my $branchData = $info->testbr($testname) + if $lcovutil::br_coverage || $mcdc; + my $mcdcData = $info->testcase_mcdc($testname) + if ($json_version >= version->parse("3.0.1") && + $lcovutil::mcdc_coverage); + my $startLine = $regions->[0]->[0]; # startline of first region + my $endline = $regions->[0]->[2]; # endline of last region + if ($lcovutil::func_coverage) { + my $func = + $functionMap->define_function($name, $startLine, + $endline) + unless defined($functionMap->findName($name)); + $functionMap->add_count($name, $count); + } + + my @mcdcBranches + ; # array (fileId, start line, start column, expression) + my %expanded_mcdcBranches + ; # hash of branch's fileId -> branch's start line + + if ($branchData) { + my $funcBranchData = BranchData->new(); + my $regionIdx = 0; + foreach my $b (@$branches) { + die("unexpected branch data") unless scalar(@$b) == 9; + my ($brStartLine, $brStartCol, $endLine, + $endCol, $trueCount, $falseCount, + $fileId, $expandedId, $kind) = @$b; + my ($line, $col) = ($brStartLine, $brStartCol); + my $expr; + + if ($fileId == 0) { + $expr = + $srcReader->getExpr($line, $col, $endLine, + $endCol) + if $srcReader->notEmpty(); + } else { + # Find a source range, which contains the branch. + while ($regionIdx < scalar(@$regions)) { + my ($rStartLine, $rStartCol, $rEndLine, + $rEndCol, $rCount, $rFileId, + $rExpandedId, $rKind + ) = @{$regions->[$regionIdx]}; + if ($rExpandedId == $fileId && $rKind == 1) { + if ($rFileId != 0) { + # Check previous regions to find one + # that describes lines of the function's + # source file. + my $rIdx = $regionIdx - 1; + $fileId = $rFileId; + while ($fileId != 0 && $rIdx >= 0) { + ($rStartLine, $rStartCol, + $rEndLine, $rEndCol, + $rCount, $rFileId, + $rExpandedId, $rKind + ) = @{$regions->[$rIdx]}; + $fileId = $rFileId + if ($rExpandedId == $fileId && + $rKind == 1); + --$rIdx; + } + } + ($line, $col) = ($rStartLine, $rStartCol); + last; + } + ++$regionIdx; + } + } + $fileId = $b->[6]; + # Consider only branches of "MCDCBranchRegion" kind. + if ($mcdc && + $kind == 6 && + !defined($expanded_mcdcBranches{$fileId})) { + if ($fileId && + scalar(@mcdcBranches) && + $fileId == $mcdcBranches[-1]->[0]) { + pop(@mcdcBranches); + $expanded_mcdcBranches{$fileId} = $line; + } else { + push(@mcdcBranches, + [$fileId, $line, $col, $expr]); + } + } + + if ($lcovutil::br_coverage) { + # Processed branch on the same line doesn't have to be the previous. + my $brEntry = $funcBranchData->value($line); + my $branchIdx = + !defined($brEntry) ? 0 : + scalar(@{$brEntry->getBlock(0)}); + my $br = + BranchBlock->new($branchIdx, $trueCount, + !defined($expr) ? $branchIdx : + "(" . $expr . ") == True"); + $funcBranchData->append($line, 0, $br, $filename); + + ++$branchIdx; + $br = + BranchBlock->new($branchIdx, $falseCount, + !defined($expr) ? $branchIdx : + "(" . $expr . 
") == False"); + $funcBranchData->append($line, 0, $br, $filename); + } + } + $branchData->union($funcBranchData) + if $lcovutil::br_coverage; + } + if ($mcdc) { + foreach my $m (@$mcdc) { + die("unexpected MC/DC data") unless scalar(@$m) == 10; + my ($line, $col, $endLine, $endCol, + $trueCount, $falseCount, $fileId, $expandedId, + $kind, $cov) = @$m; + die("unexpected MC/DC cov") + unless 'ARRAY' eq ref($cov); + my $expr; + my @brExprs; + if ($fileId == $expandedId) { + foreach my $branch (@mcdcBranches) { + my ($brFileId, $brLine, $brCol, $brExpr) = + @$branch; + if (($brLine > $line || + ($brLine == $line && $brCol >= $col)) + && + ( $brLine < $endLine || + ($brLine == $endLine && + $brCol <= $endCol)) + ) { + push(@brExprs, [$brLine, $brCol, $brExpr]); + } + } + @brExprs = sort { + $a->[0] <=> $b->[0] || + $a->[1] <=> $b->[1] + } @brExprs; + $expr = + $srcReader->getExpr($line, $col, + $endLine, $endCol) + if $srcReader->notEmpty(); + } else { + $line = $expanded_mcdcBranches{$fileId}; + } + my $current_mcdc = + $mcdcData->new_mcdc($mcdcData, $line); + my $groupSize = scalar(@$cov); + my $idx = 0; + foreach my $c (@$cov) { + my $brExpr = $brExprs[$idx]->[2] + if ($fileId == $expandedId && + $idx < scalar(@brExprs)); + my $fullExpr = defined($brExpr) && + defined($expr) ? "'$brExpr' in '$expr'" : $idx; + $current_mcdc->insertExpr($filename, $groupSize, 0, + $c, $idx, $fullExpr); + $current_mcdc->insertExpr($filename, $groupSize, 1, + $c, $idx, $fullExpr); + ++$idx; + } + $mcdcData->close_mcdcBlock($current_mcdc); + } + } + $info->testbr()->remove($testname) + if $mcdc && !$lcovutil::br_coverage; + } + } + lcovutil::info(2, "finished $jsonFile\n"); + } + # now create the merge summary... + foreach my $filename ($top->files()) { + my $info = $top->data($filename); + + my @work; + push(@work, [$info->sum(), $info->test]); + push(@work, [$info->sumbr(), $info->testbr]) + if $lcovutil::br_coverage; + push(@work, [$info->func(), $info->testfnc]) + if $lcovutil::func_coverage; + push(@work, [$info->mcdc(), $info->testcase_mcdc()]) + if $lcovutil::mcdc_coverage; + + foreach my $d (@work) { + my ($sum, $pertest) = @$d; + $sum->union($pertest->value($testname)); + } + } + return $top; +} + +my $output_filename = 'llvm2lcov.info'; +my $testname; +my %opts = ('test-name|t=s' => \$testname, + 'output-filename|o=s' => \$output_filename,); +my %rc_opts; +if (!lcovutil::parseOptions(\%rc_opts, \%opts, \$output_filename)) { + print(STDERR "argparse failed\n"); + exit(1); +} + +my $info = parse($testname, @ARGV); +$info->applyFilters(ReadCurrentSource->new()); +$info->write_info_file($output_filename); + +$info->print_summary() if $lcovutil::verbose >= 0; +my $exit_code = 0; +$info->checkCoverageCriteria(); +CoverageCriteria::summarize(); +$exit_code = 1 if $CoverageCriteria::coverageCriteriaStatus; + +lcovutil::summarize_messages(); + +lcovutil::cleanup_callbacks(); + +exit $exit_code; diff --git a/bin/perl2lcov b/bin/perl2lcov new file mode 100755 index 00000000..1b40bd44 --- /dev/null +++ b/bin/perl2lcov @@ -0,0 +1,444 @@ +#!/usr/bin/env perl + +# Copyright (c) MediaTek USA Inc., 2023-2024 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# +# perl2lcov [--output mydata.info] [--testname name] [options] cover_db+ +# +# This script traverses perl coverage information in one or more coverage +# data directories (generated by the perl Devel::Cover module) and +# translates it into LCOV .info format. +# +# In addition to common options supported by other tools in the LCOV +# suite (e.g., --comment, --version-script, --ignore-error, --substitute, +# --exclude, etc.), the tool options are: +# +# --output filename: +# The lcov data will be written to the specified file - or to +# the file called 'perlcov.info' in the current run directory +# if this option is not used. +# +# --testname name: +# Coverage info will be associated with the testcase name provided. +# It is not necessary to provide a name. +# +# See the Devel::Cover documentation for directions on how to generate +# perl coverage data. + +use Devel::Cover::DB; +use Devel::Cover::Truth_Table; +use strict; +use warnings; +use Getopt::Long; +use FindBin; + +use lib "$FindBin::RealBin/../lib"; +use lcovutil qw($tool_name); + +sub print_usage +{ + local *HANDLE = $_[0]; + + print(HANDLE <[$mid]; + if ($line < $v->[0]) { + $max = $mid - 1; + } elsif ($line > $v->[0]) { + $best = $v; + $min = $mid + 1; + } else { + # line number matched...which ought not to happen because + # Deval::Cover reports subroutine start as first executable + # line in the function. + # That won't be the line containing "package ..." - unless the + # user wrote the whole thing on one line. Not clever. Deserves + # to lose, if something in here breaks. + return $v; + } + } + return $best; +} + +$lcovutil::br_coverage = 1; +$lcovutil::func_coverage = 1; +$lcovutil::derive_function_end_line = 1; +$lcovutil::derive_function_end_line_all_files = 1; +lcovutil::save_cmd_line(\@ARGV, "$FindBin::RealBin"); +lcovutil::set_extensions('perl', '.*'); + +my $testname = ''; +my $output_file = 'perlcov.info'; +our %options = ('testname=s' => \$testname, + 'output|o=s' => \$output_file,); +if (!lcovutil::parseOptions({}, \%options)) { + print(STDERR "Use $lcovutil::tool_name --help to get usage information.\n"); + exit(1); +} + +my $info = TraceFile->new(); + +foreach my $db (@ARGV) { + # parse the other files first - to grab the data we want - + # Not quite sure how to map 'cond' to LCOV branch coverage. + + # save a readable message before remapping the $db + my $msg = + "$db appears to be empty; perhaps you need to run 'cover $db' before executing $0."; + my $db = Devel::Cover::DB->new(db => $db); + my $cover = $db->cover; + my @items = $cover->items; + if (!@items) { + lcovutil::ignorable_error($lcovutil::ERROR_EMPTY, $msg); + next; + } + foreach my $file ($cover->items) { + my $filename = lcovutil::subst_file_name($file); + lcovutil::info("process $filename" . + ($filename ne $file ? " (substituted from $file)" : '') . 
"\n"); + if (TraceFile::skipCurrentFile($filename)) { + lcovutil::info(" (excluded)\n"); + next; + } + my $f = $cover->file($file); + my $fileData = + $info->data($file); # really, want to use stored file name + my $functionMap = $fileData->testfnc($testname); + my $lineMap = $fileData->test($testname); + my $branchMap = $fileData->testbr($testname); + + # use statement coverage to mark un-evaluated branches + my ($stmts, $branches, $conditions, $subroutines); + my @packageExtents; + # Devel::Cover doesn't instrument all the functions in every file - + # so need a workaround to find better extents for some of them + my @functionExtents; + + foreach my $criteria ($f->items) { + # some types we don't use + next if (grep(/^$criteria$/, ('pod', 'time', 'path'))); + my $c = $f->criterion($criteria); + if ($criteria eq 'branch') { + $branches = $c; + } elsif ($criteria eq 'condition') { + $conditions = $c; + } elsif ($criteria eq 'subroutine') { + $subroutines = $c; + if (-f $file) { + open(GREP, '-|', 'grep', '--line-number', '-E', + '^\s*(package|sub) ', $file) or + die("unable to grep $file: $!"); + while () { + if (/^(\d+):\s*package\s+(\S+)\s*;/) { + push(@packageExtents, [$1, $2 . '::']); + } elsif (/^(\d+):\s*sub\s+([^\s(]+)/) { + push(@functionExtents, [$1, $2]); + } else { + die("unexpected grep output '$_'"); + } + } + close(GREP); + } + } elsif ($criteria eq 'statement') { + $stmts = $c; + } else { + lcovutil::ignorable_error($lcovutil::ERROR_UNKNOWN_CATEGORY, + "unexpected data type '$criteria'"); + } + } + if (!defined($stmts)) { + # this seems to happen sometimes if we re-run 'cover' multiple + # times on the same DB - e.g., during testing. + lcovutil::ignorable_error($lcovutil::ERROR_UNSUPPORTED, + "unable to process $file without statement data"); + next; + } + if ($lcovutil::verify_checksum && + !-f $file) { + lcovutil::ignorable_error($lcovutil::ERROR_SOURCE, + "cannot read '$f': unable to compute --checksum"); + } + my $version = lcovutil::extractFileVersion($file) if -f $file; + $fileData->version($version) if defined($version) && $version ne ''; + + # run through data to verify that there are no branch, function, or + # conditional coverpoints where there is no line data + foreach my $c (['branch', $branches], + ['condition', $conditions], + ['subroutine', $subroutines] + ) { + next unless defined($c->[1]); + foreach my $line ($c->[1]->items) { + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + 'found ' . $c->[0] . + " coverpoint on $line but no lineCov there" + ) unless defined($stmts->location($line)); + } + } + + foreach my $line ($stmts->items) { + my $l = $stmts->location($line); + my $lineCount = $l->[0]->[0]; + $lineMap->append($line, $lineCount); + + if ($subroutines) { + my $s = $subroutines->location($line); + if (defined($s)) { + my ($count, $name) = @{$s->[0]}; + if ($name !~ /(BEGIN|__ANON__)/) { + my $p = findPackage(\@packageExtents, $line); + if (defined($p)) { + $name = $p->[1] . $name; + } + $functionMap->define_function($name, $line); + $functionMap->add_count($name, $count); + } + } + } + if (defined($conditions)) { + my $cond = $conditions->location($line); + if (defined($cond)) { + my @br = $conditions->truth_table($line); + my $blockID = 0; + my @subst; + # the intent of this transform is for the branchExpr + # to show which parts of the condition have evaluated + # to true or false. 
+ # However, this doesn't quite work because the truth + # table computed by Devel::Cover is sometimes ordered + # with the dependent clause after the independent + # one - and sometimes the opposite. + # For the moment: punt when we don't grok + foreach my $block (@br) { + my $counts = $block->[0]; + my $expr = $block->[1]; + my $simplified = $expr; + for (my $i = 0; $i <= $#subst; ++$i) { + my ($from, $to) = @{$subst[$i]}; + $simplified =~ s/\Q$from\E/$to/; + } + my @expr; + while ($simplified =~ + /(.+?)\s+(and|or|xor|&&|\|\|)\s+(.+)/) { + $simplified = $3; + $1 =~ s/^\s+|\s+$//g; + push(@expr, $1); + } + push(@expr, $simplified); + #@expr = split(/\s+(and|or|xor|&&|\|\|)\s+/, $simplified); + my $branchID = 0; + foreach my $entry (@$counts) { + my $taken = + $lineCount == 0 ? '-' : $entry->{covered}; + my $inputs = $entry->{inputs}; + my $branchExpr = ''; + if (scalar(@$inputs) == scalar(@expr)) { + # this is the case we expect.. + my $sep = ''; + for (my $i = 0; $i <= $#$inputs; ++$i) { + my $v = $inputs->[$i]; + next if ($v eq 'X'); + $branchExpr .= $sep; + $branchExpr .= " ! " if $v eq '0'; + $branchExpr .= $expr[$i]; + $sep = ', '; + } + for (my $i = 0; $i <= $#subst; ++$i) { + my ($to, $from) = @{$subst[$i]}; + $branchExpr =~ s/$from/($to)/; + } + $branchExpr =~ s/^\s+|\s+$//g; + } else { + # punt. Just report the original Devel::Cover + # expressions. Hope the user can sort it out + $branchExpr = $expr; + } + my $br = + BranchBlock->new($branchID++, $taken, + $branchExpr, 0); + $branchMap->append($line, $blockID, $br, $file); + } + push(@subst, [$expr, '__' . scalar(@subst) . '__']); + ++$blockID; + } + # condition data is more compreshensive than branch + # if both exist on the line. + next; + } + } + if (defined($branches)) { + my $br = $branches->location($line); + if (defined($br)) { + my ($true, $false) = @{$br->[0]->[0]}; + my $expr = $br->[0]->[1]->{'text'}; + my $id = 0; + for my $c ([$true, $expr], [$false, '! ' . $expr]) { + # this is not an exception... + my $b = + BranchBlock->new($id++, + $lineCount == 0 ? '-' : $c->[0], + $c->[1], 0); + # blockID is always zero + $branchMap->append($line, 0, $b, $file); + } + } + } + } + $fileData->sum()->union($lineMap); + $fileData->sumbr()->union($branchMap); + $fileData->func()->union($functionMap); + + # have to do this manually due to some Perl quirks - + # in particular, there may be code outside of the subroutine we are + # walking...and we want to correct the end line + TraceFile::_deriveFunctionEndLines($fileData); + my $lineData = $fileData->sum(); + my $funcData = $fileData->testfnc(); + + foreach my $func ($fileData->func()->valuelist()) { + # where is the nearest 'package' after my start line? + my $first = $func->line(); + my $end = $func->end_line(); + next unless defined($end); + # find package or function enclosing my end line.. + my $last = $end; + foreach my $ext (\@packageExtents, \@functionExtents) { + while (1) { + my $p = findPackage($ext, $last); + if (defined($p) && $p->[0] > $first) { + $last = $p->[0] - 1; + lcovutil::info(1, + $func->name() . + ": found update end line $last in " . + $p->[1] . "\n"); + # iterate in case there is another package above the first one + } else { + last; + } + } + } + next unless $last < $end; + + # what is the last executable line before the 'package' or 'sub' decl? + while ($last > $first) { + if (defined($lineData->value($last))) { + last; + } + --$last; + } + lcovutil::info(1, + "resetting " . $func->name() . 
+ " end line to $last (from $end)\n"); + $func->set_end_line($last); + + foreach my $tn ($funcData->keylist()) { + my $d = $funcData->value($tn); + my $f = $d->findKey($first); + $f->set_end_line($last); + } + + } #foreach function + + } # foreach file +} #foreach cover db + +$info->applyFilters(); +$info->add_comments(@lcovutil::comments); +$info->write_info_file($output_file, $lcovutil::verify_checksum); + +$info->checkCoverageCriteria(); +CoverageCriteria::summarize(); +my $exit_code = 0 != $CoverageCriteria::coverageCriteriaStatus; + +lcovutil::warn_file_patterns(); +lcovutil::summarize_cov_filters(); +lcovutil::summarize_messages(1); # silent if no messages + +exit $exit_code; diff --git a/bin/py2lcov b/bin/py2lcov new file mode 100755 index 00000000..a22a9748 --- /dev/null +++ b/bin/py2lcov @@ -0,0 +1,212 @@ +#!/usr/bin/env python3 + +# Copyright (c) MediaTek USA Inc., 2020-2024 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# +# This script traverses Python coverage data in one or more coverage +# data file (generated by the Coverage.py module) and translates it into +# LCOV .info format. +# +# py2lcov [--output mydata.info] [--test-name name] [options] coverage.dat+ +# +# See 'py2lcov --help' for more usage information +# +# See https://coverage.readthedocs.io for directions on how to use Coverage.py +# to generate Python coverage data. +# +# +# arguably, should do this in Perl so we could use the lcovutil module utilities +# In the meantime: suggested use model is to translate the python XML using this +# utility, then read it back into lcov for additional processing. +# +# @todo figure out/enhance Coverage.py to characterize branch expressions +# @todo perhaps this should be integrated into the Coverage.py module itself. +# This might no longer be a good idea as XML translation is no longer limited +# to the Coverage.py module. + +import os +import os.path +import sys +import re +import argparse +import xml.etree.ElementTree as ET +import fnmatch +import subprocess +import copy +import base64 +import hashlib +import pdb +from xml2lcovutil import ProcessFile + +def main(): + usageString="""py2lcov: Translate Python coverage data to LCOV .info format. +Please also see https://coverage.readthedocs.io + +Note that the '--no-functions' argument may result in subtly inconsistent coverage +data if a 'no-functions' coverage DB is merged with one which contains derived +function data because the 'def myFunc(...)' line will acquire a 'hit' count +of 1 because the python interpreter considers the 'def' to have been executed +when the line is interpreted (i.e., when the function is defined). +This will generate an 'inconsistent' error if the function is not executed in +your tests because the (derived) function will have a zero hit count but the +first line of the function has a non-zero count. +Best practice is to either always specify '--no-functions' or never specify +'--no-functions'. 
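A concrete, hypothetical illustration of the inconsistency described above:

    # module is imported by the test run, but my_func() is never called
    def my_func():      # Coverage.py marks this line as hit: the 'def' statement
        return 42       # executes at import time; the body line stays at 0, so a
                        # derived function coverpoint would report zero executions
                        # while the function's first line reports a non-zero count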
+
+py2lcov uses Coverage.py to extract coverage data.
+Note that the name of the Coverage.py executable may differ on your platform.
+By default, py2lcov uses 'coverage' (which it expects to be in your path).
+You can use a different executable, either:
+  - through your COVERAGE_COMMAND environment variable, or
+  - via the 'py2lcov --cmd exename ..' command line option.
+
+py2lcov does not implement the full suite of LCOV features (e.g., filtering,
+substitutions, etc.).
+Please generate the translated LCOV format file and then read the data
+back into lcov to use any of those features.
+%(usage)s
+
+Example:
+  $ export PYCOV_DATA=path/to/pydata
+
+  For 'coverage' versions 6.6.1 and higher (which support "--data-file"):
+    $ coverage run --data-file=${PYCOV_DATA} --append --branch \\
+         `which myPythonScript.py` args_to_my_python_script
+
+  For older versions which don't support "--data-files":
+    use COVERAGE_FILE environment variable to specify data file
+    $ COVERAGE_FILE=${PYCOV_DATA} coverage run --append --branch \\
+         `which myPythonScript.py` args_to_my_python_script
+
+  # now use py2lcov to translate the coverage data to INFO file format -
+  #  also include version information in the generated coverage data.
+  $ py2lcov -o pydata.info ${PYCOV_DATA}
+
+  # apply some filtering
+  $ lcov -a pydata.info -o filtered.info --filter branch,blank
+
+  # and use genhtml to produce an HTML coverage report:
+  $ genhtml -o html_report pydata.info ....
+  # the filtered result.
+  $ genhtml -o html_filtered filtered.info ....
+  # use differential coverage to see exactly what filtering did
+  $ genhtml -o html_differential --baseline-file mydata.info filtered.info ...
+
+  Deprecated feature:
+    For backward compatibility, py2lcov also supports translation to LCOV
+    format from intermediate XML:
+
+    # first translate from Python coverage data to XML:
+    $ coverage xml --data-file=${PYCOV_DATA} -o pydata.xml |& tee pydata.log
+    # or - if your Coverage.py module is too old to support '--data-file':
+    $ COVERAGE_FILE=${PYCOV_DATA} coverage xml -o pydata.xml |& tee pydata.log
+
+    # then translate XML to LCOV format:
+    $ py2lcov -i pydata.xml -o pydata.info --version-script myCovScript
+
+    """ % {
+        'usage' : ProcessFile.usageNote,
+    }
+
+    from_env = ''
+    cover_cmd = 'coverage'
+    if 'COVERAGE_COMMAND' in os.environ:
+        cover_cmd = os.environ['COVERAGE_COMMAND']
+        from_env = ' (from your COVERAGE_COMMAND environment variable)'
+
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog=usageString)
+
+    parser.add_argument('-i', '--input', dest='input', default=None,
+                        help="DEPRECATED: specify the input xml file from coverage.py")
+    parser.add_argument('-o', '--output', dest='output', default='py2lcov.info',
+                        help="specify the output LCOV .info file, default: py2lcov.info")
+    parser.add_argument('-t', '--test-name', '--testname', dest='testName', default='',
+                        help="specify the test name for the TN: entry in LCOV .info file")
+    parser.add_argument('-e', '--exclude', dest='excludePatterns', default='',
+                        help="specify the exclude file patterns separated by ','")
+    parser.add_argument('-v', '--verbose', dest='verbose', default=0, action='count',
+                        help="print debug messages")
+    parser.add_argument('--version-script', dest='version',
+                        help="version extract callback")
+    parser.add_argument('--checksum', dest='checksum', action='store_true',
+                        default=False,
+                        help="compute line checksum - see 'man lcov'")
+    parser.add_argument("--no-functions", dest='deriveFunctions',
+                        default=True,
action='store_false', + help="do not derive function coverpoints") + parser.add_argument("--tabwidth", dest='tabwidth', default=8, type=int, + help='tabsize when computing indent') + parser.add_argument('-k', "--keep-going", dest='keepGoing', default=False, action='store_true', + help="ignore errors") + parser.add_argument('--cmd', dest='cover_cmd', default=cover_cmd, + help='executable used to extract python data - e.g., "python3-coverage". Default is "%s"%s.' % (cover_cmd, from_env)) + parser.add_argument('inputs', nargs='*', + help="list of python coverage data input files - expected to be XML or Python .dat format") + + args = parser.parse_args() + + if args.input: + if not args.keepGoing: + print("--input is deprecated - please use 'py2lcov ... %s" % (args.input)) + args.inputs.append(args.input); + + + if not args.inputs: + # no input file - see if COVERAGE_FILE environment variable is set + try: + args.inputs.append(os.environ['COVERAGE_FILE']) + print("reading input from default COVERAGE_FILE '%s'" % args.inputs[0]) + except: + print("Error: no input files") + sys.exit(1) + + args.isPython = True + p = ProcessFile(args) + + for f in args.inputs: + base, ext = os.path.splitext(f) + if ext == '.xml': + p.process_xml_file(f) + continue + + # assume that anything not ending in .xml is a Coverage.py data file + xml = base + '.xml' + suffix = 1 + while os.path.exists(xml): + xml = base + '.xml%d' % suffix + suffix += 1 + env = os.environ.copy() + env["COVERAGE_FILE"] = f + cmd = [args.cover_cmd, "xml", "-o", xml] + try: + x = subprocess.run(cmd, shell=False, check=True, stdout=True, stderr=True, env=env) + except subprocess.CalledProcessError as err: + print("Error: error during XML conversion of %s: %s" % ( + f, str(err))); + if not args.keepGoing: + sys.exit(1) + continue + p.process_xml_file(xml) + os.unlink(xml) + + p.close() + + +if __name__ == '__main__': + main() diff --git a/bin/updateversion.pl b/bin/updateversion.pl deleted file mode 100755 index d39918a6..00000000 --- a/bin/updateversion.pl +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/env perl - -use strict; -use warnings; - -use File::Basename; - -sub update_man_page($); -sub update_bin_tool($); -sub update_txt_file($); -sub update_spec_file($); -sub write_version_file($); -sub get_file_info($); - -our $directory = $ARGV[0]; -our $version = $ARGV[1]; -our $release = $ARGV[2]; -our $full = $ARGV[3]; - -our @man_pages = ("man/gendesc.1", "man/genhtml.1", "man/geninfo.1", - "man/genpng.1", "man/lcov.1", "man/lcovrc.5"); -our @bin_tools = ("bin/gendesc", "bin/genhtml", "bin/geninfo", - "bin/genpng", "bin/lcov"); -our @txt_files = ("README"); -our @spec_files = ("rpm/lcov.spec"); - -if (!defined($directory) || !defined($version) || !defined($release)) { - die("Usage: $0 DIRECTORY|FILE VERSION RELEASE FULL_VERSION\n"); -} - -# Determine mode of operation -if (-f $directory) { - my $file = $directory; - my $base = basename($file); - - if (grep(/^$base$/, map({ basename($_) } @man_pages))) { - print("Updating man page $file\n"); - update_man_page($file); - } elsif (grep(/^$base$/, map({ basename($_) } @bin_tools))) { - print("Updating bin tool $file\n"); - update_bin_tool($file); - } elsif (grep(/^$base$/, map({ basename($_) } @txt_files))) { - print("Updating text file $file\n"); - update_txt_file($file); - } elsif (grep(/^$base$/, map({ basename($_) } @spec_files))) { - print("Updating spec file $file\n"); - update_spec_file($file); - } elsif ($base eq ".version") { - print("Updating version file $file\n"); - 
write_version_file($file); - } else { - print("WARNING: Skipping unknown file $file\n"); - } - print("Done.\n"); - exit(0); -} - -foreach (@man_pages) { - print("Updating man page $_\n"); - update_man_page($directory."/".$_); -} -foreach (@bin_tools) { - print("Updating bin tool $_\n"); - update_bin_tool($directory."/".$_); -} -foreach (@txt_files) { - print("Updating text file $_\n"); - update_txt_file($directory."/".$_); -} -foreach (@spec_files) { - print("Updating spec file $_\n"); - update_spec_file($directory."/".$_); -} -print("Updating version file $directory/.version\n"); -write_version_file("$directory/.version"); -print("Done.\n"); - -sub get_file_info($) -{ - my ($filename) = @_; - my ($sec, $min, $hour, $year, $month, $day); - my @stat; - my $gittime; - - return (0, 0, 0) if (!-e $filename); - @stat = stat($filename); - my $epoch = int($ENV{SOURCE_DATE_EPOCH} || $stat[9]); - $epoch = $stat[9] if $stat[9] < $epoch; - ($sec, $min, $hour, $day, $month, $year) = gmtime($epoch); - $year += 1900; - $month += 1; - - return (sprintf("%04d-%02d-%02d", $year, $month, $day), - sprintf("%04d%02d%02d%02d%02d.%02d", $year, $month, $day, - $hour, $min, $sec), - sprintf("%o", $stat[2] & 07777)); -} - -sub update_man_page($) -{ - my ($filename) = @_; - my @date = get_file_info($filename); - my $date_string = $date[0]; - local *IN; - local *OUT; - - $date_string =~ s/-/\\-/g; - open(IN, "<$filename") || die ("Error: cannot open $filename\n"); - open(OUT, ">$filename.new") || - die("Error: cannot create $filename.new\n"); - while () { - s/\"LCOV\s+\d+\.\d+\"/\"LCOV $version\"/g; - s/\d\d\d\d\\\-\d\d\\\-\d\d/$date_string/g; - print(OUT $_); - } - close(OUT); - close(IN); - chmod(oct($date[2]), "$filename.new"); - system("mv", "-f", "$filename.new", "$filename"); - system("touch", "$filename", "-t", $date[1]); -} - -sub update_bin_tool($) -{ - my ($filename) = @_; - my @date = get_file_info($filename); - local *IN; - local *OUT; - - open(IN, "<$filename") || die ("Error: cannot open $filename\n"); - open(OUT, ">$filename.new") || - die("Error: cannot create $filename.new\n"); - while () { - s/^(our\s+\$lcov_version\s*=).*$/$1 "LCOV version $full";/g; - print(OUT $_); - } - close(OUT); - close(IN); - chmod(oct($date[2]), "$filename.new"); - system("mv", "-f", "$filename.new", "$filename"); - system("touch", "$filename", "-t", $date[1]); -} - -sub update_txt_file($) -{ - my ($filename) = @_; - my @date = get_file_info($filename); - local *IN; - local *OUT; - - open(IN, "<$filename") || die ("Error: cannot open $filename\n"); - open(OUT, ">$filename.new") || - die("Error: cannot create $filename.new\n"); - while () { - s/(Last\s+changes:\s+)\d\d\d\d-\d\d-\d\d/$1$date[0]/g; - print(OUT $_); - } - close(OUT); - close(IN); - chmod(oct($date[2]), "$filename.new"); - system("mv", "-f", "$filename.new", "$filename"); - system("touch", "$filename", "-t", $date[1]); -} - -sub update_spec_file($) -{ - my ($filename) = @_; - my @date = get_file_info($filename); - local *IN; - local *OUT; - - open(IN, "<$filename") || die ("Error: cannot open $filename\n"); - open(OUT, ">$filename.new") || - die("Error: cannot create $filename.new\n"); - while () { - s/^(Version:\s*)\d+\.\d+.*$/$1$version/; - s/^(Release:\s*).*$/$1$release/; - print(OUT $_); - } - close(OUT); - close(IN); - system("mv", "-f", "$filename.new", "$filename"); - system("touch", "$filename", "-t", $date[1]); -} - -sub write_version_file($) -{ - my ($filename) = @_; - my $fd; - - open($fd, ">", $filename) or die("Error: cannot write $filename: 
$!\n"); - print($fd "VERSION=$version\n"); - print($fd "RELEASE=$release\n"); - print($fd "FULL=$full\n"); - close($fd); -} diff --git a/bin/xml2lcov b/bin/xml2lcov new file mode 100755 index 00000000..ae535017 --- /dev/null +++ b/bin/xml2lcov @@ -0,0 +1,110 @@ +#!/usr/bin/env python3 + +# Copyright (c) MediaTek USA Inc., 2020-2024 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# +# This script traverses Python coverage data in one or more coverage +# data file (generated by the Coverage.py module) and translates it into +# LCOV .info format. +# +# xml2lcov [--output mydata.info] [--test-name name] [options] coverage.xml+ +# +# See 'xml2lcov --help' for more usage information +# +# See the Cobertura documentation to see how to generate the XML coverage data + +import os +import os.path +import sys +import re +import argparse +import xml.etree.ElementTree as ET +import fnmatch +import subprocess +import copy +import base64 +import hashlib +import pdb +from xml2lcovutil import ProcessFile + +def main(): + usageString="""xml2lcov: Translate XML coverage data (e.g., generated by Cobertura) +to LCOV .info format. +See the Cobertura documentation for information on how to generate the +XML coverage data file. + +Note that xml2lcov does not implement the full suite of LCOV features +(e.g., demangling, filtering, substitutions, etc.). +Please generate the translated LCOV format file and then read the data +back in to lcov to use any of those features. +%(usage)s +Example: + + # generate the LCOV-format .info file + $ xml2lcov -o mydata.info coverage1.xml coverage2.xml + + # apply some filtering + $ lcov -a mydata.info --filter branch,blank -o filtered.info + + # and use genhtml to produce an HTML coverage report: + $ genhtml -o html_report mydata.info .... + $ genhtml -o html_filtered filtered.info .... + # use differential coverage to see exactly what filtering did + $ genhtml -o html_differential --baseline-file mydata.info filtered.info ... 
+ + """ % { + 'usage' : ProcessFile.usageNote, + } + + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=usageString) + + parser.add_argument('-o', '--output', dest='output', default='xml2lcov.info', + help="specify the out LCOV .info file, default: xml2lcov.info") + parser.add_argument('-t', '--test-name', '--testname', dest='testName', default='', + help="specify the test name for the TN: entry in LCOV .info file") + parser.add_argument('-e', '--exclude', dest='excludePatterns', default='', + help="specify the exclude file patterns separated by ','") + parser.add_argument('-v', '--verbose', dest='verbose', default=0, action='count', + help="print debug messages") + parser.add_argument('--version-script', dest='version', + help="version extract callback") + parser.add_argument('--checksum', dest='checksum', action='store_true', + default=False, + help="compute line checksum - see 'man lcov'") + parser.add_argument('-k', "--keep-going", dest='keepGoing', default=False, action='store_true', + help="ignore errors") + parser.add_argument('inputs', nargs='*', + help="list of python coverage data input files - expected to be XML or Python .dat format") + + args = parser.parse_args() + + if not args.inputs: + print("Error: no input files") + sys.exit(1) + + p = ProcessFile(args) + + for f in args.inputs: + p.process_xml_file(f) + + p.close() + + +if __name__ == '__main__': + main() diff --git a/bin/xml2lcovutil.py b/bin/xml2lcovutil.py new file mode 100644 index 00000000..260a0eb0 --- /dev/null +++ b/bin/xml2lcovutil.py @@ -0,0 +1,497 @@ +#!/usr/bin/env python3 + +# Copyright (c) MediaTek USA Inc., 2020-2024 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# +# This script traverses XML coverage data in +# https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd +# format (e.g., generated by Cobertura or the Coverage.py module) and translates it +# into LCOV .info format. +# +# Arguably, this should be done in Perl so we could use lcovutil module utilities. +# In the meantime: suggested use model is to translate the XML using this +# utility, then read it back into lcov for additional processing. 
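The per-line parsing further below reads only a handful of Cobertura attributes: 'number', 'hits', 'branch' and, for branch lines, 'condition-coverage' of the form "NN% (taken/total)". A small, self-contained Python sketch with a hand-written input element:

    import re
    import xml.etree.ElementTree as ET

    SAMPLE = '<line number="12" hits="3" branch="true" condition-coverage="50% (2/4)"/>'
    COND = re.compile(r'\d+% \((\d+)/(\d+)\)')

    line = ET.fromstring(SAMPLE)
    taken, total = COND.search(line.attrib['condition-coverage']).groups()
    print(int(line.attrib['number']), int(line.attrib['hits']), taken, total)
    # -> 12 3 2 4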
+# +# @todo figure out how to characterize branch expressions in XML data + +import os +import os.path +import sys +import re +import xml.etree.ElementTree as ET +import fnmatch +import subprocess +import copy +import base64 +import hashlib +import pdb + +def line_hash(line: str) -> str: + """Produce a hash of a source line for use in the LCOV file.""" + hashed = hashlib.md5(line.encode("utf-8")).digest() + return base64.b64encode(hashed).decode("ascii").rstrip("=") + + +class ProcessFile: + """Expected/support scriptArgs: + args.input : name of XML file + args.outf : output FILE handle (written to) + args.testName : LCOV test name (optional) - see 'man lcov' + args.excludePatterns : + comma-separated list of glob patterns + args.verbose : verbosity + args.version : version script callback + args.checksum : compute base64 checksum for each line - see 'man lcov' + args.isPython : input XML file came from Coverage.py - so apply certain + Python-specific derivations. + args.deriveFunctions : + derive function coverpoints (primarily useful for Python - + see 'py2lcov --help' and the Coverage.py documentation + args.tabWidth : tab width to assume when deriving information from indentation - + used during Python function derivation. + args.keepGoing : do not stop when error or inconsistency is detected + + """ + + usageNote = """ + +Note that the XML coverage data format does not contain enough information +to deduce exactly which branch expressions have been taken or not taken. + +It reports the total number of branch expressions associated with a particular +line, and the number of those which have been taken. There is no way to know +(except, possibly by inspection of surrounding code and/or some understanding +of your implementation) exactly which ones. + +This is a problem in at least 2 ways: + + - It is not straightforward to use the result to improve your regression + suite because you don't really know what was exercised/not exercised. + + - Coverage data merge is problematic. + + o For example: you have two testcase XML files, each of which hit + 4 of 8 branches on some line. + + o Does that mean you hit 4 of them (both tests exercised the same + code), all 8 (tests exercised disjoint subsets), or some number + between? + + This implementation assumes that the first M branches are the ones + which are hit and the remaining N-M were not hit, in each testcase. + Thus, the combined result in the above example would claim 4 of 8 + branches hit. + This definition turns out to be a lower bound. 
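A tiny sketch of the merge rule described above (the helper name is hypothetical; in practice the merge takes effect when lcov later aggregates the generated .info files):

    # Assume each testcase hit the *first* M of N branches on a line, so the
    # union of two testcases is simply the larger M - a lower bound on the
    # real combined branch coverage.
    def merge_branch_counts(hit_a, hit_b, total):
        assert hit_a <= total and hit_b <= total
        return max(hit_a, hit_b), total

    # merge_branch_counts(4, 4, 8) -> (4, 8); the true union could be anywhere from 4 to 8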
+""" + + def __init__(self, scriptArgs): + self._args = scriptArgs + + self._excludePatterns = scriptArgs.excludePatterns.split(',') if scriptArgs.excludePatterns else None + self._versionScript = scriptArgs.version.split(',') if scriptArgs.version else None + if self._versionScript and self._versionScript[0][-3:] == ".pm": + # hard to handle Perl module in python - so we hack it + self._vesionModule = self._versionScript + self._versionScript = None + + self._outf = open(scriptArgs.output, "w") + try: + self._isPython = scriptArgs.isPython + except: + self._isPython = False + + self._outf.write("TN:%s\n" % scriptArgs.testName) + + def close(self): + + self._outf.close() + + if self._args.version and None == self._versionScript: + lcov = os.path.join(os.path.split(sys.argv[0])[0], 'lcov') + cmd = [ + lcov, + "-a", self._args.output, + "-o", self._args.output, + "--version-script", self._args.version, + *(["--checksum"] if self._args.checksum else []), + "--rc", "compute_file_version=1", + "--branch-coverage", + "--ignore", "inconsistent", + ] + try: + x = subprocess.run(cmd, shell=False, check=True, stdout=True, stderr=True) + except subprocess.CalledProcessError as err: + print("Error during lcov version append operation: %s" % ( + str(err))) + if not self._args.keepGoing: + sys.exit(1); + + + def process_xml_file(self, xml_file): + + tree = ET.parse(xml_file) + root = tree.getroot() + source_paths = [] + + try: + if(root[0].tag == 'sources'): + for source in root[0]: + # keep track of number of times we use each source_path to find + # some file. Unused source paths are likely a problem. + if self._args.verbose: + print("source: '%s'" %(source.text)) + # unclear why the Coverage.py version on GitHub node + # generates empty sources + if source.text == None: + print("skipping empty source (???)") + continue + source_paths.append([source.text, 0]) + else: + print("Error: parse xml fail: no 'sources' in %s" %(xml_file)) + sys.exit(1) + if(root[1].tag == 'packages'): + if (self._args.verbose): + print("packages: " + str(root[1].attrib)) + else: + print("Error: parse xml fail: no 'packages' in %s" %(xml_file)) + sys.exit(1) + except Exception as err: + print("Error: parse xml fail in %s: %s" % (xml_file, str(err))) + if not self._args.keepGoing: + sys.exit(1) + return + + for package in root[1]: + # name="." 
means current directory + # name=".folder1.folder2" means external module or directory + # name="abc" means internal module or directory + pname = package.attrib['name'] + if self._args.verbose: + print("package: '%s'" % (pname)) + isExternal = (pname.startswith('.') and pname != '.') + #pdb.set_trace() + for classes in package: + for fileNode in classes: + name = fileNode.attrib['filename'] + if self._args.excludePatterns and any([fnmatch.fnmatchcase(name, ef) for ef in self._excludePatterns]): + if self._args.verbose: + print("%s is excluded" % name) + continue + if self._args.verbose > 1: + print(" file: %s" % (name)) + if not isExternal: + for s in source_paths: + if self._args.verbose > 1: + print(" check src_path (%s %d)" % (s[0], s[1])) + path = os.path.join(s[0], name) + if os.path.exists(path): + name = path + s[1] += 1 # this source path used for something + break + else: + print("did not find %s in search path" % (path)) + + self._outf.write("SF:%s\n" % name) + if self._versionScript: + cmd = copy.deepcopy(self._versionScript) + cmd.append(name) + try: + version = subprocess.check_output(cmd) + self._outf.write("VER:%s\n" % version.strip().decode('UTF-8')) + except Exception as err: + print("Error: no version for %s: %s" %( + name, str(err))) + if not self._args.keepGoing: + sys.exit(-1) + + self.process_file(fileNode, name) + self._outf.write("end_of_record\n") + + for s in source_paths: + if s[1] == 0: + print("Warning: XM file '%s': source_path '%s' is unused" %(xml_file, s[0])) + + + def process_file(self, fileNode, filename): + + sourceCode = None + if (self._args.checksum or + (self._isPython and self._args.deriveFunctions)): + try: + with open(filename, 'r') as f: + sourceCode = f.read().split('\n') + except: + feature = ' compute line checksum' if self._args.checksum else '' + if self._isPython and self._args.deriveFunctions: + if feature != '': + feature += ' or' + feature += ' derive function data' + + print("cannot open %s - unable to %s" % (filename, feature)); + if not self._args.keepGoing: + sys.exit(1) + + def count(indent): + count = 0 + for c in indent: + if c == ' ': + count += 1 + else: + assert(c == '\t') # shouldn't be anything but space or tab + count += self._args.tabWidth + return count + + def buildFunction(functions, objStack, currentObj, lastLine): + if currentObj and prevLine: + currentObj['end'] = lastLine # last line + prefix = '' + sep = '' + for e in objStack: + prefix += sep + e['name'] + sep = "::" if e['type'] == 'class' else '.' + if currentObj['type'] == 'def': + fullname = prefix + sep + currentObj['name'] + # function might be unreachable dead code + try: + hit = currentObj['hit'] + except: + hit = 0 + functions.append({'name' : fullname, + 'start' : currentObj['start'], + 'end' : currentObj['end'], + 'hit' : hit}) + + # just collect the function/class name - ignore the params + parseLine = re.compile(r'(\s*)((def|class)\s*([^\( \t]+))?') + #parseLine = re.compile(r'(\s*)((def|class)\s*([^:]+)(:|$))?') + + # no information about actual branch expressions/branch + # coverage - only the percentage and number hit/not hit + parseCondition = re.compile(r'\d+\% \((\d+)/(\d+)\)') + + functions = [] # list of [functionName startLine endLine hitcout] + for node in fileNode: + + if node.tag == 'methods': + # build function decls... + for method in node: + func = method.attrib['name'] + + # does this method contain any lines? 
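
The method- and line-level loops that follow rely on the parseCondition pattern defined above. As a standalone illustration (the attribute value here is invented), this is what it extracts from a Cobertura 'condition-coverage' string:

    import re

    # mirrors the parseCondition regexp defined above
    parse_condition = re.compile(r'\d+\% \((\d+)/(\d+)\)')

    # e.g. a line reported as condition-coverage="50% (2/4)"
    m = parse_condition.search('50% (2/4)')
    taken, total = int(m.group(1)), int(m.group(2))
    print(taken, total)   # -> 2 4
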
+ for lines in method: + assert(lines.tag == 'lines') + first = None + last = None + hit = 0 + if lines.tag == 'lines': + # might want to hang onto the method lines - and + # check that the 'lines' tag we find in the parent + # node contains all of the method lines we found + functionLines = {} + branches = {} + for l in lines: + lineNum = int(l.attrib['number']) + lineHit = int(l.attrib['hits']) + functionLines[lineNum] = lineHit + if first == None: + first = lineNum + last = lineNum + hit = lineHit + else: + assert(lineNum > last) + last = lineNum; + if 'branch' in l.attrib and 'true' == l.attrib['branch']: + assert('condition-coverage' in l.attrib) + m = parseCondition.search(l.attrib['condition-coverage']) + assert(m) + # [taken total] + branches[lineNum] = [m.group(1), m.group(2)] + + if first != None: + functions.append({'name' : func, + 'start' : first, + 'end' : last, + 'hit' : hit, + 'lines' : functionLines, + 'branches' : branches}) + elif self._args.verbose: + # there seem to be a fair few functions + # which contain no data + print("elided empty function %s" %(func)) + + continue + + if node.tag != 'lines': + print("not handling tag %s" %(node.tag)) + continue + + # Keep track of current function/class scope - which we use to find + # the first and last executable lines in each function, + # Want to keep track of the function end line - so we can use lcov + # function exclusions. + # currentObj: + # type: 'class' or 'def' + # name: as appears in regexp + # indent: indent count of 'def' or 'class' statement + # start: line of item (where 'def' or 'class' is found + # end: last line of function + # hit: whether first line of function is hit or not + currentObj = None # {type name startIndent lineNo first end start} + objStack = [] + prevLine = None + totals = { 'line' : [0, 0, 'LF', 'LH'], + 'branch' : [0, 0, 'BRF', 'BRH'], + 'function' : [0, 0, 'FNF', 'FNH'], + } + # need to save the statement data and print later because Coverage.py + # has an odd interpretation of the execution status of the function + # decl line. + # - C/C++ mark it executed if the line is entered - so it + # is an analog of function coverage. + # - Coverage.py appears to mark it executed when the containing + # scope is executed (i.e., when a lazy interpret might compile + # the function). + # However, we want to mark the decl executed only if the function + # is executed - and we decide that the function is executed if first + # line in the function is hit. + # - as a result, after seeing all the functions, we want to go back + # and mark the function decl line as 'not hit' if we decided that + # the function itself is not executed. 
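
The function derivation in the loop below hinges on the parseLine pattern and the indentation counter defined earlier. A small standalone sketch (sample source lines invented) of what parseLine reports for a few lines of Python:

    import re

    # mirrors the parseLine regexp defined above: group 1 is the indent,
    # group 3 is 'def' or 'class', group 4 is the bare name
    parse_line = re.compile(r'(\s*)((def|class)\s*([^\( \t]+))?')

    for text in ("class Shape:", "    def area(self):", "        return 0"):
        m = parse_line.search(text)
        print(len(m.group(1)), m.group(3), m.group(4))
    # -> 0 class Shape:
    # -> 4 def area
    # -> 8 None None
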
+ lineData = {} + for line in node: + lineNo = int(line.attrib['number']) + hit = int(line.attrib["hits"]) + lineData[lineNo] = hit; + + totals['line'][0] += 1 + if hit: + totals['line'][1] += 1 + if sourceCode and self._isPython: + # try to derive function names and begin/end lines in Python code + if lineNo <= len(sourceCode): + m = parseLine.search(sourceCode[lineNo-1]) + if m: + indent = count(m.group(1)) + #print(sourceCode[lineNo-1]) + while currentObj and indent <= currentObj['indent']: + # lower indent - so this is a new object + #print("build " + currentObj['name']) + buildFunction(functions, objStack, + currentObj, prevLine) + + try: + currentObj = objStack.pop() + except IndexError as err: + currentObj = None + break + + if m.group(2): + if currentObj: + objStack.append(currentObj) + objtype = m.group(3) + name = m.group(4).rstrip() + if (-1 != name.find('(') and + ')' != name[-1]): + name += '...)' + currentObj = { 'type': objtype, + 'name': name, + 'indent': indent, + 'start': lineNo, + } + else: + # just a line - may be the first executable + # line in some function: + if currentObj and not 'hit' in currentObj: + currentObj['hit'] = hit + # mark that function decl line is not + # hit if the function is not hit + if 0 == hit: + assert(currentObj['start'] in lineData) + lineData[currentObj['start']] = 0 + + prevLine = lineNo + else: + print('"%s":%d: Error: out of range: file contains %d lines' % ( + filename, lineNo, len(sourceCode))) + if not self._args.keepGoing: + sys.exit(1) + + if "branch" in line.attrib and line.attrib["branch"] == 'true': + # attrib is always true from xmlreport.py - but may not + # be true cobertura report + assert('condition-coverage' in line.attrib) + m = parseCondition.search(line.attrib['condition-coverage']) + assert(m) + taken = int(m.group(1)) + total = int(m.group(2)) + # no information of which clause is taken or not + # set taken conditions start from 0 and followed by + # non-taken conditions + # taken conditions + for cond in range(0,taken): + self._outf.write("BRDA:%d,0,%d,1\n" % (lineNo, cond)) + totals['branch'][0] += 1 + totals['branch'][1] += 1 + # non-taken conditions + for cond in range(taken, total): + totals['branch'][0] += 1 + self._outf.write("BRDA:%d,0,%d,0\n" % (lineNo, cond)) + + # and build all the pending functions + # these were still open when we hit the end of file - e.g., because + # they are last elements in some package file and there are no + # no executable lines after the function decl. + # There may be more than one function in the stack, if the last + # object is nested. + while currentObj: + buildFunction(functions, objStack, currentObj, prevLine) + + try: + currentObj = objStack.pop() + except IndexError as err: + currentObj = None + break + + # print the LCOV function data + idx = 0 + for f in functions: + totals['function'][0] += 1 + f['idx'] = idx + idx += 1 + if f['hit']: + totals['function'][1] += 1 + self._outf.write("FNL:%(idx)d,%(start)d,%(end)d\nFNA:%(idx)d,%(hit)d,%(name)s\n" % f) + # print the LCOV line data. 
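
Taken together, the records emitted above (BRDA, FNL/FNA) and below (DA, followed by the LF/LH, BRF/BRH, FNF/FNH totals) produce a per-file block roughly like the following; the file name, line numbers and counts are invented for illustration, and the TN: record is written once at the top of the output file:

    TN:unit_tests
    SF:src/app.py
    BRDA:15,0,0,1
    BRDA:15,0,1,0
    FNL:0,12,20
    FNA:0,1,main
    DA:12,1
    DA:15,3
    LF:2
    LH:2
    BRF:2
    BRH:1
    FNF:1
    FNH:1
    end_of_record
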
+ for lineNo in sorted(lineData.keys()): + checksum = '' + if self._args.checksum: + try: + checksum = ',' + line_hash(sourceCode[lineNo-1]) + except IndexError as err: + print('"%s":%d: unable to compute checksum for missing line' % (filename, lineNo)) + if not self._args.keepGoing: + raise(err) + + self._outf.write("DA:%d,%d%s\n" % (lineNo, lineData[lineNo], checksum)); + + # print the LCOV totals - not used by lcov, but maybe somebody does + for key in totals: + d = totals[key] + if d[0] == 0: + continue + self._outf.write("%s:%d\n" % (d[2], d[0])) + self._outf.write("%s:%d\n" % (d[3], d[1])) diff --git a/example/Makefile b/example/Makefile index 2f698a1b..3a5435e8 100644 --- a/example/Makefile +++ b/example/Makefile @@ -8,12 +8,43 @@ # CC := gcc -CFLAGS := -Wall -I. -fprofile-arcs -ftest-coverage +CFLAGS := -Wall -I. --coverage +LDFLAGS := --coverage -LCOV := ../bin/lcov -GENHTML := ../bin/genhtml -GENDESC := ../bin/gendesc -GENPNG := ../bin/genpng +LCOV_FLAGS = --branch-coverage + +ifneq ($(COVER_DB),) +export PERL_COVER_ARGS := -MDevel::Cover=-db,$(COVER_DB),-coverage,statement,branch,condition,subroutine,-silent,1 +EXEC_COVER := perl ${PERL_COVER_ARGS} +endif + +# MC/DC enabled in gcc/14 and above +ENABLE_MCDC=$(shell X=`$(CXX) -dumpversion` ; IFS='.' read -r -a VER <<< $$X ; if [ "$${VER[0]}" -ge 14 ] ; then echo 1 ; else echo 0 ; fi) + +ifeq ($(ENABLE_MCDC), 1) +CFLAGS += -fcondition-coverage +LDFLAGS += -fcondition-coverage +LCOV_FLAGS += --mcdc-coverage +endif + +# path differs depending on whether we are in the install dir or not +# run with "make LCOV_HOME=/path/to/my/release" if necessary + +ifeq ($(LCOV_HOME),) +LCOV_HOME := $(shell if [ -d ../bin ] ; then echo `realpath ..` ; else echo `realpath ../../..` ; fi) +endif + +EG_SRCDIR = $(shell if [ -d $(LCOV_HOME)/example ] ; then echo $(LCOV_HOME)/example ; else echo $(LCOV_HOME)/share/lcov/example ; fi) +# override this variable to write the differential testcase data someplace +# else +REPO = exampleRepo +BINDIR = $(LCOV_HOME)/bin +SCRIPTS = $(LCOV_HOME)/scripts +LCOV := $(EXEC_COVER) $(BINDIR)/lcov $(LCOV_FLAGS) +GENHTML := $(EXEC_COVER) $(BINDIR)/genhtml $(LCOV_FLAGS) +GENDESC := $(EXEC_COVER) $(BINDIR)/gendesc +GENPNG := $(EXEC_COVER) $(BINDIR)/genpng +SCRIPTDIR = $(shell if [ -d ../scripts ] ; then echo $(SCRIPTS) ; else echo $(LCOV_HOME)/share/lcov/support-scripts ; fi) # Depending on the presence of the GD.pm perl module, we can use the # special option '--frames' for genhtml @@ -25,12 +56,20 @@ else FRAMES := endif -.PHONY: clean output test_noargs test_2_to_2000 test_overflow +ifeq ("${V}","1") + echocmd= +else + echocmd=echo $1 ; +.SILENT: clean +endif + + +.PHONY: clean output test_noargs test_2_to_2000 test_overflow test_differential all: output example: example.o iterate.o gauss.o - $(CC) example.o iterate.o gauss.o -o example -lgcov + $(CC) $^ -o $@ $(LDFLAGS) example.o: example.c iterate.h gauss.h $(CC) $(CFLAGS) -c example.c -o example.o @@ -41,7 +80,7 @@ iterate.o: methods/iterate.c iterate.h gauss.o: methods/gauss.c gauss.h $(CC) $(CFLAGS) -c methods/gauss.c -o gauss.o -output: example descriptions test_noargs test_2_to_2000 test_overflow +output: example descriptions test_noargs test_2_to_2000 test_overflow test_differential @echo @echo '*' @echo '* Generating HTML output' @@ -56,11 +95,21 @@ output: example descriptions test_noargs test_2_to_2000 test_overflow @echo '* See '`pwd`/output/index.html @echo '*' @echo + @echo "Generate HTML with hierarchical report and additional navigation features" + @echo 
'*'
+	$(GENHTML) trace_noargs.info trace_args.info trace_overflow.info \
+		--output-directory hierarchical \
+		--title "Basic example - hierarchical" \
+		--show-details --hierarchical --show-navigation \
+		--description-file descriptions $(FRAMES) \
+		--legend
+	@echo
+	@echo '* See '`pwd`/hierarchical/index.html
 
 descriptions: descriptions.txt
 	$(GENDESC) descriptions.txt -o descriptions
 
-all_tests: example test_noargs test_2_to_2000 test_overflow
+all_tests: example test_noargs test_2_to_2000 test_overflow test_differential
 
 test_noargs:
 	@echo
@@ -92,7 +141,92 @@ test_overflow:
 	./example 0 100000 || true
 	$(LCOV) --capture --directory . --output-file trace_overflow.info --test-name "test_overflow" --no-external
 
-clean:
-	rm -rf *.o *.bb *.bbg *.da *.gcno *.gcda *.info output example \
-	descriptions
+# This test pretends to write some code, run some tests, then modify the
+# code and rerun some of the tests.
+# The differential coverage report categorizes the code based on
+# what changed in the source code and what changed in the tests.
+# Note that we are using a Perl module for callbacks here - but could use a
+# script or executable if desired.
+# Could use --verbose and --quiet flags to make the execution less noisy -
+# but verbosity can help to debug configuration issues.
+# See the lcov README and man pages for more details.
+test_differential:
+	@echo
+	@echo '*'
+	@echo '* Test case 4: differential coverage example'
+	@echo '*'
+	@echo
+	@echo "Step 1: Initialize GIT repo with example source"
+	@$(RM) -rf $(REPO)
+	@git init $(REPO) ;
+	@(cd $(REPO) ; \
+	  cp -f $(EG_SRCDIR)/*.h $(EG_SRCDIR)/example.c . ; \
+	  mkdir methods ; \
+	  cp -f $(EG_SRCDIR)/methods/gauss.c methods ; \
+	  cp -f $(EG_SRCDIR)/methods/iterate.c methods ; \
+	  find . \( -name '*.h' -o -name '*.c' \) -exec git add {} \; ; \
+	  git commit -m baseline ; git tag baseline )
+	@echo "Build example and run some tests"
+	(cd $(REPO) ; \
+	 $(CC) -o example $(CFLAGS) -I. example.c methods/gauss.c methods/iterate.c ; \
+	 ./example ; \
+	 ./example 2 1000 ; \
+	 ./example 2 100000 || true )
+
+	@echo "Step 2: Capture initial coverage"
+	(cd $(REPO) ; \
+	 $(LCOV) --capture -o baseline.info -d . --version-script $(SCRIPTDIR)/gitversion.pm )
+
+	@echo "Step 3: Modify source code"
+	@(cd $(REPO) ; \
+	  cp -f $(EG_SRCDIR)/example_mod.c example.c ; \
+	  cp -f $(EG_SRCDIR)/methods/iterate_mod.c methods/iterate.c ; \
+	  git add example.c methods/iterate.c ; \
+	  git commit -m current ; \
+	  git tag current )
+
+	@echo "Step 4: Rebuild and run tests (fewer tests this time)"
+	( cd $(REPO) ; \
+	  find . -name '*.gc*' -delete ; \
+	  $(CC) -o example $(CFLAGS) -I. example.c methods/gauss.c methods/iterate.c ; \
+	  ./example )
+
+	@echo "Step 5: Capture new coverage (after source changes)"
+	(cd $(REPO) ; \
+	 $(LCOV) --capture -o current.info -d . --version-script $(SCRIPTDIR)/gitversion.pm )
+	@echo "Compute source diffs"
+	(cd $(REPO) ; \
+	 $(SCRIPTDIR)/gitdiff -b . 
`git rev-list -n 1 baseline` \
+		`git rev-list -n 1 current` > udiff.txt )
+
+	@echo "Step 6: Generate differential coverage report"
+	@echo "  (caching revision control data may result in improved runtime performance)"
+	(cd $(REPO) ; \
+	 $(GENHTML) -o differential --baseline-file baseline.info \
+		--diff-file udiff.txt --show-owners \
+		--title "Differential coverage example" \
+		--annotate $(SCRIPTDIR)/gitblame.pm,--cache,./my_cache \
+		--version-script $(SCRIPTDIR)/gitversion.pm \
+		-- current.info )
+
+	@echo "point your browser to `realpath $(REPO)`/differential/index.html"
+
+	@echo "Step 7: Generate subset report for code review:"
+	@echo "  (reuse revision control data cached in previous step)"
+	(cd $(REPO) ; \
+	 $(GENHTML) -o review --baseline-file baseline.info \
+		--diff-file udiff.txt --show-owners \
+		--title "Differential report for code review" \
+		--annotate $(SCRIPTDIR)/gitblame.pm,--cache,./my_cache \
+		--version-script $(SCRIPTDIR)/gitversion.pm \
+		--select-script $(SCRIPTDIR)/select.pm \
+		--select-script --tla --select-script UNC,UIC,LBC \
+		-- current.info )
+
+	@echo "point your browser to `realpath $(REPO)`/review/index.html"
+
+clean:
+	$(call echocmd,"  CLEAN   lcov/example")
+	rm -rf *.o *.bb *.bbg *.da *.gcno *.gcda *.info output example descriptions \
+		$(REPO) hierarchical
diff --git a/example/README b/example/README
index cf6cf2e4..1a8f9c24 100644
--- a/example/README
+++ b/example/README
@@ -1,6 +1,62 @@
+To see some examples of LCOV-generated HTML coverage reports,
+type 'make output' and point a web browser at the resulting reports:
 
-To get an example of how the LCOV generated HTML output looks like,
-type 'make output' and point a web browser to the resulting file
+Default view:
 
-  output/index.html
+  - Point your browser to
+       output/index.html
 
+Hierarchical view:
+
+  - Point your browser to
+       hierarchical/index.html
+
+  - Note that the coverage data is the same - only the report
+    format is different:
+
+      - Follows the directory structure, similar to the MS file viewer
+        ('--hierarchical' flag)
+
+      - Additional navigation links are also enabled
+        ('--show-navigation' flag)
+
+Differential coverage:
+
+  - Point your browser to
+       exampleRepo/differential/index.html
+
+  - This example is slightly more complicated because it emulates a moderately
+    realistic project in that it pretends to see project changes:
+
+      - updates to two project source files, example.c and iterate.c
+
+      - a change to the test suite:  only one test of the updated
+        code rather than 3 of the original code
+
+  - The Makefile simulates this by checking code into a git repo,
+    building an executable, then updating a few source files, rebuilding,
+    and running some tests.
+
+Code review:
+
+  - Point your browser to
+       exampleRepo/review/index.html
+
+  - This example builds on the "Differential coverage" example, above,
+    to emulate a possible code review methodology which adds code
+    coverage to the review criteria.
+    The intent is to generate a reduced report which shows only the
+    code changes which negatively affect code coverage - while removing
+    other details which would only distract from the review.
+
+  - Use the 'genhtml --select-script ...' feature to show only new
+    source code which was negatively affected by the change under
+    review (uncovered and/or lost code).
+    You might want to modify the select criteria to include positive
+    change (e.g., GNC, GBC, and GIC categories).
+
+  - Real use cases are likely to use more sophisticated select-script
+    callbacks (e.g., to select from a range of changelists).
+ +Feel free to edit the Makefile or to run the lcov utilities directly, +to see the effect of other options that you find in the lcov man pages. diff --git a/example/example.c b/example/example.c index f9049aa6..b3f2fd46 100644 --- a/example/example.c +++ b/example/example.c @@ -28,33 +28,33 @@ static int end = 9; int main (int argc, char* argv[]) { - int total1, total2; + int total1, total2; - /* Accept a pair of numbers as command line arguments. */ + /* Accept a pair of numbers as command line arguments. */ - if (argc == 3) - { - start = atoi(argv[1]); - end = atoi(argv[2]); - } + if (argc == 3) + { + start = atoi(argv[1]); + end = atoi(argv[2]); + } - /* Use both methods to calculate the result. */ + /* Use both methods to calculate the result. */ - total1 = iterate_get_sum (start, end); - total2 = gauss_get_sum (start, end); + total1 = iterate_get_sum (start, end); + total2 = gauss_get_sum (start, end); - /* Make sure both results are the same. */ + /* Make sure both results are the same. */ - if (total1 != total2) - { - printf ("Failure (%d != %d)!\n", total1, total2); - } - else - { - printf ("Success, sum[%d..%d] = %d\n", start, end, total1); - } + if (total1 != total2) + { + printf ("Failure (%d != %d)!\n", total1, total2); + } + else + { + printf ("Success, sum[%d..%d] = %d\n", start, end, total1); + } - return 0; + return 0; } diff --git a/example/example_mod.c b/example/example_mod.c new file mode 100644 index 00000000..3d08bd82 --- /dev/null +++ b/example/example_mod.c @@ -0,0 +1,48 @@ + /* + * example_mod.c + * + * Identical behaviour to 'example.c' - but with some trivial code changes + * (including this change to comment section) - to create a few differences, + * for differential coverage report example. + * + */ + +#include +#include +#include "iterate.h" +#include "gauss.h" + +static int start = 0; +static int end = 9; + + +int main (int argc, char* argv[]) +{ + int total1, total2; + + /* Accept a pair of numbers as command line arguments. */ + + if (argc == 3) + { + start = atoi(argv[argc-2]); + end = atoi(argv[argc-1]); + } + + + /* Use both methods to calculate the result. */ + + total1 = iterate_get_sum (start, end); + total2 = gauss_get_sum (start, end); + + + /* Make sure both results are the same. */ + + if (total1 == total2) + { + + printf ("Success, sum[%d..%d] = %d\n", start, end, total1); + return 0; + } + printf ("Failure (%d != %d)!\n", total1, total2); + return 1; +} diff --git a/example/methods/gauss.c b/example/methods/gauss.c index 9da3ce50..cb9e98f3 100644 --- a/example/methods/gauss.c +++ b/example/methods/gauss.c @@ -37,12 +37,12 @@ int gauss_get_sum (int min, int max) { - /* This algorithm doesn't work well with invalid range specifications - so we're intercepting them here. */ - if (max < min) - { - return 0; - } + /* This algorithm doesn't work well with invalid range specifications + so we're intercepting them here. */ + if (max < min) + { + return 0; + } - return (int) ((max + min) * (double) (max - min + 1) / 2); + return (int) ((max + min) * (double) (max - min + 1) / 2); } diff --git a/example/methods/iterate.c b/example/methods/iterate.c index 023d1801..e98d8a6f 100644 --- a/example/methods/iterate.c +++ b/example/methods/iterate.c @@ -1,4 +1,4 @@ -/* + /* * methods/iterate.c * * Calculate the sum of a given range of integer numbers. 
@@ -13,33 +13,45 @@ #include #include +#include #include "iterate.h" +void test_data_logging(int, int); int iterate_get_sum (int min, int max) { - int i, total; + int i, total; - total = 0; + test_data_logging(min, max); - /* This is where we loop over each number in the range, including - both the minimum and the maximum number. */ + total = 0; - for (i = min; i <= max; i++) - { - /* We can detect an overflow by checking whether the new - sum would become negative. */ + /* This is where we loop over each number in the range, including + both the minimum and the maximum number. */ - if (total + i < total) - { - printf ("Error: sum too large!\n"); - exit (1); - } + for (i = min; i <= max; i++) + { + /* We can detect an overflow by checking whether the new + sum would exceed the maximum integer value. */ - /* Everything seems to fit into an int, so continue adding. */ + if (total > INT_MAX - i) + { + printf ("Error: sum too large!\n"); + exit (1); + } - total += i; - } + /* Everything seems to fit into an int, so continue adding. */ - return total; + total += i; + } + + return total; +} + +void +test_data_logging(int min, int max) +{ + (void)min; /* quiet compiler complaints */ + (void)max; + printf("this is some debug data logging code that gets removed in the final product\n"); } diff --git a/example/methods/iterate_mod.c b/example/methods/iterate_mod.c new file mode 100644 index 00000000..a2fc9c9d --- /dev/null +++ b/example/methods/iterate_mod.c @@ -0,0 +1,38 @@ +/* + * methods/iterate_mod.c + * + * identical to 'iterate.c', but with some trivial code changs to create + * differences - for differential coverage report. + * + */ + +#include +#include +#include +#include "iterate.h" + + +int iterate_get_sum (int min, int max) +{ + int total = 0; + + /* This is where we loop over each number in the range, including + both the minimum and the maximum number. */ + + /* just reverse iteration order */ + for (int i = max; i >= min; total += i , --i) + { + /* We can detect an overflow by checking whether the new + sum would exceed the maximum integer value. */ + + if (total > INT_MAX - i) + { + printf ("Error: sum too large!\n"); + exit (1); + } + + /* Everything seems to fit into an int, so continue adding. */ + } + + return total; +} diff --git a/lcovrc b/lcovrc index d64688ef..7a18354e 100644 --- a/lcovrc +++ b/lcovrc @@ -4,23 +4,132 @@ # To change settings for a single user, place a customized copy of this file # at location ~/.lcovrc # +# Note that this example script does not include all configuration options +# see 'man lcovrc(5) for a complete list and usage description. + +# include some other config file +# e.g, user-specific options. Note the environment variable expansion +# config_file = $ENV{HOME}/.user_lcovrc +# or project specific - hard-coded from environment variable +# config_file = /path/to/myproject/.lcovrc +# or in the current run directory +# config_file = $ENV{PWD}/.lcovrc # Specify an external style sheet file (same as --css-file option of genhtml) #genhtml_css_file = gcov.css -# Specify coverage rate limits (in %) for classifying file entries +# use 'dark' mode display (light foreground, dark background) instead of default +# same as 'genhtml --dark-mode ....' +#genhtml_dark_mode = 1 + +# Header text to use at top of each page +# Default is "LCOV - (differential )? 
coverage report" +#genhtml_header = Coverage report for my project + +# Footer text to use at the bottom of each page +# Default is LCOV tool version +#genhtml_footer = My footer text + +# Specify global coverage rate limits (in %) for classifying file entries # HI: hi_limit <= rate <= 100 graph color: green # MED: med_limit <= rate < hi_limit graph color: orange # LO: 0 <= rate < med_limit graph color: red genhtml_hi_limit = 90 genhtml_med_limit = 75 +# Specify line coverage rate limits (in %) for classifying file entries +# HI: ln_hi_limit <= rate <= 100 graph color: green +# MED: ln_med_limit <= rate < ln_hi_limit graph color: orange +# LO: 0 <= rate < ln_med_limit graph color: red +# genhtml_line_hi_limit = 90 +# genhtml_line_med_limit = 75 + +# Specify function coverage rate limits (in %) for classifying file entries +# HI: fn_hi_limit <= rate <= 100 graph color: green +# MED: fn_med_limit <= rate < fn_hi_limit graph color: orange +# LO: 0 <= rate < fn_med_limit graph color: red +# genhtml_function_hi_limit = 90 +# genhtml_function_med_limit = 75 + +# Specify branch coverage rate limits (in %) for classifying file entries +# HI: br_hi_limit <= rate <= 100 graph color: green +# MED: br_med_limit <= rate < br_hi_limit graph color: orange +# LO: 0 <= rate < br_med_limit graph color: red +# genhtml_branch_hi_limit = 90 +# genhtml_branch_med_limit = 75 + +# Ignore some errors during geninfo/lcov/genhtml processing - comma-separated +# string. Same as using "--ignore-errors a,b,c" command line option. +# See man pages for list of ignorable messages +#ignore_errors = empty,mismatch + +# Stop emitting message after this number have been printed +# 0 == no limit +max_message_count = 100 + +# If set, do not stop when an 'ignorable error' occurs - try to generate +# a result, however flawed. This is equivalent to the '--keep-going' +# command line switch. +# Default is 1: stop when error occurs +# See the man pages for more detail +#stop_on_error = 1 + +# If nonzero, treat warnings as error +# note that ignored messages will still appear as warnings +# Default is 0 +#treat_warning_as_error = 1 + +# If set to non-zero, only issue particluar warning once per file +# Default is 1 +#warn_once_per_file = 1 + +# extension associated with lcov trace files - glob match pattern +# used as argument to 'find' - to find coverage files contained in +# a directory argument +#info_file_pattern = *.info + +# List of file extensions which should be treated as RTL code (e.g., Verilog) +# Comma-separated list. Default is "v,vh,sv,vhd?" +#rtl_file_extensions = v,vh,sv,vhd? 
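
To make the knob descriptions above concrete, a small user-level ~/.lcovrc built only from options documented above might look like the following (values are illustrative, not recommendations):

    # stricter colour thresholds than the shipped defaults
    genhtml_hi_limit = 95
    genhtml_med_limit = 80

    # dark display with a project-specific page header
    genhtml_dark_mode = 1
    genhtml_header = Coverage report for my project

    # keep going past known-harmless issues, but cap the message volume
    ignore_errors = empty,mismatch
    max_message_count = 50
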
+ +# list of file extensions which should be treated as C/C++ code +# (comma-separated list) +#c_file_extensions = h,c,i,C,H,I,cpp,hpp,icc,cc,hh,cxx,hxx + +# list of file extensions which should be treated as Java code +#java_file_extensions = java + +# list of file extensions which should be treated as perl code +#perl_file_extensions = pl,pm + +# list of file extensions which should be treated as python code +#python_file_extensions = py + +# maximum number of lines to look at, when filtering bogus branch expressions +#filter_lookahead = 5 + +# if nonzero, bitwise operators '|', '&', '~' indicate conditional expressions +#filter_bitwise_conditional = 1 + +# if nonzero, '--filter blank' is applied to blank lines, regardless +# of their hit count +#filter_blank_aggressive = 1 + # Width of line coverage field in source code view genhtml_line_field_width = 12 # Width of branch coverage field in source code view genhtml_branch_field_width = 16 +# Width of MC/DC coverage field in source code view +genhtml_mcdc_field_width = 16 + +# width of 'owner' field in source code view - default is 20 +#genhtml_owner_field_width = 20 + +# width of 'age' field in source code view - default is 5 +#genhtml_age_field_width = 5 + # Width of overview image (used by --frames option of genhtml) genhtml_overview_width = 80 @@ -52,10 +161,6 @@ genhtml_no_source = 0 # option of genhtml) genhtml_num_spaces = 8 -# Highlight lines with converted-only data if non-zero (same as --highlight -# option of genhtml) -genhtml_highlight = 0 - # Include color legend in HTML output if non-zero (same as --legend option of # genhtml) genhtml_legend = 0 @@ -79,13 +184,21 @@ genhtml_legend = 0 # genhtml) genhtml_sort = 1 -# Include function coverage data display (can be disabled by the -# --no-func-coverage option of genhtml) -#genhtml_function_coverage = 1 +# Display coverage idata in hierarchical directory structure rather than +# top/directory/source +#genhtml_hierarchical = 1 + +# Display coverage data using 'flat' view: top-level table holds all +# files with no intermediate directory pages. +#genhtml_flat_view = 1 -# Include branch coverage data display (can be disabled by the -# --no-branch-coverage option of genhtml) -#genhtml_branch_coverage = 1 +# Enable hyperlinks from coverage summary table to first source code line +# in corresponding category ('Hit' or "Missed') in non-differential report. +# Feature is always enabled in differential coverage report. +#genhtml_show_navigation = 1 + +# If nonzero, add column to "function coverage detail" table to show the proportion of lines and branches within the function which are exercised. +#genhtml_show_function_proportion = 0 # Specify the character set of all generated HTML pages genhtml_charset=UTF-8 @@ -99,13 +212,36 @@ genhtml_desc_html=0 # Show missed counts instead of hit counts #genhtml_missed=1 +# group function aliases in report - see '--merge' section in man(1) genhtml +#merge_function_aliasess = 1 + +# If set, suppress list of aliases in function detail table +#suppress_function_aliases = 1 + +# If set, derive function end line from line coverpoint data - default ON +#derive_function_end_line = 1 + +# If set, derive function end lines for all file types. 
+# By default, we derive end lines for C/C++ files only +#derive_end_line_all_files = 0 + +# Maximum size of function (number lines) which will be checked by '--filter trivial' +#trivial_function_threshold = 5 + +# Set threshold for hit count which tool should deem likely to indicate +# a toolchain bug (corrupt coverage data) +# excessive_count_theshold = number + # Demangle C++ symbols -#genhtml_demangle_cpp=1 +# Call multiple times to specify command and command line arguments +# ('-Xlinker'-like behaviour) +#demangle_cpp = c++filt # Name of the tool used for demangling C++ function names +# This argument is deprecated - please use 'demangle_cpp' instead #genhtml_demangle_cpp_tool = c++filt - # Specify extra parameters to be passed to the demangling tool +# This argument is deprecated - please use 'demangle_cpp' instead #genhtml_demangle_cpp_params = "" # Location of the gcov tool (same as --gcov-info option of geninfo) @@ -114,14 +250,21 @@ genhtml_desc_html=0 # Adjust test names to include operating system information if non-zero #geninfo_adjust_testname = 0 +# Ignore testcase names in .info file +#forget_testcase_names = 0 + # Calculate checksum for each source code line if non-zero (same as --checksum # option of geninfo if non-zero, same as --no-checksum if zero) -#geninfo_checksum = 1 +#checksum = 1 # Specify whether to capture coverage data for external source files (can # be overridden by the --external and --no-external options of geninfo/lcov) #geninfo_external = 1 +# Specify whether to capture coverage data from compile-time data files +# which have no corresponding runtime data. +#geninfo_capture_all = 1 + # Enable libtool compatibility mode if non-zero (same as --compat-libtool option # of geninfo if non-zero, same as --no-compat-libtool if zero) #geninfo_compat_libtool = 0 @@ -129,7 +272,11 @@ genhtml_desc_html=0 # Use gcov's --all-blocks option if non-zero #geninfo_gcov_all_blocks = 1 -# Specify compatiblity modes (same as --compat option of geninfo). +# Adjust 'executed' non-zero hit count of lines which contain no branches +# and have attribute '"unexecuted_blocks": true' +#geninfo_unexecuted_blocks = 0 + +# Specify compatibility modes (same as --compat option of geninfo). #geninfo_compat = libtool=on, hammer=auto, split_crc=auto # Adjust path to source files by removing or changing path components that @@ -144,7 +291,7 @@ geninfo_auto_base = 1 geninfo_intermediate = auto # Specify if exception branches should be excluded from branch coverage. -geninfo_no_exception_branch = 0 +no_exception_branch = 0 # Directory containing gcov kernel files # lcov_gcov_dir = /proc/gcov @@ -174,8 +321,212 @@ lcov_list_width = 80 # lcov_list_full_path is non-zero. lcov_list_truncate_max = 20 -# Specify if function coverage data should be collected and processed. -lcov_function_coverage = 1 +# Specify if function coverage data should be collected, processed, and displayed +function_coverage = 1 + +# Specify if branch coverage data should be collected, processed, and displayed +branch_coverage = 0 + +# Specify if MC/DC coverage data should be collected, processed, and displayed +mcdc_coverage = 0 + +# Ask lcov/genhtml/geninfo to return non-zero exit code if branch coverage is below +# threshold +# Default is 0.0 - i.e., do not check threshold. +#fail_under_branches = 75.0 + +# Ask lcov/genhtml/geninfo to return non-zero exit code if line coverage is below +# threshold +# Default is 0.0 - i.e., do not check threshold. 
+#fail_under_lines = 97.5 + +# specify revision control annotation script for genhtml +#genhtml_annotate_script = path_to_my_executable + +# specify the annotation popup that will appear if user hovers over source code +# set to empty to elide tooltip +# Substitutions: +# %d: line number +# %C: commit ID (from annotate callback - see --anotate-script entry in +# genhtml documentation) +# %U: commit author abbreviated name +# %F: commit author full name +# %D: commit date (as returned by annotate callback) +# %d: simplified commit date - yyyy-mm-dd part only (remove time of day) +# %A: commit age +# %l: line number +#genhtml_annotate_tooltip = Commit ID: %C by %F on %D + +# specify coverage criteria script for genhtml/lcov/geninfo +#criteria_script = path_to_my_executable_or_module + +# specify whether you want date and/or owner information passed to your criteria +# callback - in addition to summary information, which is always returned +#criteria_callback_data = date,owner + +# specify when the criteria callback will be executed +#criteria_callback_levels = top,directory,file + +# specify callback to decide whether particular source line should be +# included or suppresses +# select_script = path_to_exe | callback_parameter + +# specify number of lines of context to include around each selected +# source line +# num_context_lines = 5 + +# specify age cutpoints for genhtml date bins +# can call option multiple times or use comma-separted list (or both) +#genhtml_date_bins = 7 +#genhtml_date_bins = 30,180 + +# if enabled, show 'author' coverpoint summary table in page headers +# if not 'all', then show only authors who are responsible for coverpoints +# which are not hit +#genhtml_show_owner_table = 0 | 1 | all + +# truncate owner table to show at most this many authors - +# subset will be those with the highest count of un-exercised code +# owner_table_entries = 5 + +# truncate the owner table at this level/these levels only +# possible values are 'file', 'directory', 'top' (without quotes) +# option may be specified multiple times and/or as a comma-separated list +# truncate_owner_table = top,directory,file + +# if enabled, show author of non-code source lines (e.g., comments) +# in 'source' detail column +#genhtml_show_noncode_owners = 0 | 1 + +# if enabled, generate fake source code for missing files and out-of-range +# lines +#genhtml_synthesize_missing = 0 | 1 + + +# ask geninfo/lcov/genhtml to include only certain files (glob match pattern) +# call multiple times to specify multiple patterns +#include = my/files/*.c +#include = my/files/*.h + +# ask geninfo/lcov/genhtml to exclude certain files (glob match pattern) +# call multiple times to specify multiple patterns +#exclude = your/files/*.c +#exclude = your/files/*.h + +# ask geninfo/lcov/genhtml to munge file pathnames when reading/writing .info files +# call multiple times to specify multiple patterns +# this pattern removes "/.lib" from the pathname (see the lcov man page for details) +#substitute = s#/.libs##g +# this pattern changes "/tmp/build" to "/usr/src" (see the lcov man page for details) +#substitute = s#/tmp/build#/usr/src#g + +# ask geninfo/lcov/genhtml to exclude coverpoints associated with lines which match +# the regexp. 
Call multiple times to specify multiple patterns +# omit lines which end with "// MY_LINE_EXCLUDE_MARKER +#omit_lines = .+?//\s*MY_LINE_EXCLUDE_MARKER\s* +# add another exclusion marker +#omit_lines = .+?//\s*MY_PROJECT_EXCLUDE_MARKER\s* + +# ask geninfo/lcov/genhtml to exclude coverpoints whose lines are within +# the start/end of a function matching the regexp. +#Call multiple times to specify multiple patterns +#erase_functions = main +# and another - note that this looks like a demangled C++ function +# that the argument is a regexp - so special characters need to be escaped +#erase_functions = debug)_.*\(int,\s*int\) + +# check that source file exists before calling version-script, annotate-script +# or other callback. If set and file does not exist, 'source' error is +# generated. Default: 1 (check for existence). +#check_existence_before_callback = 1 + +# specify path to version identification script +#version_script = path_to_my_executable + +# tell the tool to generate missing file version information when +# readind coverage DB file +# compute_file_version = 0 | 1 + +# specify path to file pathname resolution script +#resolve_script = path_to_my_executable + +# Specify JSON module to use, or choose best available if set to auto +lcov_json_module = auto + +# Specify maximum number of parallel slaves +# default: 1 (no parallelism) +#parallel = 1 + +# Specify maximum memory to use during parallel processing, in Mb. +# Do not fork if estimated memory consumption exceeds this number. +# default: 0 (no limit) +#memory = 1024 + +# Specify the number of consecutive fork() failures to allow before +# giving up +# max_fork_fails = 5 + +# Throttling control: the maximum number of files that genhtml will +# handle in a single (parallel) thread +# max_tasks_per_core = 20 + +# Seconds to wait after failing to fork() before retrying +# fork_fail_timeout = 10 + +# Throttling control: specify a percentage of system memory to use as +# maximum during parallel processing. +# Do not fork if estimated memory consumption exceeds the maximum. +# this value is used only if the maximum memory is not set. +# default: not set +#memory_percentage = 1024 + + +# Character used to split list-type parameters +# - for example, the list of "--ignore_errors source,mismatch" +# default: , (comma) +#split_char = , + +# use case insensitive compare to find matching files, for include/exclude +# directives, etc +#case_insensitive = 0 + +# sort file names before capture and/or aggregation - to reduce certain +# types of processing order dependencies which can affect coverage results. +# see the lcovrc man page for more details. 
+#sort_input = 1 + +# override line default line exclusion regexp +#lcov_excl_line = LCOV_EXCL_LINE + +# override branch exclusion regexp +#lcov_excl_br_line = LCOV_EXCL_BR_LINE + +# override exception branch exclusion regexp +#lcov_excl_exception_br_line = LCOV_EXCL_EXCEPTION_BR_LINE + +# override start of exclude region regexp +#lcov_excl_start = LCOV_EXCL_START + +# override end of exclude region regexp +#lcov_excl_stop = LCOV_EXCL_STOP + +# override unreachable line default line exclusion regexp +#lcov_unreachable_line = LCOV_UNREACHABLE_LINE + +# override start of unreachable region regexp +#lcov_unreachable_start = LCOV_UNREACHABLE_START + +# override end of unreachable region regexp +#lcov_unreachable_stop = LCOV_UNREACHABLE_STOP + +# override start of branch exclude region regexp +#lcov_excl_br_start = LCOV_EXCL_BR_START + +# override start of exclude region regexp +#lcov_excl_br_stop = LCOV_EXCL_BR_STOP + +# override start of exclude region regexp +#lcov_excl_exception_br_start = LCOV_EXCL_EXCEPTION_BR_START -# Specify if branch coverage data should be collected and processed. -lcov_branch_coverage = 0 +# override start of exclude region regexp +#lcov_excl_exception_br_stop = LCOV_EXCL_EXCEPTION_BR_STOP diff --git a/lib/lcovutil.pm b/lib/lcovutil.pm new file mode 100644 index 00000000..68baa166 --- /dev/null +++ b/lib/lcovutil.pm @@ -0,0 +1,9743 @@ +# some common utilities for lcov-related scripts + +use strict; +use warnings; +require Exporter; + +package lcovutil; + +use File::Path qw(rmtree); +use File::Basename qw(basename dirname); +use File::Temp qw /tempdir/; +use File::Spec; +use Scalar::Util qw/looks_like_number/; +use Cwd qw/abs_path getcwd/; +use Storable qw(dclone); +use Capture::Tiny; +use Module::Load::Conditional qw(check_install); +use Digest::MD5 qw(md5_base64); +use FindBin; +use Getopt::Long; +use DateTime; +use Config; +use POSIX; +use Fcntl qw(:flock SEEK_END); + +our @ISA = qw(Exporter); +our @EXPORT_OK = qw($tool_name $tool_dir $lcov_version $lcov_url $VERSION + @temp_dirs set_tool_name + info warn_once set_info_callback init_verbose_flag $verbose + debug $debug + append_tempdir create_temp_dir temp_cleanup folder_is_empty $tmp_dir $preserve_intermediates + summarize_messages define_errors + parse_ignore_errors ignorable_error ignorable_warning + is_ignored message_count explain_once + die_handler warn_handler abort_handler + + $maxParallelism $maxMemory init_parallel_params current_process_size + $memoryPercentage $max_fork_fails $fork_fail_timeout + save_profile merge_child_profile save_cmd_line + + @opt_rc apply_rc_params $split_char parseOptions + strip_directories + @file_subst_patterns subst_file_name + @comments + + $br_coverage $func_coverage $mcdc_coverage + @cpp_demangle do_mangle_check $demangle_cpp_cmd + $cpp_demangle_tool $cpp_demangle_params + get_overall_line rate + + $FILTER_BRANCH_NO_COND $FILTER_FUNCTION_ALIAS + $FILTER_EXCLUDE_REGION $FILTER_EXCLUDE_BRANCH $FILTER_LINE + $FILTER_LINE_CLOSE_BRACE $FILTER_BLANK_LINE $FILTER_LINE_RANGE + $FILTER_TRIVIAL_FUNCTION $FILTER_DIRECTIVE + $FILTER_MISSING_FILE $FILTER_INITIALIZER_LIST + $FILTER_EXCEPTION_BRANCH $FILTER_ORPHAN_BRANCH + @cov_filter + $EXCL_START $EXCL_STOP $EXCL_BR_START $EXCL_BR_STOP + $EXCL_EXCEPTION_BR_START $EXCL_EXCEPTION_BR_STOP + $EXCL_LINE $EXCL_BR_LINE $EXCL_EXCEPTION_LINE + $UNREACHABLE_START $UNREACHABLE_STOP $UNREACHABLE_LINE + @exclude_file_patterns @include_file_patterns %excluded_files + @omit_line_patterns @exclude_function_patterns $case_insensitive + 
munge_file_patterns warn_file_patterns transform_pattern + parse_cov_filters summarize_cov_filters + disable_cov_filters reenable_cov_filters is_filter_enabled + filterStringsAndComments simplifyCode balancedParens + set_extensions + $source_filter_lookahead $source_filter_bitwise_are_conditional + $exclude_exception_branch + $derive_function_end_line $derive_function_end_line_all_files + $trivial_function_threshold + $filter_blank_aggressive + + $lcov_filter_parallel $lcov_filter_chunk_size + + %lcovErrors $ERROR_GCOV $ERROR_SOURCE $ERROR_GRAPH $ERROR_MISMATCH + $ERROR_BRANCH $ERROR_EMPTY $ERROR_FORMAT $ERROR_VERSION $ERROR_UNUSED + $ERROR_PACKAGE $ERROR_CORRUPT $ERROR_NEGATIVE $ERROR_COUNT $ERROR_PATH + $ERROR_UNSUPPORTED $ERROR_DEPRECATED $ERROR_INCONSISTENT_DATA + $ERROR_CALLBACK $ERROR_RANGE $ERROR_UTILITY $ERROR_USAGE $ERROR_INTERNAL + $ERROR_PARALLEL $ERROR_PARENT $ERROR_CHILD $ERROR_FORK + $ERROR_EXCESSIVE_COUNT $ERROR_MISSING $ERROR_UNREACHABLE + report_parallel_error report_exit_status check_parent_process + report_unknown_child + + $ERROR_UNMAPPED_LINE $ERROR_UNKNOWN_CATEGORY $ERROR_ANNOTATE_SCRIPT + $stop_on_error + + @extractVersionScript $verify_checksum $compute_file_version + + configure_callback cleanup_callbacks + + is_external @internal_dirs $opt_no_external @build_directory + $default_precision check_precision + + system_no_output $devnull $dirseparator + + %tlaColor %tlaTextColor use_vanilla_color %pngChar %pngMap + %dark_palette %normal_palette parse_w3cdtf +); + +our @ignore; +our @message_count; +our @expected_message_count; +our %message_types; +our $message_log; +our $message_filename; +our $suppressAfter = 100; # stop warning after this number of messages +our %ERROR_ID; +our %ERROR_NAME; +our $tool_dir = "$FindBin::RealBin"; +our $tool_name = basename($0); # import from lcovutil module +our $VERSION = `"$tool_dir"/get_version.sh --full`; +chomp($VERSION); +our $lcov_version = 'LCOV version ' . $VERSION; +our $lcov_url = "https://github.com//linux-test-project/lcov"; +our @temp_dirs; +our $tmp_dir = '/tmp'; # where to put temporary/intermediate files +our $preserve_intermediates; # this is useful only for debugging +our $sort_inputs; # sort input file lists - to reduce unpredictability +our $devnull = File::Spec->devnull(); # portable way to do it +our $dirseparator = ($^O =~ /Win/) ? '\\' : '/'; +our $interp = ($^O =~ /Win/) ? $^X : undef; + +our $debug = 0; # if set, emit debug messages +our $verbose = 0; # default level - higher to enable additional logging + +our $split_char = ','; # by default: split on comma + +# share common definition for all error types. +# Note that geninfo cannot produce some types produced by genhtml, and vice +# versa. Easier to maintain a common definition. 
+our $ERROR_GCOV; +our $ERROR_SOURCE; +our $ERROR_GRAPH; +our $ERROR_FORMAT; # bad record in .info file +our $ERROR_EMPTY; # no records found in info file +our $ERROR_VERSION; +our $ERROR_UNUSED; # exclude/include/substitute pattern not used +our $ERROR_MISMATCH; +our $ERROR_BRANCH; # branch numbering is not correct +our $ERROR_PACKAGE; # missing utility package +our $ERROR_CORRUPT; # corrupt file +our $ERROR_NEGATIVE; # unexpected negative count in coverage data +our $ERROR_COUNT; # too many messages of type +our $ERROR_UNSUPPORTED; # some unsupported feature or usage +our $ERROR_PARALLEL; # error in fork/join +our $ERROR_DEPRECATED; # deprecated feature +our $ERROR_CALLBACK; # callback produced an error +our $ERROR_INCONSISTENT_DATA; # something wrong with .info +our $ERROR_UNREACHABLE; # coverpoint hit in "unreachable" region +our $ERROR_RANGE; # line number out of range +our $ERROR_UTILITY; # some tool failed - e.g., 'find' +our $ERROR_USAGE; # misusing some feature +our $ERROR_PATH; # path issues +our $ERROR_INTERNAL; # tool issue +our $ERROR_PARENT; # parent went away so child should die +our $ERROR_CHILD; # nonzero child exit status +our $ERROR_FORK; # fork failed +our $ERROR_EXCESSIVE_COUNT; # suspiciously large hit count +our $ERROR_MISSING; # file missing/not found +# genhtml errors +our $ERROR_UNMAPPED_LINE; # inconsistent coverage data +our $ERROR_UNKNOWN_CATEGORY; # we did something wrong with inconsistent data +our $ERROR_ANNOTATE_SCRIPT; # annotation failed somehow + +my @lcovErrs = (["annotate", \$ERROR_ANNOTATE_SCRIPT], + ["branch", \$ERROR_BRANCH], + ["callback", \$ERROR_CALLBACK], + ["category", \$ERROR_UNKNOWN_CATEGORY], + ["child", \$ERROR_CHILD], + ["corrupt", \$ERROR_CORRUPT], + ["count", \$ERROR_COUNT], + ["deprecated", \$ERROR_DEPRECATED], + ["empty", \$ERROR_EMPTY], + ['excessive', \$ERROR_EXCESSIVE_COUNT], + ["format", \$ERROR_FORMAT], + ["fork", \$ERROR_FORK], + ["gcov", \$ERROR_GCOV], + ["graph", \$ERROR_GRAPH], + ["inconsistent", \$ERROR_INCONSISTENT_DATA], + ["internal", \$ERROR_INTERNAL], + ["mismatch", \$ERROR_MISMATCH], + ["missing", \$ERROR_MISSING], + ["negative", \$ERROR_NEGATIVE], + ["package", \$ERROR_PACKAGE], + ["parallel", \$ERROR_PARALLEL], + ["parent", \$ERROR_PARENT], + ["path", \$ERROR_PATH], + ["range", \$ERROR_RANGE], + ["source", \$ERROR_SOURCE], + ["unmapped", \$ERROR_UNMAPPED_LINE], + ["unreachable", \$ERROR_UNREACHABLE], + ["unsupported", \$ERROR_UNSUPPORTED], + ["unused", \$ERROR_UNUSED], + ['usage', \$ERROR_USAGE], + ['utility', \$ERROR_UTILITY], + ["version", \$ERROR_VERSION],); + +our %lcovErrors; + +our $stop_on_error; # attempt to keep going +our $treat_warning_as_error = 0; +our $warn_once_per_file = 1; +our $excessive_count_threshold; # default not set: don't check + +our $br_coverage = 0; # If set, generate branch coverage statistics +our $mcdc_coverage = 0; # MC/DC +our $func_coverage = 1; # If set, generate function coverage statistics + +# for external file filtering +our @internal_dirs; +our $opt_no_external; + +# Where code was built/where .gcno files can be found +# (if .gcno files are in a different place than the .gcda files) +# also used by genhtml to match diff file entries to .info file +our @build_directory; + +our @configured_callbacks; + +# optional callback to keep track of whatever user decides is important +our @contextCallback; +our $contextCallback; + +# filename substitutions +our @file_subst_patterns; +# resolve callback +our @resolveCallback; +our $resolveCallback; +our %resolveCache; + +# C++ demangling 
+our @cpp_demangle; # the options passed in +our $demangle_cpp_cmd; # the computed command string +# deprecated: demangler for C++ function names is c++filt +our $cpp_demangle_tool; +# Deprecated: prefer -Xlinker approach with @cpp_dmangle_tool +our $cpp_demangle_params; + +# version extract may be expensive - so only do it once +our %versionCache; +our @extractVersionScript; # script/callback to find version ID of file +our $versionCallback; +our $verify_checksum; # compute and/or check MD5 sum of source code lines + +our $check_file_existence_before_callback = 1; +our $check_data_consistency = 1; + +# Specify coverage rate default precision +our $default_precision = 1; + +# undef indicates not set by command line or RC option - so default to +# sequential processing +our $maxParallelism; +our $max_fork_fails = 5; # consecutive failures +our $fork_fail_timeout = 10; # how long to wait, in seconds +our $maxMemory; # zero indicates no memory limit to parallelism +our $memoryPercentage; +our $in_child_process = 0; +our $max_tasks_per_core = 20; # maybe default to 0? + +our $lcov_filter_parallel = 1; # enable by default +our $lcov_filter_chunk_size; + +our $fail_under_lines; +our $fail_under_branches; + +our $fix_inconsistency = 1; + +sub default_info_impl(@); + +our $info_callback = \&default_info_impl; + +# filter classes that may be requested +# don't report BRDA data for line which seem to have no conditionals +# These may be from C++ exception handling (for example) - and are not +# interesting to users. +our $FILTER_BRANCH_NO_COND; +# don't report line coverage for closing brace of a function +# or basic block, if the immediate predecessor line has the same count. +our $FILTER_LINE_CLOSE_BRACE; +# merge functions which appear on same file/line - guess that that +# they are all the same +our $FILTER_FUNCTION_ALIAS; +# region between LCOV EXCL_START/STOP +our $FILTER_EXCLUDE_REGION; +# region between LCOV EXCL_BR_START/STOP +our $FILTER_EXCLUDE_BRANCH; +# empty line +our $FILTER_BLANK_LINE; +# out of range line - beyond end of file +our $FILTER_LINE_RANGE; +# backward compatibility: empty line, close brace +our $FILTER_LINE; +# filter initializer list-like stuff +our $FILTER_INITIALIZER_LIST; +# remove functions which have only a single line +our $FILTER_TRIVIAL_FUNCTION; +# remove compiler directive lines which llvm-cov seems to generate +our $FILTER_DIRECTIVE; +# remove missing source file +our $FILTER_MISSING_FILE; +# remove branches marked as related to exceptions +our $FILTER_EXCEPTION_BRANCH; +# remove lone branch in block - it can't be an actual conditional +our $FILTER_ORPHAN_BRANCH; +# MC/DC with single expression is identical to branch +our $FILTER_MCDC_SINGLE; +our $FILTER_OMIT_PATTERNS; # special/somewhat faked filter + +our %COVERAGE_FILTERS = ("branch" => \$FILTER_BRANCH_NO_COND, + 'brace' => \$FILTER_LINE_CLOSE_BRACE, + 'blank' => \$FILTER_BLANK_LINE, + 'directive' => \$FILTER_DIRECTIVE, + 'range' => \$FILTER_LINE_RANGE, + 'line' => \$FILTER_LINE, + 'initializer' => \$FILTER_INITIALIZER_LIST, + 'function' => \$FILTER_FUNCTION_ALIAS, + 'missing' => \$FILTER_MISSING_FILE, + 'region' => \$FILTER_EXCLUDE_REGION, + 'branch_region' => \$FILTER_EXCLUDE_BRANCH, + 'exception' => \$FILTER_EXCEPTION_BRANCH, + 'orphan' => \$FILTER_ORPHAN_BRANCH, + 'mcdc' => \$FILTER_MCDC_SINGLE, + "trivial" => \$FILTER_TRIVIAL_FUNCTION,); +our @cov_filter; # 'undef' if filter is not enabled, + # [line_count, coverpoint_count] histogram if + # filter is enabled: number of applications + # of this filter 
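
As an aside, the 'brace' filter described above can be pictured with a small Python sketch (a hypothetical helper, not the lcov implementation): drop the coverpoint on a closing-brace-only line when the nearest preceding coverpoint carries the same hit count.

    # Hypothetical illustration of the FILTER_LINE_CLOSE_BRACE heuristic:
    # source: {line_no: text}, counts: {line_no: hit_count}
    def filter_close_brace(source, counts):
        kept = {}
        prev = None
        for line_no in sorted(counts):
            same_as_prev = prev is not None and counts[line_no] == counts[prev]
            if source[line_no].strip() == '}' and same_as_prev:
                continue            # filtered out - adds no information
            kept[line_no] = counts[line_no]
            prev = line_no
        return kept
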
+ +our $EXCL_START = "LCOV_EXCL_START"; +our $EXCL_STOP = "LCOV_EXCL_STOP"; +# Marker to say that this code is unreachable - so exclude from +# report, but also generate error if anything in the region is hit +our $UNREACHABLE_START = "LCOV_UNREACHABLE_START"; +our $UNREACHABLE_STOP = "LCOV_UNREACHABLE_STOP"; +our $UNREACHABLE_LINE = "LCOV_UNREACHABLE_LINE"; +our $retainUnreachableCoverpointIfHit = 1; +# Marker to exclude branch coverage but keep function and line coverage +our $EXCL_BR_START = "LCOV_EXCL_BR_START"; +our $EXCL_BR_STOP = "LCOV_EXCL_BR_STOP"; +# marker to exclude exception branches but keep other branches +our $EXCL_EXCEPTION_BR_START = 'LCOV_EXCL_EXCEPTION_BR_START'; +our $EXCL_EXCEPTION_BR_STOP = 'LCOV_EXCL_EXCEPTION_BR_STOP'; +# exclude on this line +our $EXCL_LINE = 'LCOV_EXCL_LINE'; +our $EXCL_BR_LINE = 'LCOV_EXCL_BR_LINE'; +our $EXCL_EXCEPTION_LINE = 'LCOV_EXCL_EXCEPTION_BR_LINE'; + +our @exclude_file_patterns; +our @include_file_patterns; +our %excluded_files; +our $case_insensitive = 0; +our $exclude_exception_branch = 0; +our $derive_function_end_line = 1; +our $derive_function_end_line_all_files = 0; # by default, C only +our $trivial_function_threshold = 5; + +# list of regexps applied to line text - if exclude if matched +our @omit_line_patterns; +# HGC: does not really make sense to support command-line '--unreachable-line +# patterns. Unreachable is typically a branch clause/structural feature - +# as opposed to an 'omit' pattern is typically trace/debug or logging code +# which may or may not be executed (and we don't care) +#our @unreachable_line_patterns; +our @exclude_function_patterns; +# need a pattern copy that we don't disable for function message suppressions +our @suppress_function_patterns; + +our %languageExtensions = ('c' => 'c|h|i|C|H|I|icc|cpp|cc|cxx|hh|hpp|hxx', + 'rtl' => 'v|vh|sv|vhdl?', + 'perl' => 'pl|pm', + 'python' => 'py', + 'java' => 'java'); + +our $info_file_pattern = '*.info'; + +# don't look more than 10 lines ahead when filtering (default) +our $source_filter_lookahead = 10; +# by default, don't treat expressions containing bitwise operators '|', '&', '~' +# as conditional in bogus branch filtering +our $source_filter_bitwise_are_conditional = 0; +# filter out blank lines whether they are hit or not +our $filter_blank_aggressive = 0; + +our %dark_palette = ('COLOR_00' => "e4e4e4", + 'COLOR_01' => "58a6ff", + 'COLOR_02' => "8b949e", + 'COLOR_03' => "3b4c71", + 'COLOR_04' => "006600", + 'COLOR_05' => "4b6648", + 'COLOR_06' => "495366", + 'COLOR_07' => "143e4f", + 'COLOR_08' => "1c1e23", + 'COLOR_09' => "202020", + 'COLOR_10' => "801b18", + 'COLOR_11' => "66001a", + 'COLOR_12' => "772d16", + 'COLOR_13' => "796a25", + 'COLOR_14' => "000000", + 'COLOR_15' => "58a6ff", + 'COLOR_16' => "eeeeee", + 'COLOR_17' => "E5DBDB", + 'COLOR_18' => "82E0AA", + 'COLOR_19' => 'F9E79F', + 'COLOR_20' => 'EC7063',); +our %normal_palette = ('COLOR_00' => "000000", + 'COLOR_01' => "00cb40", + 'COLOR_02' => "284fa8", + 'COLOR_03' => "6688d4", + 'COLOR_04' => "a7fc9d", + 'COLOR_05' => "b5f7af", + 'COLOR_06' => "b8d0ff", + 'COLOR_07' => "cad7fe", + 'COLOR_08' => "dae7fe", + 'COLOR_09' => "efe383", + 'COLOR_10' => "ff0000", + 'COLOR_11' => "ff0040", + 'COLOR_12' => "ff6230", + 'COLOR_13' => "ffea20", + 'COLOR_14' => "ffffff", + 'COLOR_15' => "284fa8", + 'COLOR_16' => "ffffff", + 'COLOR_17' => "E5DBDB", # very light pale grey/blue + 'COLOR_18' => "82E0AA", # light green + 'COLOR_19' => 'F9E79F', # light yellow + 'COLOR_20' => 'EC7063', # lighter red +); + +our 
%tlaColor = ("UBC" => "#FDE007", + "GBC" => "#448844", + "LBC" => "#CC6666", + "CBC" => "#CAD7FE", + "GNC" => "#B5F7AF", + "UNC" => "#FF6230", + "ECB" => "#CC66FF", + "EUB" => "#DDDDDD", + "GIC" => "#30CC37", + "UIC" => "#EEAA30", + # we don't actually use a color for deleted code. + # ... it is deleted. Does not appear + "DUB" => "#FFFFFF", + "DCB" => "#FFFFFF",); +# colors for the text in the PNG image of the corresponding TLA line +our %tlaTextColor = ("UBC" => "#aaa005", + "GBC" => "#336633", + "LBC" => "#994444", + "CBC" => "#98a0aa", + "GNC" => "#90a380", + "UNC" => "#aa4020", + "ECB" => "#663388", + "EUB" => "#777777", + "GIC" => "#18661c", + "UIC" => "#aa7718", + # we don't actually use a color for deleted code. + # ... it is deleted. Does not appear + "DUB" => "#FFFFFF", + "DCB" => "#FFFFFF",); + +our %pngChar = ('CBC' => '=', + 'LBC' => '=', + 'GBC' => '-', + 'UBC' => '-', + 'ECB' => '<', + 'EUB' => '<', + 'GIC' => '>', + 'UIC' => '>', + 'GNC' => '+', + 'UNC' => '+',); + +our %pngMap = ('=' => ['CBC', 'LBC'] + , # 0th element 'covered', 1st element 'not covered + '-' => ['GBC', 'UBC'], + '<' => ['ECB', 'EUB'], + '>' => ['GIC', 'UIC'], + '+' => ['GNC', 'UNC'],); + +our @opt_rc; # list of command line RC overrides + +our %profileData; +our $profile; # the 'enable' flag/name of output file + +# need to defer any errors until after the options have been +# processed as user might have suppressed the error we were +# trying to emit +my @deferred_rc_errors; # ([err|warn, key, string]) + +sub set_tool_name($) +{ + $tool_name = shift; +} + +# +# system_no_output(mode, parameters) +# +# Call an external program using PARAMETERS while suppressing depending on +# the value of MODE: +# +# MODE & 1: suppress STDOUT (return empty string) +# MODE & 2: suppress STDERR (return empty string) +# MODE & 4: redirect to string +# +# Return (stdout, stderr, rc): +# stdout: stdout string or '' +# stderr: stderr string or '' +# 0 on success, non-zero otherwise +# + +sub system_no_output($@) +{ + my $mode = shift; + # all current uses redirect both stdout and stderr + my @args = @_; + my ($stdout, $stderr, $code) = Capture::Tiny::capture { + system(@args); + }; + if (0 == ($mode & 4)) { + $stdout = '' if $mode & 0x1; + $stderr = '' if $mode & 0x2; + } else { + print(STDOUT $stdout) unless $mode & 0x1; + print(STDERR $stderr) unless $mode & 0x2; + } + return ($stdout, $stderr, $code); +} + +# +# info(printf_parameter) +# +# Use printf to write PRINTF_PARAMETER to stdout only when not --quiet +# + +sub default_info_impl(@) +{ + # Print info string + printf(@_); +} + +sub set_info_callback($) +{ + $info_callback = shift; +} + +sub init_verbose_flag($) +{ + my $quiet = shift; + $lcovutil::verbose -= $quiet; +} + +sub info(@) +{ + my $level = 0; + if ($_[0] =~ /^-?[0-9]+$/) { + $level = shift; + } + &{$info_callback}(@_) + if ($level <= $lcovutil::verbose); + +} + +sub debug +{ + my $level = 0; + if ($_[0] =~ /^[0-9]+$/) { + $level = shift; + } + my $msg = shift; + print(STDERR "DEBUG: $msg") + if ($level < $lcovutil::debug); +} + +sub temp_cleanup() +{ + if (@temp_dirs) { + # Ensure temp directory is not in use by current process + my $cwd = Cwd::getcwd(); + chdir(File::Spec->rootdir()); + info("Removing temporary directories.\n"); + foreach (@temp_dirs) { + rmtree($_); + } + @temp_dirs = (); + chdir($cwd); + } +} + +# +# create_temp_dir() +# +# Create a temporary directory and return its path. +# +# Die on error. 
+#
+
+sub create_temp_dir()
+{
+    my $dir = tempdir(DIR => $lcovutil::tmp_dir,
+                      CLEANUP => !defined($lcovutil::preserve_intermediates));
+    if (!defined($dir)) {
+        die("cannot create temporary directory\n");
+    }
+    append_tempdir($dir);
+    return $dir;
+}
+
+sub append_tempdir($)
+{
+    push(@temp_dirs, @_);
+}
+
+sub _msg_handler
+{
+    my ($msg, $error) = @_;
+
+    if (!($debug || $verbose > 0 || exists($ENV{LCOV_SHOW_LOCATION}))) {
+        $msg =~ s/ at \S+ line \d+\.$//;
+    }
+    # Enforce consistent "WARNING/ERROR:" message prefix
+    $msg =~ s/^(error|warning):\s+//i;
+    my $type = $error ? 'ERROR' : 'WARNING';
+
+    my $txt = "$tool_name: $type: $msg";
+    if ($message_log && 'GLOB' eq ref($message_log)) {
+        flock($message_log, LOCK_EX);
+        # don't bother to seek...assume modern O_APPEND semantics
+        #seek($message_log, 0, SEEK_END);
+        print $message_log $txt;
+        flock($message_log, LOCK_UN);
+    }
+    return $txt;
+}
+
+sub warn_handler($$)
+{
+    print(STDERR _msg_handler(@_));
+}
+
+sub die_handler($)
+{
+    die(_msg_handler(@_, 1));
+}
+
+sub abort_handler($)
+{
+    temp_cleanup();
+    exit(1);
+}
+
+sub count_cores()
+{
+    # how many cores?
+    $maxParallelism = 1;
+    # linux solution...
+    if (open my $handle, '/proc/cpuinfo') {
+        $maxParallelism = scalar(map /^processor/, <$handle>);
+        close($handle) or die("unable to close /proc/cpuinfo: $!\n");
+    }
+}
+
+our $use_MemoryProcess;
+
+sub read_proc_vmsize
+{
+    if (open(PROC, "<", '/proc/self/stat')) {
+        my $str = do { local $/; <PROC> };    # slurp whole thing
+        close(PROC) or die("unable to close /proc/self/stat: $!\n");
+        my @data = split(' ', $str);
+        return $data[23 - 1];    # man proc - vmsize is at index 22
+    } else {
+        lcovutil::ignorable_error($lcovutil::ERROR_PACKAGE,
+                                  "unable to open /proc/self/stat: $!");
+        return 0;
+    }
+}
+
+sub read_system_memory
+{
+    # NOTE: not sure how to do this on windows...
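+    # Illustrative (format assumption, Linux-specific): the loop below expects
+    # a /proc/meminfo entry such as
+    #     MemTotal:       16333852 kB
+    # and converts the kB figure to bytes.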
+ my $total = 0; + eval { + my $f = InOutFile->in('/proc/meminfo'); + my $h = $f->hdl(); + while (<$h>) { + if (/MemTotal:\s+(\d+) kB/) { + $total = $1 * 1024; # read #kB + last; + } + } + }; + if ($@) { + lcovutil::ignorable_error($lcovutil::ERROR_PACKAGE, $@); + } + return $total; +} + +sub init_parallel_params() +{ + if (!defined($lcovutil::maxParallelism)) { + $lcovutil::maxParallelism = 1; + } elsif (0 == $lcovutil::maxParallelism) { + lcovutil::count_cores(); + info("Found $maxParallelism cores.\n"); + } + + if (1 != $lcovutil::maxParallelism && + (defined($lcovutil::maxMemory) || + defined($lcovutil::memoryPercentage)) + ) { + + # need Memory::Process to enable the maxMemory feature + my $cwd = Cwd::getcwd(); + #debug("init: CWD is $cwd\n"); + + eval { + require Memory::Process; + Memory::Process->import(); + $use_MemoryProcess = 1; + }; + # will have done 'cd /' in the die_handler - if Mem::Process not found + #debug("init: chdir back to $cwd\n"); + chdir($cwd); + if ($@) { + push( + @deferred_rc_errors, + [ 1, + $lcovutil::ERROR_PACKAGE, + "package Memory::Process is required to control memory consumption during parallel operations: $@" + ]); + $use_MemoryProcess = 0; + } + } + + if (defined($lcovutil::maxMemory)) { + $lcovutil::maxMemory *= 1 << 20; + } elsif (defined($lcovutil::memoryPercentage)) { + if ($lcovutil::memoryPercentage !~ /^\d+\.?\d*$/ || + $lcovutil::memoryPercentage <= 0) { + push( + @deferred_rc_errors, + [ 1, + $lcovutil::ERROR_USAGE, + "memory_percentage '$lcovutil::memoryPercentage' is not a valid value" + ]); + $lcovutil::memoryPercentage = 100; + } + $lcovutil::maxMemory = + read_system_memory() * ($lcovutil::memoryPercentage / 100.0); + if ($maxMemory) { + my $v = $maxMemory / ((1 << 30) * 1.0); + my $unit = 'Gb'; + if ($v < 1.0) { + $unit = 'Mb'; + $v = $maxMemory / ((1 << 20) * 1.0); + } + info(sprintf("Setting memory throttle limit to %0.1f %s.\n", + $v, $unit + )); + } + } else { + $lcovutil::maxMemory = 0; + } + if (1 != $lcovutil::maxParallelism && # no memory limits if not parallel + 0 != $lcovutil::maxMemory + ) { + if (!$use_MemoryProcess) { + lcovutil::info( + "Attempting to retrieve memory size from /proc instead\n"); + # check if we can get this from /proc (i.e., are we on linux?) + if (0 == read_proc_vmsize()) { + $lcovutil::maxMemory = 0; # turn off that feature + lcovutil::info( + "Continuing execution without Memory::Process or /proc. 
Note that your maximum memory constraint will be ignored\n" + ); + } + } + } + InOutFile::checkGzip() # we know we are going to use gzip for intermediates + if 1 != $lcovutil::maxParallelism; +} + +our $memoryObj; + +sub current_process_size +{ + if ($use_MemoryProcess) { + $memoryObj = Memory::Process->new + unless defined($memoryObj); + $memoryObj->record('size'); + my $arr = $memoryObj->state; + $memoryObj->reset(); + # current vmsize in kB is element 2 of array + return $arr->[0]->[2] * 1024; # return total in bytes + } else { + # assume we are on linux - and get it from /proc + return read_proc_vmsize(); + } +} + +sub merge_child_profile($) +{ + my $profile = shift; + while (my ($key, $d) = each(%$profile)) { + if ('HASH' eq ref($d)) { + while (my ($f, $t) = each(%$d)) { + if ('HASH' eq ref($t)) { + while (my ($x, $y) = each(%$t)) { + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + "unexpected duplicate key $x=$y at $key->$f") + if exists($lcovutil::profileData{$key}{$f}{$x}); + $lcovutil::profileData{$key}{$f}{$x} = $y; + } + } else { + # 'total' key appears in genhtml report + # the others in geninfo. + if (exists($lcovutil::profileData{$key}{$f}) + && + grep(/^$key$/, + ( 'version', 'parse', + 'append', 'total', + 'resolve', 'derive_end', + 'check_consistency')) + ) { + $lcovutil::profileData{$key}{$f} += $t; + } else { + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + "unexpected duplicate key $f=$t in $key:$lcovutil::profileData{$key}{$f}" + ) if exists($lcovutil::profileData{$key}{$f}); + $lcovutil::profileData{$key}{$f} = $t; + } + } + } + } else { + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + "unexpected duplicate key $key=$d in profileData") + if exists($lcovutil::profileData{$key}); + $lcovutil::profileData{$key} = $d; + } + } +} + +sub save_cmd_line($$) +{ + my ($argv, $bin) = @_; + my $cmd = $lcovutil::tool_name; + $lcovutil::profileData{config}{bin} = "$FindBin::RealBin"; + foreach my $arg (@$argv) { + $cmd .= ' '; + if ($arg =~ /\s/) { + $cmd .= "'$arg'"; + } else { + $cmd .= $arg; + } + } + $lcovutil::profileData{config}{cmdLine} = $cmd; + $lcovutil::profileData{config}{buildDir} = Cwd::getcwd(); +} + +sub save_profile($@) +{ + my ($dest, $html) = @_; + + if (defined($lcovutil::profile)) { + $lcovutil::profileData{config}{maxParallel} = $maxParallelism; + $lcovutil::profileData{config}{tool} = $lcovutil::tool_name; + $lcovutil::profileData{config}{version} = $lcovutil::lcov_version; + $lcovutil::profileData{config}{tool_dir} = $lcovutil::tool_dir; + $lcovutil::profileData{config}{url} = $lcovutil::lcov_url; + foreach my $t ('date', 'uname -a', 'hostname') { + my $v = `$t`; + chomp($v); + $lcovutil::profileData{config}{(split(' ', $t))[0]} = $v; + } + my $save = $maxParallelism; + count_cores(); + $lcovutil::profileData{config}{cores} = $maxParallelism; + $maxParallelism = $save; + + my $json = JsonSupport::encode(\%lcovutil::profileData); + + if ('' ne $lcovutil::profile) { + $dest = $lcovutil::profile; + } else { + $dest .= ".json"; + } + if (open(JSON, ">", "$dest")) { + print(JSON $json); + close(JSON) or die("unable to close $dest: $!\n"); + } else { + warn("unable to open profile output $dest: '$!'\n"); + } + + # only generate the extra data if profile enabled + if ($html) { + + my $leader = + '' + . 
"\n"; + my $tail = "\n"; + + my $outDir = File::Basename::dirname($html); + open(CMD, '>', File::Spec->catfile($outDir, 'cmdline.html')) or + die("unable to create cmdline.html: $!"); + print(CMD $leader, $lcovutil::profileData{config}{cmdLine}, + "\n", $tail); + close(CMD) or die("unable to close cmdline.html: $!"); + + # and the profile data + open(PROF, '>', $html) or die("unable to create $html: $!"); + print(PROF $leader); + + open(IN, '<', $dest) or die("unable to open $dest: $!"); + while () { + print(PROF $_); + } + close(IN) or die("unable to close $dest: $!"); + print(PROF "\n", $tail); + close(PROF) or die("unable to close cmdline.html: $!"); + } + } +} + +sub set_extensions +{ + my ($type, $str) = @_; + die("unknown language '$type'") unless exists($languageExtensions{$type}); + $languageExtensions{$type} = join('|', split($split_char, $str)); +} + +sub do_mangle_check +{ + return unless @lcovutil::cpp_demangle; + + if (1 == scalar(@lcovutil::cpp_demangle)) { + if ('' eq $lcovutil::cpp_demangle[0]) { + # no demangler specified - use c++filt by default + if (defined($lcovutil::cpp_demangle_tool)) { + $lcovutil::cpp_demangle[0] = $lcovutil::cpp_demangle_tool; + } else { + $lcovutil::cpp_demangle[0] = 'c++filt'; + } + } + } elsif (1 < scalar(@lcovutil::cpp_demangle)) { + die("unsupported usage: --demangle-cpp with genhtml_demangle_cpp_tool") + if (defined($lcovutil::cpp_demangle_tool)); + die( + "unsupported usage: --demangle-cpp with genhtml_demangle_cpp_params") + if (defined($lcovutil::cpp_demangle_params)); + } + if ($lcovutil::cpp_demangle_params) { + # deprecated usage + push(@lcovutil::cpp_demangle, + split(' ', $lcovutil::cpp_demangle_params)); + } + # Extra flag necessary on OS X so that symbols listed by gcov get demangled + # properly. + push(@lcovutil::cpp_demangle, '--no-strip-underscores') + if ($^ eq "darwin"); + + $lcovutil::demangle_cpp_cmd = ''; + foreach my $e (@lcovutil::cpp_demangle) { + $lcovutil::demangle_cpp_cmd .= (($e =~ /\s/) ? "'$e'" : $e) . ' '; + } + my $tool = $lcovutil::cpp_demangle[0]; + die("could not find $tool tool needed for --demangle-cpp") + if (lcovutil::system_no_output(3, "echo \"\" | '$tool'")); +} + +sub configure_callback +{ + # if there is just one argument, then assume it might be a + # concatenation - otherwise, just use straight. + my $cb = shift; + my @args = + 1 == scalar(@_) ? + split($lcovutil::split_char, join($lcovutil::split_char, @_)) : + @_; + my $script = $args[0]; + if ($script =~ /\.pm$/) { + my $dir = File::Basename::dirname($script); + my $package = File::Basename::basename($script); + my $class = $package; + $class =~ s/\.pm$//; + unshift(@INC, $dir); + eval { + require $package; + #$package->import(qw(new)); + # the first value in @_ is the script name + $$cb = $class->new(@args); + }; + if ($@ || + !defined($$cb)) { + lcovutil::ignorable_error($lcovutil::ERROR_PACKAGE, + "unable to create callback from module '$script'" . + (defined($@) ? ": $@" : '')); + } + shift(@INC); + } else { + # not module + $$cb = ScriptCaller->new(@args); + } + push(@configured_callbacks, $cb); +} + +sub cleanup_callbacks +{ + if ($lcovutil::contextCallback) { + my $ctx; + eval { $ctx = $lcovutil::contextCallback->context(); }; + if ($@) { + lcovutil::ignorable_error($lcovutil::ERROR_CALLBACK, + "context callback '" . + $lcovutil::contextCallback[0] . + " ...' 
failed: $@"); + } else { + die('unexpect context callback result: expected hash ref') + unless 'HASH' eq ref($ctx); + $lcovutil::profileData{context} = $ctx; + } + } + foreach my $cb (@configured_callbacks) { + undef $$cb; + } +} + +# use these list values from the RC file unless the option is +# passed on the command line +my (@rc_filter, @rc_ignore, @rc_exclude_patterns, + @rc_include_patterns, @rc_subst_patterns, @rc_omit_patterns, + @rc_erase_patterns, @rc_version_script, @unsupported_config, + @rc_source_directories, @rc_build_dir, %unsupported_rc, + $keepGoing, $help, @rc_resolveCallback, + @rc_expected_msg_counts, @rc_criteria_script, @rc_contextCallback, + $rc_no_branch_coverage, $rc_no_func_coverage, $rc_no_checksum, + $version); +my $quiet = 0; +our $tempdirname; + +# these options used only by lcov - but moved here so that we can +# share arg parsing +our ($lcov_remove, # If set, removes parts of tracefile + $lcov_capture, # If set, capture data + $lcov_extract); # If set, extracts parts of tracefile +our @opt_config_files; +our @opt_ignore_errors; +our @opt_expected_message_counts; +our @opt_filter; +our @comments; + +my %deprecated_rc = ("genhtml_demangle_cpp" => "demangle_cpp", + "genhtml_demangle_cpp_tool" => "demangle_cpp", + "genhtml_demangle_cpp_params" => "demangle_cpp", + "geninfo_checksum" => "checksum", + "geninfo_no_exception_branch" => "no_exception_branch", + 'geninfo_adjust_src_path' => 'substitute', + "lcov_branch_coverage" => "branch_coverage", + "lcov_function_coverage" => "function_coverage", + "genhtml_function_coverage" => "function_coverage", + "genhtml_branch_coverage" => "branch_coverage", + 'genhtml_criteria_script' => 'criteria_script', + "lcov_fail_under_lines" => 'fail_under_lines', + 'genhtml_highlight' => undef,); + +my ($cExtensions, $rtlExtensions, $javaExtensions, + $perlExtensions, $pythonExtensions); + +my %rc_common = ( + 'derive_function_end_line' => \$lcovutil::derive_function_end_line, + 'derive_function_end_line_all_files' => + \$derive_function_end_line_all_files, + 'trivial_function_threshold' => \$lcovutil::trivial_function_threshold, + "lcov_tmp_dir" => \$lcovutil::tmp_dir, + "lcov_json_module" => \$JsonSupport::rc_json_module, + "branch_coverage" => \$lcovutil::br_coverage, + 'mcdc_coverage' => \$lcovutil::mcdc_coverage, + "function_coverage" => \$lcovutil::func_coverage, + "lcov_excl_line" => \$lcovutil::EXCL_LINE, + "lcov_excl_br_line" => \$lcovutil::EXCL_BR_LINE, + "lcov_excl_exception_br_line" => \$lcovutil::EXCL_EXCEPTION_LINE, + "lcov_excl_start" => \$lcovutil::EXCL_START, + "lcov_excl_stop" => \$lcovutil::EXCL_STOP, + "lcov_excl_br_start" => \$lcovutil::EXCL_BR_START, + "lcov_excl_br_stop" => \$lcovutil::EXCL_BR_STOP, + "lcov_excl_exception_br_start" => \$lcovutil::EXCL_EXCEPTION_BR_START, + "lcov_excl_exception_br_stop" => \$lcovutil::EXCL_EXCEPTION_BR_STOP, + 'lcov_unreachable_start' => \$lcovutil::UNREACHABLE_START, + 'lcov_unreachable_stop' => \$lcovutil::UNREACHABLE_STOP, + 'lcov_unreachable_line' => \$lcovutil::UNREACHABLE_LINE, + 'retain_unreachable_coverpoints_if_executed' => + \$lcovutil::retainUnreachableCoverpointIfHit, + "lcov_function_coverage" => \$lcovutil::func_coverage, + "lcov_branch_coverage" => \$lcovutil::br_coverage, + "ignore_errors" => \@rc_ignore, + "max_message_count" => \$lcovutil::suppressAfter, + "message_log" => \$lcovutil::message_log, + 'expected_message_count' => \@rc_expected_msg_counts, + 'stop_on_error' => \$lcovutil::stop_on_error, + 'treat_warning_as_error' => 
\$lcovutil::treat_warning_as_error, + 'warn_once_per_file' => \$lcovutil::warn_once_per_file, + 'check_data_consistency' => \$lcovutil::check_data_consistency, + "rtl_file_extensions" => \$rtlExtensions, + "c_file_extensions" => \$cExtensions, + "perl_file_extensions" => \$perlExtensions, + "python_file_extensions" => \$pythonExtensions, + "java_file_extensions" => \$javaExtensions, + 'info_file_pattern' => \$info_file_pattern, + "filter_lookahead" => \$lcovutil::source_filter_lookahead, + "filter_bitwise_conditional" => + \$lcovutil::source_filter_bitwise_are_conditional, + 'filter_blank_aggressive' => \$filter_blank_aggressive, + "profile" => \$lcovutil::profile, + "parallel" => \$lcovutil::maxParallelism, + "memory" => \$lcovutil::maxMemory, + "memory_percentage" => \$lcovutil::memoryPercentage, + "max_fork_fails" => \$lcovutil::max_fork_fails, + "max_tasks_per_core" => \$lcovutil::max_tasks_per_core, + "fork_fail_timeout" => \$lcovutil::fork_fail_timeout, + 'source_directory' => \@rc_source_directories, + 'build_directory' => \@rc_build_dir, + + "no_exception_branch" => \$lcovutil::exclude_exception_branch, + 'filter' => \@rc_filter, + 'exclude' => \@rc_exclude_patterns, + 'include' => \@rc_include_patterns, + 'substitute' => \@rc_subst_patterns, + 'omit_lines' => \@rc_omit_patterns, + 'erase_functions' => \@rc_erase_patterns, + 'context_script' => \@rc_contextCallback, + "version_script" => \@rc_version_script, + 'resolve_script' => \@rc_resolveCallback, + 'criteria_callback_data' => + \@CoverageCriteria::criteriaCallbackTypes, + 'criteria_callback_levels' => + \@CoverageCriteria::criteriaCallbackLevels, + 'criteria_script' => \@rc_criteria_script, + + "checksum" => \$lcovutil::verify_checksum, + 'compute_file_version' => \$lcovutil::compute_file_version, + "case_insensitive" => \$lcovutil::case_insensitive, + "forget_testcase_names" => \$TraceFile::ignore_testcase_name, + "split_char" => \$lcovutil::split_char, + + 'check_existence_before_callback' => + \$check_file_existence_before_callback, + + "demangle_cpp" => \@lcovutil::cpp_demangle, + 'excessive_count_threshold' => \$excessive_count_threshold, + + 'sort_input' => \$lcovutil::sort_inputs, + + "fail_under_lines" => \$fail_under_lines, + "fail_under_branches" => \$fail_under_branches, + 'lcov_filter_parallel' => \$lcovutil::lcov_filter_parallel, + 'lcov_filter_chunk_size' => \$lcovutil::lcov_filter_chunk_size,); + +# lcov needs to know the options which might get passed to geninfo in --capture mode +our $defaultChunkSize; # for performance tweaking +our $defaultInterval; # for performance tweaking +our @rc_gcov_tool; +our $geninfo_adjust_testname; +our $opt_external; +our $opt_follow = 0; +our $opt_follow_file_links = 0; +our $opt_compat_libtool; +our $opt_gcov_all_blocks = 1; +our $opt_adjust_unexecuted_blocks = 0; +our $geninfo_opt_compat; +our $rc_adjust_src_path; # Regexp specifying parts to remove from source path +our $rc_auto_base = 1; +our $rc_intermediate = "auto"; +our $geninfo_captureAll; # look for both .gcda and lone .gcno files + +our %geninfo_rc_opts = ( + "geninfo_gcov_tool" => \@rc_gcov_tool, + "geninfo_adjust_testname" => \$geninfo_adjust_testname, + "geninfo_checksum" => \$lcovutil::verify_checksum, + "geninfo_compat_libtool" => \$opt_compat_libtool, + "geninfo_external" => \$opt_external, + "geninfo_follow_symlinks" => \$opt_follow, + "geninfo_follow_file_links" => \$opt_follow_file_links, + "geninfo_gcov_all_blocks" => \$opt_gcov_all_blocks, + "geninfo_unexecuted_blocks" => \$opt_adjust_unexecuted_blocks, + 
"geninfo_compat" => \$geninfo_opt_compat, + "geninfo_adjust_src_path" => \$rc_adjust_src_path, + "geninfo_auto_base" => \$rc_auto_base, + "geninfo_intermediate" => \$rc_intermediate, + "geninfo_no_exception_branch" => \$lcovutil::exclude_exception_branch, + 'geninfo_chunk_size' => \$defaultChunkSize, + 'geninfo_interval_update' => \$defaultInterval, + 'geninfo_capture_all' => \$geninfo_captureAll); + +our %argCommon = ("tempdir=s" => \$tempdirname, + "version-script=s" => \@lcovutil::extractVersionScript, + "criteria-script=s" => + \@CoverageCriteria::coverageCriteriaScript, + + "checksum" => \$lcovutil::verify_checksum, + "no-checksum" => \$rc_no_checksum, + "quiet|q+" => \$quiet, + "verbose|v+" => \$lcovutil::verbose, + "debug+" => \$lcovutil::debug, + "help|h|?" => \$help, + "version" => \$version, + 'comment=s' => \@comments, + 'toolname=s' => \$lcovutil::tool_name, + + "function-coverage" => \$lcovutil::func_coverage, + "branch-coverage" => \$lcovutil::br_coverage, + 'mcdc-coverage' => \$lcovutil::mcdc_coverage, + "no-function-coverage" => \$rc_no_func_coverage, + "no-branch-coverage" => \$rc_no_branch_coverage, + + "fail-under-lines=s" => \$fail_under_lines, + "fail-under-branches=s" => \$fail_under_branches, + 'source-directory=s' => + \@ReadCurrentSource::source_directories, + 'build-directory=s' => \@lcovutil::build_directory, + + 'resolve-script=s' => \@lcovutil::resolveCallback, + 'context-script=s' => \@lcovutil::contextCallback, + "filter=s" => \@opt_filter, + "demangle-cpp:s" => \@lcovutil::cpp_demangle, + "ignore-errors=s" => \@opt_ignore_errors, + "expect-message-count=s" => \@opt_expected_message_counts, + 'msg-log:s' => \$message_log, + "keep-going" => \$keepGoing, + "config-file=s" => \@unsupported_config, + "rc=s%" => \%unsupported_rc, + "profile:s" => \$lcovutil::profile, + "exclude=s" => \@lcovutil::exclude_file_patterns, + "include=s" => \@lcovutil::include_file_patterns, + "erase-functions=s" => \@lcovutil::exclude_function_patterns, + "omit-lines=s" => \@lcovutil::omit_line_patterns, + "substitute=s" => \@lcovutil::file_subst_patterns, + "parallel|j:i" => \$lcovutil::maxParallelism, + "memory=i" => \$lcovutil::maxMemory, + "forget-test-names" => \$TraceFile::ignore_testcase_name, + "preserve" => \$lcovutil::preserve_intermediates, + 'sort-input' => \$lcovutil::sort_inputs,); + +sub warnDeprecated +{ + my ($key, $replacement) = @_; + my $opt_used = defined($replacement); + my $suffix = + $opt_used ? + ". Consider using '$replacement'. instead. 
(Backward-compatible support will be removed in the future.)"
+        :
+        ' and ignored.';
+
+    push(@deferred_rc_errors,
+         [0, $lcovutil::ERROR_DEPRECATED,
+          "RC option '$key' is deprecated$suffix"
+         ]);
+    return $opt_used;
+}
+
+sub _set_config($$$)
+{
+    # write an RC configuration value - array or scalar
+    my ($ref, $key, $value) = @_;
+    my $r = $ref->{$key};
+    my $t = ref($r);
+    if ('ARRAY' eq $t) {
+        info(2, "  append $value to list $key\n");
+        if ('ARRAY' eq ref($value)) {
+            push(@$r, @$value);
+        } else {
+            push(@$r, $value);
+        }
+    } else {
+        # opt is a scalar or not defined
+        # only way for $value to NOT be an array is if there is a bug in
+        # the caller such that a scalar ref was passed where a prior call
+        # had passed a list ref for the same RC option name
+        die("unexpected ARRAY for $key value")
+            if ('ARRAY' eq ref($value));
+        $$r = $value;
+        info(2, "  assign $$r to $key\n");
+    }
+}
+
+#
+# read_config(filename, $optionsHash)
+#
+# Read configuration file FILENAME and write supported key/values into
+# RC options hash
+# Return: 1 if some config value was set, 0 if not (used for error messaging)
+
+sub read_config($$);    # forward decl, to make perl happy about recursive call
+my %included_config_files;
+my @include_stack;
+
+sub read_config($$)
+{
+    my ($filename, $opts) = @_;
+    my $key;
+    my $value;
+    local *HANDLE;
+
+    my $set_value = 0;
+    info(1, "read_config: $filename\n");
+    if (exists($included_config_files{abs_path($filename)})) {
+        lcovutil::ignorable_error($lcovutil::ERROR_USAGE,
+                                  'config file inclusion loop detected: "' .
+                                      join('" -> "', @include_stack) .
+                                      '" -> "' . $filename . '"');
+        # this line is unreachable as we can't ignore the 'usage' error
+        # because it is generated when we parse the config-file options
+        # but the '--ignore-errors' option isn't parsed until later, after
+        # the GetOptions call.
+        # This could be fixed by doing some early processing on the command
+        # line (similar to how config file options are handled) - but that
+        # seems like overkill.  Just force the user to fix the issues.
+        return 0;    # LCOV_UNREACHABLE_LINE
+    }
+
+    if (!open(HANDLE, "<", $filename)) {
+        lcovutil::ignorable_error($lcovutil::ERROR_USAGE,
+                          "cannot read configuration file '$filename': $!");
+        # similarly, this line is also unreachable for the same reasons as
+        # described above.
+        return 0;    # didn't set anything  LCOV_UNREACHABLE_LINE
+    }
+    $included_config_files{abs_path($filename)} = 1;
+    push(@include_stack, $filename);
+    VAR: while (<HANDLE>) {
+        chomp;
+        # Skip comments
+        s/#.*//;
+        # Remove leading blanks
+        s/^\s+//;
+        # Remove trailing blanks
+        s/\s+$//;
+        next unless length;
+        ($key, $value) = split(/\s*=\s*/, $_, 2);
+        # is this an environment variable?
+        while (defined($value) &&
+               $value =~ /\$ENV\{([^}]+)\}/) {
+            my $varname = $1;
+            if (!exists($ENV{$varname})) {
+                push(
+                    @deferred_rc_errors,
+                    [ 1,
+                      $lcovutil::ERROR_USAGE,
+                      "\"$filename\": $.: variable '$key' uses environment variable '$varname' - which is not set (ignoring '$_')."
+ ]); + next VAR; + } + $value =~ s/^\$ENV\{$varname\}/$ENV{$varname}/g; + } + if (defined($key) && + exists($deprecated_rc{$key})) { + next unless warnDeprecated($key, $deprecated_rc{$key}); + $key = $deprecated_rc{$key}; + } + if (defined($key) && defined($value)) { + info(2, " set: $key = $value\n"); + # special case: read included file + if ($key eq 'config_file') { + $set_value |= read_config($value, $opts); + next; + } + # skip if application doesn't use this setting + next unless exists($opts->{$key}); + _set_config($opts, $key, $value); + $set_value = 1; + } else { + my $context = MessageContext::context(); + push( + @deferred_rc_errors, + [ 1, + $lcovutil::ERROR_FORMAT, + "\"$filename\": $.: malformed configuration file statement '$_': expected \"key = value\"/" + ]); + } + } + close(HANDLE) or die("unable to close $filename: $!\n"); + delete $included_config_files{abs_path($filename)}; + pop(@include_stack); + return $set_value; +} + +# common utility used by genhtml, geninfo, lcov to clean up RC options, +# check for various possible system-wide RC files, and apply the result +# return 1 if we set something +sub apply_rc_params($) +{ + my $rcHash = shift; + + # merge common RC values with the ones passed in + my %rcHash = (%$rcHash, %rc_common); + + # Check command line for a configuration file name + # have to set 'verbosity' flag from environment - otherwise, it isn't + # set (from GetOpt) when we parse the RC file + Getopt::Long::Configure("pass_through", "no_auto_abbrev"); + my $quiet = 0; + Getopt::Long::GetOptions("config-file=s" => \@opt_config_files, + "rc=s%" => \@opt_rc, + "quiet|q+" => \$quiet, + "verbose|v+" => \$lcovutil::verbose, + "debug+" => \$lcovutil::debug,); + init_verbose_flag($quiet); + Getopt::Long::Configure("default"); + + my $set_value = 0; + + if (0 != scalar(@opt_config_files)) { + foreach my $f (@opt_config_files) { + $set_value |= read_config($f, \%rcHash); + } + } else { + foreach my $v (['HOME', '.lcovrc'], ['LCOV_HOME', 'etc', 'lcovrc']) { + next unless exists($ENV{$v->[0]}); + my $f = File::Spec->catfile($ENV{$v->[0]}, splice(@$v, 1)); + if (-r $f) { + $set_value |= read_config($f, \%rcHash); + last; + } + } + } + + my $first; + foreach my $v (@opt_rc) { + my $index = index($v, '='); + if ($index == -1) { + push(@deferred_rc_errors, + [1, $lcovutil::ERROR_USAGE, + "malformed --rc option '$v' - should be 'key=value'" + ]); + next; + } + my $key = substr($v, 0, $index); + my $value = substr($v, $index + 1); + $key =~ s/^\s+|\s+$//g; + unless (exists($rcHash{$key})) { + push( + @deferred_rc_errors, + [ 1, + $lcovutil::ERROR_USAGE, + "unknown/unsupported key '$key' found in '--rc $v' - see 'man lcovrc(5)' for the list of valid options" + ]); + next; + } + info(1, "apply --rc overrides\n") + unless defined($first); + $first = 1; + # can't complain about deprecated uses here because the user + # might have suppressed that message - but we haven't looked at + # the suppressions in the parameter list yet. 
+ if (exists($deprecated_rc{$key})) { + next unless warnDeprecated($key, $deprecated_rc{$key}); + } + # strip spaces + $value =~ s/^\s+|\s+$//g; + _set_config(\%rcHash, $key, $value); + $set_value = 1; + } + foreach my $d (['rtl', $rtlExtensions], + ['c', $cExtensions], + ['perl', $perlExtensions], + ['python', $pythonExtensions], + ['java', $javaExtensions] + ) { + lcovutil::set_extensions(@$d) if $d->[1]; + } + return $set_value; +} + +sub parseOptions +{ + my ($rcOptions, $cmdLineOpts, $output_arg) = @_; + + apply_rc_params($rcOptions); + + my %options = (%argCommon, %$cmdLineOpts); + if (!GetOptions(%options)) { + return 0; + } + foreach my $d (['--config-file', scalar(@unsupported_config)], + ['--rc', scalar(%unsupported_rc)]) { + die("'" . $d->[0] . "' option name cannot be abbreviated\n") + if ($d->[1]); + } + if ($help) { + main::print_usage(*STDOUT); + exit(0); + } + # Check for version option + if ($version) { + print("$tool_name: $lcov_version\n"); + exit(0); + } + if (defined($message_log)) { + if (!$message_log) { + # base log file name on output arg (if specified) or tool name otherwise + $message_log = ( + defined($$output_arg) ? + substr($$output_arg, 0, rindex($$output_arg, '.')) : + $tool_name) . + ".msg"; + } + $message_filename = $message_log; + open(LOG, ">", $message_log) or + die("unable to write message log '$message_log': $!"); + $message_log = \*LOG; + } + + lcovutil::init_verbose_flag($quiet); + # apply the RC file settings if no command line arg + foreach my $rc ([\@opt_filter, \@rc_filter], + [\@opt_ignore_errors, \@rc_ignore], + [\@opt_expected_message_counts, \@rc_expected_msg_counts], + [\@lcovutil::exclude_file_patterns, \@rc_exclude_patterns], + [\@lcovutil::include_file_patterns, \@rc_include_patterns], + [\@lcovutil::file_subst_patterns, \@rc_subst_patterns], + [\@lcovutil::omit_line_patterns, \@rc_omit_patterns], + [\@lcovutil::exclude_function_patterns, \@rc_erase_patterns + ], + [\@lcovutil::extractVersionScript, \@rc_version_script], + [\@CoverageCriteria::coverageCriteriaScript, + \@rc_criteria_script + ], + [\@ReadCurrentSource::source_directories, + \@rc_source_directories + ], + [\@lcovutil::build_directory, \@rc_build_dir], + [\@lcovutil::resolveCallback, \@rc_resolveCallback], + [\@lcovutil::contextCallback, \@rc_contextCallback], + ) { + @{$rc->[0]} = @{$rc->[1]} unless (@{$rc->[0]}); + } + + $ReadCurrentSource::searchPath = + SearchPath->new('source directory', + @ReadCurrentSource::source_directories); + + $lcovutil::stop_on_error = 0 + if (defined $keepGoing); + + push(@lcovutil::exclude_file_patterns, @ARGV) + if $lcov_remove; + push(@lcovutil::include_file_patterns, @ARGV) + if $lcov_extract; + + # Merge options + $lcovutil::func_coverage = 0 + if ($rc_no_func_coverage); + $lcovutil::br_coverage = 0 + if ($rc_no_branch_coverage); + + $lcovutil::verify_checksum = 0 + if (defined($rc_no_checksum)); + + foreach my $cb ([\$versionCallback, \@lcovutil::extractVersionScript], + [\$resolveCallback, \@lcovutil::resolveCallback], + [\$CoverageCriteria::criteriaCallback, + \@CoverageCriteria::coverageCriteriaScript + ], + [\$contextCallback, \@lcovutil::contextCallback], + ) { + lcovutil::configure_callback($cb->[0], @{$cb->[1]}) + if (@{$cb->[1]}); + } + # perhaps warn that date/owner and directory are only supported by genhtml? 
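+    # Illustrative example (hypothetical value): an RC setting such as
+    #     criteria_callback_levels = top,file
+    # arrives here as a comma-separated list and is validated below against
+    # the supported values ('top', 'directory', 'file'); anything else is
+    # fatal.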
+ foreach my $data (['criteria_callback_levels', + \@CoverageCriteria::criteriaCallbackLevels, + ['top', 'directory', 'file'] + ], + ['criteria_callback_data', + \@CoverageCriteria::criteriaCallbackTypes, + ['date', 'owner'] + ] + ) { + my ($rc, $user, $valid) = @$data; + @$user = split(',', join(',', @$user)); + foreach my $x (@$user) { + die("invalid '$rc' value \"$x\" - expected (" . + join(", ", @$valid) . ")") + unless grep(/^$x$/, @$valid); + } + } + # context only gets grabbed/stored with '--profile' + $lcovutil::profile = '' + if ($contextCallback && !defined($lcovutil::profile)); + + if (!$lcov_capture) { + if ($lcovutil::compute_file_version && + !defined($versionCallback)) { + lcovutil::ignorable_warning($lcovutil::ERROR_USAGE, + "'compute_file_version=1' option has no effect without either '--version-script' or 'version_script=...'." + ); + } + lcovutil::munge_file_patterns(); + lcovutil::init_parallel_params(); + # Determine which errors the user wants us to ignore + parse_ignore_errors(@opt_ignore_errors); + parse_expected_message_counts(@opt_expected_message_counts); + # Determine what coverpoints the user wants to filter + push(@opt_filter, 'exception') if $lcovutil::exclude_exception_branch; + parse_cov_filters(@opt_filter); + + # Ensure that the c++filt tool is available when using --demangle-cpp + lcovutil::do_mangle_check(); + + foreach my $entry (@deferred_rc_errors) { + my ($isErr, $type, $msg) = @$entry; + if ($isErr) { + lcovutil::ignorable_error($type, $msg); + } else { + lcovutil::ignorable_warning($type, $msg); + } + } + } + + return 1; +} + +# +# transform_pattern(pattern) +# +# Transform shell wildcard expression to equivalent Perl regular expression. +# Return transformed pattern. +# + +sub transform_pattern($) +{ + my $pattern = $_[0]; + + # Escape special chars + + $pattern =~ s/\\/\\\\/g; + $pattern =~ s/\//\\\//g; + $pattern =~ s/\^/\\\^/g; + $pattern =~ s/\$/\\\$/g; + $pattern =~ s/\(/\\\(/g; + $pattern =~ s/\)/\\\)/g; + $pattern =~ s/\[/\\\[/g; + $pattern =~ s/\]/\\\]/g; + $pattern =~ s/\{/\\\{/g; + $pattern =~ s/\}/\\\}/g; + $pattern =~ s/\./\\\./g; + $pattern =~ s/\,/\\\,/g; + $pattern =~ s/\|/\\\|/g; + $pattern =~ s/\+/\\\+/g; + $pattern =~ s/\!/\\\!/g; + + # Transform ? => (.) 
and * => (.*) + + $pattern =~ s/\*/\(\.\*\)/g; + $pattern =~ s/\?/\(\.\)/g; + $pattern = "/$pattern/i" + if ($lcovutil::case_insensitive); + return qr($pattern); +} + +sub verify_regexp_patterns +{ + my ($flag, $list, $checkInsensitive) = @_; + PAT: foreach my $pat (@$list) { + my $text = 'abc'; + my $str = eval "\$text =~ $pat ;"; + die("Invalid regexp \"$flag $pat\":\n$@") + if $@; + + if ($checkInsensitive) { + for (my $i = length($pat) - 1; $i >= 0; --$i) { + my $char = substr($pat, $i, 1); + next PAT + if ($char eq 'i'); + last # didn't see the 'i' character + if ($char =~ /[\/#!@%]/); + } + lcovutil::ignorable_warning($lcovutil::ERROR_USAGE, + "$flag pattern '$pat' does not seem to be case insensitive - but you asked for case insensitive matching" + ); + } + } +} + +sub munge_file_patterns +{ + # Need perlreg expressions instead of shell pattern + if (@exclude_file_patterns) { + @exclude_file_patterns = + map({ [transform_pattern($_), $_, 0]; } @exclude_file_patterns); + } + + if (@include_file_patterns) { + @include_file_patterns = + map({ [transform_pattern($_), $_, 0]; } @include_file_patterns); + } + + # precompile match patterns and check for validity + foreach my $p (['omit-lines', \@omit_line_patterns], + ['exclude-functions', \@exclude_function_patterns]) { + my ($flag, $list) = @$p; + next unless (@$list); + # keep track of number of times pattern was applied + # regexp compile will die if pattern is invalid + eval { + @$list = map({ [qr($_), $_, 0]; } @$list); + }; + die("Invalid $flag regexp in ('" . join('\' \'', @$list) . "'):\n$@") + if $@; + } + # sadly, substitutions aren't regexps and can't be precompiled + if (@file_subst_patterns) { + verify_regexp_patterns('--substitute', \@file_subst_patterns, + \$lcovutil::case_insensitive); + + # keep track of number of times this was applied + @file_subst_patterns = map({ [$_, 0]; } @file_subst_patterns); + } + + # and check for valid region patterns + for my $regexp (['lcov_excl_line', $lcovutil::EXCL_LINE], + ['lcov_excl_br_line', $lcovutil::EXCL_BR_LINE], + ['lcov_excl_exception_br_line', + $lcovutil::EXCL_EXCEPTION_LINE + ], + ["lcov_excl_start", \$lcovutil::EXCL_START], + ["lcov_excl_stop", \$lcovutil::EXCL_STOP], + ["lcov_excl_br_start", \$lcovutil::EXCL_BR_START], + ["lcov_excl_br_stop", \$lcovutil::EXCL_BR_STOP], + ["lcov_excl_exception_br_start", + \$lcovutil::EXCL_EXCEPTION_BR_START + ], + ["lcov_excl_exception_br_stop", + \$lcovutil::EXCL_EXCEPTION_BR_STOP + ], + ["lcov_unreachable_start", \$lcovutil::UNREACHABLE_START], + ["lcov_unreachable_stop", \$lcovutil::UNREACHABLE_STOP], + ["lcov_excl_line", \$lcovutil::UNREACHABLE_LINE], + ) { + eval 'qr/' . $regexp->[1] . '/'; + my $error = $@; + chomp($error); + $error =~ s/at \(eval.*$//; + die("invalid '" . $regexp->[0] . 
"' exclude pattern: $error") + if $error; + } + @suppress_function_patterns = map({ $_->[0] } @exclude_function_patterns); +} + +sub warn_file_patterns +{ + foreach my $p (['include', \@include_file_patterns], + ['exclude', \@exclude_file_patterns], + ['substitute', \@file_subst_patterns], + ['omit-lines', \@omit_line_patterns], + ['exclude-functions', \@exclude_function_patterns], + ) { + my ($type, $patterns) = @$p; + foreach my $pat (@$patterns) { + my $count = $pat->[scalar(@$pat) - 1]; + if (0 == $count) { + my $str = $pat->[scalar(@$pat) - 2]; + lcovutil::ignorable_error($ERROR_UNUSED, + "'$type' pattern '$str' is unused."); + } + } + } +} + +# +# subst_file_name($path) +# +# apply @file_subst_patterns to $path and return +# +sub subst_file_name($) +{ + my $name = shift; + foreach my $p (@file_subst_patterns) { + my $old = $name; + # sadly, no support for pre-compiled patterns + eval '$name =~ ' . $p->[0] . ';'; # apply pattern that user provided... + # $@ should never match: we already checked pattern validity during + # initialization - above. Still: belt and braces. + die("invalid 'subst' regexp '" . $p->[0] . "': $@") + if ($@); + $p->[-1] += 1 + if $old ne $name; + } + return $name; +} + +# +# strip_directories($path, $depth) +# +# Remove DEPTH leading directory levels from PATH. +# + +sub strip_directories($$) +{ + my $filename = $_[0]; + my $depth = $_[1]; + my $i; + + if (!defined($depth) || ($depth < 1)) { + return $filename; + } + my $d = $lcovutil::dirseparator; + for ($i = 0; $i < $depth; $i++) { + if ($lcovutil::case_insensitive) { + $filename =~ s/^[^$d]*$d+(.*)$/$1/i; + } else { + $filename =~ s/^[^$d]*$d+(.*)$/$1/; + } + } + return $filename; +} + +sub define_errors() +{ + my $id = 0; + foreach my $d (@lcovErrs) { + my ($k, $ref) = @$d; + $$ref = $id; + $lcovErrors{$k} = $id; + $ERROR_ID{$k} = $id; + $ERROR_NAME{$id} = $k; + $ignore[$id] = 0; + $message_count[$id] = 0; + $expected_message_count[$id] = undef; # no expected count, by default + ++$id; + } +} + +sub summarize_messages +{ + my $silent = shift; + return if $lcovutil::in_child_process; + + # first check for expected message count constraints + for (my $idx = 0; $idx <= $#expected_message_count; ++$idx) { + my $expr = $expected_message_count[$idx]; + next unless defined($expr); + my $t = $message_count[$idx]; + $expr =~ s/%C/$t/g; + my $v; + eval { $v = eval $expr; }; + if ($@ || !defined($v)) { + # we checked the syntax of the message - so should not be able to fail + lcovutil::ignorable_error($lcovutil::ERROR_CALLBACK, + "evaluation of '$expr' failed: $@"); + next; + } + unless ($v) { + my $type = $ERROR_NAME{$idx}; + lcovutil::ignorable_error($lcovutil::ERROR_COUNT, + "'$type' constraint '$expr' is not true (see '--expect_message_count' for details)." + ); + } + } + + # now summarize + my %total = ('error' => 0, + 'warning' => 0, + 'ignore' => 0,); + # use verbosity level -1: so print unless user says "-q -q"...really quiet + + my $found = 0; + while (my ($type, $hash) = each(%message_types)) { + while (my ($name, $count) = each(%$hash)) { + $total{$type} += $count; + $found = 1; + } + } + my $header = "Message summary:\n"; + foreach my $type ('error', 'warning', 'ignore') { + next unless $total{$type}; + $found = 1; + my $leader = $header . ' ' . $total{$type} . " $type message" . + ($total{$type} > 1 ? 's' : '') . ":\n"; + my $h = $message_types{$type}; + foreach my $k (sort keys %$h) { + info(-1, $leader . ' ' . $k . ": " . $h->{$k} . 
"\n"); + $leader = ''; + } + $header = ''; + } + info(-1, "$header no messages were reported\n") unless $found || $silent; +} + +sub parse_ignore_errors(@) +{ + my @ignore_errors = split($split_char, join($split_char, @_)); + + # first, mark that all known errors are not ignored + foreach my $item (keys(%ERROR_ID)) { + my $id = $ERROR_ID{$item}; + $ignore[$id] = 0 + unless defined($ignore[$id]); + } + + return if (!@ignore_errors); + + foreach my $item (@ignore_errors) { + die("unknown argument for --ignore-errors: '$item'") + unless exists($ERROR_ID{lc($item)}); + my $item_id = $ERROR_ID{lc($item)}; + $ignore[$item_id] += 1; + } +} + +sub parse_expected_message_counts(@) +{ + my @constraints = split($split_char, join($split_char, @_)); + # parse the list and look for errors.. + foreach my $c (@constraints) { + if ($c =~ /^s*(\S+?)\s*:\s*((\d+)|(.+?))\s*$/) { + unless (exists($ERROR_ID{lc($1)})) { + lcovutil::ignorable_error($lcovutil::ERROR_USAGE, + "unknown 'expected-message-count' message type \"$1\"."); + next; + } + + my $id = $ERROR_ID{lc($1)}; + if (defined($expected_message_count[$id])) { + my $ignore = $lcovutil::ignore[$lcovutil::ERROR_USAGE]; + lcovutil::ignorable_error($lcovutil::ERROR_USAGE, + "duplicate 'expected' constraint '$c'" . + ($ignore ? ': ignoring.' : '')); + next; + } + # check if syntax look reasonable + my $expr = $2; + if (Scalar::Util::looks_like_number($expr)) { + $expected_message_count[$id] = "%C == $expr"; + next; + } + lcovutil::ignorable_error($lcovutil::ERROR_USAGE, + "expect-message-count constraint '$c' does not appear to depend on message count: '%C' substitution not found." + ) unless ($expr =~ /%C/); + + # now lets try an eval + my $v = $expr; + $v =~ s/%C/0/g; + $v = eval $v; + if (defined($v)) { + $expected_message_count[$id] = $expr; + } else { + my $ignore = $lcovutil::ignore[$lcovutil::ERROR_USAGE]; + lcovutil::ignorable_error($lcovutil::ERROR_USAGE, + "eval error in 'expect-message-count' constraint '$c': $@" + . ($ignore ? ': ignoring.' : '')); + } + } else { + lcovutil::ignorable_error($lcovutil::ERROR_USAGE, + "malformed expected-message-count constraint \"$c\". Expected 'msg_type = expr'." + ); + } + } +} + +sub message_count($) +{ + my $code = shift; + + return $message_count[$code]; +} + +sub is_ignored($) +{ + my $code = shift; + die("invalid error code $code") + unless 0 <= $code && $code < scalar(@ignore); + return $ignore[$code] || (defined($stop_on_error) && 0 == $stop_on_error); +} + +our %explainOnce; # append explanation to first error/warning message (only) + +sub explain_once +{ + # NOTE: in parallel execution, the explanations may appear more than + # once - e.g., when two or more child processes generate them + # simultaneously. + # They will eventually update the parent process state such that + # subsequent children won't report the issues. + my $key = shift; + if (!exists($explainOnce{$key})) { + $explainOnce{$key} = 1; + my $msg = ''; + # each element is either a string or a pair of [string, predicate] + foreach my $e (@_) { + if ('ARRAY' eq ref($e)) { + $msg .= $e->[0] if defined($e->[1]) && $e->[1]; + } else { + $msg .= $e; + } + } + return $msg; + } + return ''; +} + +our %warnOnlyOnce; +our $deferWarnings = 0; +# if 'stop_on_error' is false, then certain errors should be emitted at most once +# (not relevant if stop_on_error is true - as we will exit after the error. 
+sub warn_once +{ + my ($msgType, $key) = @_; + return 0 + if (exists($warnOnlyOnce{$msgType}) && + exists($warnOnlyOnce{$msgType}{$key})); + $warnOnlyOnce{$msgType}{$key} = 1; + return 1; +} + +sub store_deferred_message +{ + my ($msgType, $isError, $key, $msg) = @_; + die( + "unexpected deferred value of $msg->$key: $warnOnlyOnce{$msgType}{$key}") + unless 1 == $warnOnlyOnce{$msgType}{$key}; + if ($deferWarnings) { + $warnOnlyOnce{$msgType}{$key} = [$msg, $isError]; + } else { + if ($isError) { + lcovutil::ignorable_error($msgType, $msg); + } else { + lcovutil::ignorable_warning($msgType, $msg); + } + } +} + +sub merge_deferred_warnings +{ + my $hash = shift; + while (my ($type, $d) = each(%$hash)) { + while (my ($key, $m) = each(%$d)) { + if (!(exists($warnOnlyOnce{$type}) && + exists($warnOnlyOnce{$type}{$key}))) { + if ('ARRAY' eq ref($m)) { + # this is a + my ($msg, $isError) = @$m; + if ($isError) { + lcovutil::ignorable_error($type, $msg); + } else { + lcovutil::ignorable_warning($type, $msg); + } + } + $warnOnlyOnce{$type}{$key} = 1; + } + } + } +} + +sub initial_state +{ + # a bit of a hack: this method is called at the start of each + # child process - so use it to record that we are executing in a + # child. + # The flag is used to reduce verbosity from children - and possibly + # for other things later + $lcovutil::in_child_process = 1; + + # keep track of number of warnings, etc. generated in child - + # so we can merge back into parent. This may prevent us from + # complaining about the same thing in multiple children - but only + # if those children don't execute in parallel. + %message_types = (); #reset + $ReadCurrentSource::searchPath->reset(); + # clear profile - want only my contribution + %lcovutil::profileData = (); + %lcovutil::warnOnlyOnce = (); + + # clear pattern counts so we can update number found in children + foreach my $patType (\@lcovutil::exclude_file_patterns, + \@lcovutil::include_file_patterns, + \@lcovutil::file_subst_patterns, + \@lcovutil::omit_line_patterns, + \@lcovutil::exclude_function_patterns, + ) { + foreach my $p (@$patType) { + $p->[-1] = 0; + } + } + + return Storable::dclone([\@message_count, \%versionCache, \%resolveCache]); +} + +sub compute_update +{ + my $state = shift; + my @new_count; + my ($initialCount, $initialVersionCache, $initialResolveCache) = @$state; + my $id = 0; + foreach my $count (@message_count) { + my $v = $count - $initialCount->[$id++]; + push(@new_count, $v); + } + my %versionUpdate; + while (my ($f, $v) = each(%versionCache)) { + $versionUpdate{$f} = $v + unless exists($initialVersionCache->{$f}); + } + my %resolveUpdate; + while (my ($f, $v) = each(%resolveCache)) { + $resolveUpdate{$f} = $v + unless exists($initialResolveCache->{$f}); + } + my @rtn = (\@new_count, + \%versionUpdate, + \%resolveUpdate, + \%message_types, + $ReadCurrentSource::searchPath->current_count(), + \%lcovutil::profileData, + \%lcovutil::warnOnlyOnce, + \%lcovutil::explainOnce); + + foreach my $patType (\@lcovutil::exclude_file_patterns, + \@lcovutil::include_file_patterns, + \@lcovutil::file_subst_patterns, + \@lcovutil::omit_line_patterns, + \@lcovutil::exclude_function_patterns, + ) { + my @count; + foreach my $p (@$patType) { + push(@count, $p->[-1]); + } + push(@rtn, \@count); + } + + return \@rtn; +} + +sub update_state +{ + my $updateCount = shift; + my $id = 0; + foreach my $count (@$updateCount) { + $message_count[$id++] += $count; + } + my $updateVersionCache = shift; + while (my ($f, $v) = each(%$updateVersionCache)) { + 
lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + "unexpected version entry") + if exists($versionCache{$f}) && $versionCache{$f} ne $v; + $versionCache{$f} = $v; + } + my $updateResolveCache = shift; + while (my ($f, $v) = each(%$updateResolveCache)) { + lcovutil::ignorable_error($lcovutil::ERROR_INTERNAL, + "unexpected resolve entry") + if exists($resolveCache{$f}) && $resolveCache{$f} ne $v; + $resolveCache{$f} = $v; + } + my $msgTypes = shift; + while (my ($type, $h) = each(%$msgTypes)) { + while (my ($err, $count) = each(%$h)) { + if (exists($message_types{$type}) && + exists($message_types{$type}{$err})) { + $message_types{$type}{$err} += $count; + } else { + $message_types{$type}{$err} = $count; + } + } + } + my $searchCount = shift; + $ReadCurrentSource::searchPath->update_count(@$searchCount); + + my $profile = shift; + lcovutil::merge_child_profile($profile); + my $warnOnce = shift; + lcovutil::merge_deferred_warnings($warnOnce); + my $explainOnce = shift; + while (my ($key, $v) = each(%$explainOnce)) { + $lcovutil::explainOnce{$key} = $v; + } + + foreach my $patType (\@lcovutil::exclude_file_patterns, + \@lcovutil::include_file_patterns, + \@lcovutil::file_subst_patterns, + \@lcovutil::omit_line_patterns, + \@lcovutil::exclude_function_patterns, + ) { + my $count = shift; + die("unexpected pattern count") unless $#$count == $#$patType; + foreach my $p (@$patType) { + $p->[-1] += shift @$count; + } + } + die("unexpected update data") unless -1 == $#_; # exhausted list +} + +sub warnSuppress($$) +{ + my ($code, $errName) = @_; + + if ($ignore[$code] <= 1 && # don't warn if already suppressed + $message_count[$code] == ($suppressAfter + 1) + ) { + # explain once per error type, if verbose - else only once + my $explain = explain_once( + 'error_count' . ($lcovutil::verbose ? $errName : ''), + "\n\tTo increase or decrease this limit use '--rc max_message_count=value'." 
+ ); + ignorable_warning($ERROR_COUNT, + "max_message_count=$suppressAfter reached for '$errName' messages: no more will be reported.$explain" + ); + } +} + +sub _count_message($$) +{ + my ($type, $name) = @_; + + $message_types{$type}{$name} = 0 + unless (exists($message_types{$type}) && + exists($message_types{$type}{$name})); + ++$message_types{$type}{$name}; +} + +sub saw_error +{ + # true if we saw at least one error when 'stop_on_error' is false + # enables us to return non-zero exit status if any errors were detected + return exists($message_types{error}); +} + +sub ignorable_error($$;$) +{ + my ($code, $msg, $quiet) = @_; + die("undefined error code for '$msg'") unless defined($code); + + my $errName = "code_$code"; + $errName = $ERROR_NAME{$code} + if exists($ERROR_NAME{$code}); + + if ($message_count[$code]++ >= $suppressAfter && + 0 < $suppressAfter) { + # safe to just continue without checking anything else - as either + # this message is not fatal and we emitted it some number of times, + # or the message is fatal - and this is the first time we see it + + _count_message('ignore', $errName); + # warn that we are suppressing from here on - for the first skipped + # message of this type + warnSuppress($code, $errName); + return; + } + + chomp($msg); # we insert the newline + if ($code >= scalar(@ignore) || + !$ignore[$code]) { + my $ignoreOpt = + "\t(use \"$tool_name --ignore-errors $errName ...\" to bypass this error)\n"; + $ignoreOpt = '' + if ($lcovutil::in_child_process || + !($lcovutil::verbose || $message_count[$code] == 1)); + if (defined($stop_on_error) && 0 == $stop_on_error) { + _count_message('error', $errName); + warn_handler("($errName) $msg\n$ignoreOpt", 1); + return; + } + _count_message('error', $errName); + die_handler("($errName) $msg\n$ignoreOpt"); + } + # only tell the user how to suppress this on the first occurrence + my $ignoreOpt = + "\t(use \"$tool_name --ignore-errors $errName,$errName ...\" to suppress this warning)\n"; + $ignoreOpt = '' + if ($lcovutil::in_child_process || + !($lcovutil::verbose || $message_count[$code] == 1)); + + if ($ignore[$code] > 1 || (defined($quiet) && $quiet)) { + _count_message('ignore', $errName); + } else { + _count_message('warning', $errName); + warn_handler("($errName) $msg\n$ignoreOpt", 0); + } +} + +sub ignorable_warning($$;$) +{ + my ($code, $msg, $quiet) = @_; + if ($lcovutil::treat_warning_as_error) { + ignorable_error($code, $msg, $quiet); + return; + } + die("undefined error code for '$msg'") unless defined($code); + + my $errName = "code_$code"; + $errName = $ERROR_NAME{$code} + if exists($ERROR_NAME{$code}); + if ($message_count[$code]++ >= $suppressAfter && + 0 < $suppressAfter) { + # warn that we are suppressing from here on - for the first skipped + # message of this type + warnSuppress($code, $errName); + _count_message('ignore', $errName); + return; + } + chomp($msg); # we insert the newline + if ($code >= scalar(@ignore) || + !$ignore[$code]) { + # only tell the user how to suppress this on the first occurrence + my $ignoreOpt = + "\t(use \"$tool_name --ignore-errors $errName,$errName ...\" to suppress this warning)\n"; + $ignoreOpt = '' + if ($lcovutil::in_child_process || + !($lcovutil::verbose || $message_count[$code] == 1)); + warn_handler("($errName) $msg\n$ignoreOpt", 0); + _count_message('warning', $errName); + } else { + _count_message('ignore', $errName); + } +} + +sub report_unknown_child +{ + my $child = shift; + # this can happen if the user loads a callback module which starts a chaild + # 
process when it is loaded or initialized and fails to wait for that child + # to finish. How it manifests is an orphan PID which is smaller (older) + # than any of the children that this parent actually scheduled + lcovutil::ignorable_error($lcovutil::ERROR_CHILD, + "found unknown process $child while waiting for parallel child:\n perhaps you forgot to close a process in your callback?" + ); +} + +sub report_fork_failure +{ + my ($when, $errcode, $failedAttempts) = @_; + if ($failedAttempts > $lcovutil::max_fork_fails) { + lcovutil::ignorable_error($lcovutil::ERROR_PARALLEL, + "$failedAttempts consecutive fork() failures: consider reduced parallelism or increase the max_fork_fails limit. See man(5) lcovrc." + ); + } + my $explain = explain_once('fork_fail', + ["\n\tUse '$tool_name --ignore_errors " . + $ERROR_NAME{$ERROR_FORK} . + "' to bypass error and retry.", + $ignore[$lcovutil::ERROR_FORK] == 0 + ]); + my $retry = + lcovutil::is_ignored($lcovutil::ERROR_FORK) ? ' (retrying)' : ''; + lcovutil::ignorable_error($lcovutil::ERROR_FORK, + "fork() syscall failed while trying to $when: " . + $errcode . $retry . $explain); + # if errors were ignored, then we wait for a while (in parent) + # before re-trying. + sleep($lcovutil::fork_fail_timeout); +} + +sub report_exit_status +{ + my ($errType, $message, $exitstatus, $prefix, $suffix) = @_; + my $status = $exitstatus >> 8; + my $signal = $exitstatus & 0xFF; + my $explain = + "$prefix " . + ($exitstatus ? "returned non-zero exit status $status" : 'failed') . + MessageContext::context(); + if ($signal) { + $explain = + "$prefix died died due to signal $signal (SIG" . + (split(' ', $Config{sig_name}))[$signal] . + ')' . MessageContext::context() . + ': possibly killed by OS due to out-of-memory'; + $explain .= + lcovutil::explain_once('out_of_memory', + ' - see --memory and --parallel options for throttling'); + } + ignorable_error($errType, "$message: $explain$suffix"); +} + +sub report_parallel_error +{ + my $operation = shift; + my $errno = shift; + my $pid = shift; + my $childstatus = shift; + my $msg = shift; + # kill all my remaining children so user doesn't see unexpected console + # messages from dangling children (who cannot open files because the + # temp directory has been deleted, and so forth) + kill(9, @_) if @_ && !is_ignored($errno); + report_exit_status($errno, "$operation: '$msg'", + $childstatus, "child $pid", + " (try removing the '--parallel' option)"); +} + +sub report_format_error($$$$) +{ + my ($errType, $countType, $count, $obj) = @_; + my $context = MessageContext::context(); + my $explain = + explain_once( + 'err_negative', + ["\n\tPerhaps you need to compile with '-fprofile-update=atomic'.", + ($lcovutil::ERROR_NEGATIVE == $errType && + 'geninfo' eq $lcovutil::tool_name) + ]); + my $errStr = + $lcovutil::ERROR_NEGATIVE == $errType ? 'negative' : + ($lcovutil::ERROR_FORMAT == $errType ? 
+ lcovutil::ignorable_error($errType,
+ "Unexpected $errStr $countType count '$count' for $obj$context.$explain"
+ );
+}
+
+sub check_parent_process
+{
+ die("must call from child process") unless $lcovutil::in_child_process;
+ # if parent PID changed to 1 (init) - then my parent went away so
+ # I should exit now
+ # for reasons which are unclear to me, the PPID is sometimes unchanged
+ # after the parent process dies - so we also check whether we can send it a signal
+ my $ppid = getppid();
+ lcovutil::info(2, "check_parent_process($$) = $ppid\n");
+ if (1 == getppid() ||
+ 1 != kill(0, $ppid)) {
+ lcovutil::ignorable_error($lcovutil::ERROR_PARENT,
+ "parent process died during '--parallel' execution - child $$ cannot continue."
+ );
+ exit(0);
+ }
+}
+
+sub is_filter_enabled
+{
+ # return true if there is an opportunity for filtering
+ return (grep({ defined($_) } @lcovutil::cov_filter) ||
+ 0 != scalar(@lcovutil::omit_line_patterns) ||
+ 0 != scalar(@lcovutil::exclude_function_patterns));
+}
+
+sub init_filters
+{
+ # initialize filter index numbers and mark that all filters are disabled.
+ my $idx = 0;
+ foreach my $item (sort keys(%COVERAGE_FILTERS)) {
+ my $ref = $COVERAGE_FILTERS{$item};
+ $COVERAGE_FILTERS{$item} = $idx;
+ $$ref = $idx;
+ $cov_filter[$idx++] = undef;
+ }
+}
+
+sub parse_cov_filters(@)
+{
+ my @filters = split($split_char, join($split_char, @_));
+
+ goto final if (!@filters);
+
+ foreach my $item (@filters) {
+ die("unknown argument for --filter: '$item'\n")
+ unless exists($COVERAGE_FILTERS{lc($item)});
+ my $item_id = $COVERAGE_FILTERS{lc($item)};
+
+ $cov_filter[$item_id] = [$item, 0, 0];
+ }
+ if ($cov_filter[$FILTER_LINE]) {
+ # when line filtering is enabled, turn on brace and blank filtering as well
+ # (backward compatibility)
+ $cov_filter[$FILTER_LINE_CLOSE_BRACE] = ['brace', 0, 0];
+ $cov_filter[$FILTER_BLANK_LINE] = ['blank', 0, 0];
+ }
+ if ((defined($cov_filter[$FILTER_BRANCH_NO_COND]) ||
+ defined($cov_filter[$FILTER_EXCLUDE_BRANCH])) &&
+ !($br_coverage || $mcdc_coverage)
+ ) {
+ lcovutil::ignorable_warning($ERROR_USAGE,
+ "branch filter enabled but neither branch nor condition coverage is enabled"
+ );
+ }
+ lcovutil::ignorable_warning($ERROR_USAGE,
+ "'mcdc' filter enabled but MC/DC coverage is not enabled.")
+ if (defined($cov_filter[$FILTER_MCDC_SINGLE]) &&
+ !$mcdc_coverage);
+ if ($cov_filter[$FILTER_BRANCH_NO_COND]) {
+ # turn on exception and orphan filtering too
+ $cov_filter[$FILTER_EXCEPTION_BRANCH] = ['exception', 0, 0];
+ $cov_filter[$FILTER_ORPHAN_BRANCH] = ['orphan', 0, 0];
+ }
+ final:
+ if (@lcovutil::omit_line_patterns) {
+ $lcovutil::FILTER_OMIT_PATTERNS = scalar(@lcovutil::cov_filter);
+ push(@lcovutil::cov_filter, ['omit_lines', 0, 0]);
+ $lcovutil::COVERAGE_FILTERS{'omit_lines'} =
+ $lcovutil::FILTER_OMIT_PATTERNS;
+ }
+}
+
+sub summarize_cov_filters
+{
+ # use verbosity level -1: so print unless user says "-q -q"...really quiet
+
+ my $leader = "Filter suppressions:\n";
+ for my $key (keys(%COVERAGE_FILTERS)) {
+ my $id = $COVERAGE_FILTERS{$key};
+ next unless defined($lcovutil::cov_filter[$id]);
+ my $histogram = $lcovutil::cov_filter[$id];
+ next if 0 == $histogram->[-2];
+ my $points = '';
+ if ($histogram->[-2] != $histogram->[-1]) {
+ $points = ' ' . $histogram->[-1] . ' coverpoint' .
+ ($histogram->[-1] > 1 ? 's' : '') . "\n";
+ }
+ info(-1,
+ "$leader $key:\n " . $histogram->[-2] . " instance" .
+ ($histogram->[-2] > 1 ? "s" : "") . "\n" .
$points); + $leader = ''; + } + foreach my $q (['omit-lines', 'line', \@omit_line_patterns], + ['erase-functions', 'function', \@exclude_function_patterns]) { + my ($opt, $type, $patterns) = @$q; + my $patternCount = scalar(@$patterns); + if ($patternCount) { + my $omitCount = 0; + foreach my $p (@$patterns) { + $omitCount += $p->[-1]; + } + info(-1, + "Omitted %d total $type%s matching %d '--$opt' pattern%s\n", + $omitCount, + $omitCount == 1 ? '' : 's', + $patternCount, + $patternCount == 1 ? '' : 's'); + } + } +} + +sub disable_cov_filters +{ + # disable but return current status - so they can be re-enabled + my @filters = @lcovutil::cov_filter; + foreach my $f (@lcovutil::cov_filter) { + $f = undef; + } + my @omit = @lcovutil::omit_line_patterns; + @lcovutil::omit_line_patterns = (); + my @erase = @lcovutil::exclude_function_patterns; + @lcovutil::exclude_function_patterns = (); + return [\@filters, \@omit, \@erase]; +} + +sub reenable_cov_filters +{ + my $data = shift; + my $filters = $data->[0]; + # re-enable in the same order + for (my $i = 0; $i < scalar(@$filters); $i++) { + $cov_filter[$i] = $filters->[$i]; + } + @lcovutil::omit_line_patterns = @{$data->[1]}; + @lcovutil::exclude_function_patterns = @{$data->[2]}; +} + +sub filterStringsAndComments +{ + my $src_line = shift; + + # remove compiler directives + $src_line =~ s/^\s*#.*$//g; + # remove comments + $src_line =~ s#(/\*.*?\*/|//.*$)##g; + # remove strings + $src_line =~ s/\\"//g; + $src_line =~ s/"[^"]*"//g; + + return $src_line; +} + +sub simplifyCode +{ + my $src_line = shift; + + # remove comments + $src_line = filterStringsAndComments($src_line); + # remove some keywords.. + $src_line =~ s/\b(const|volatile|typename)\b//g; + #collapse nested class names + # remove things that look like template names + my $id = '(::)?\w+\s*(::\s*\w+\s*)*'; + while (1) { + my $current = $src_line; + $src_line =~ s/<\s*${id}(,\s*${id})*([*&]\s*)?>//g; + last if $src_line eq $current; + } + # remove ref and pointer decl + $src_line =~ s/^\s*$id[&*]\s*($id)/$3/g; + # cast which contains optional location spec + my $cast = "\\s*${id}(\\s+$id)?[*&]\\s*"; + # C-style cast - with optional location spec + $src_line =~ s/\($cast\)//g; + $src_line =~ s/\b(reinterpret|dynamic|const)_cast<$cast>//g; + # remove addressOf that follows an open paren or a comma + #$src_line =~ s/([(,])\s*[&*]\s*($id)/$1 $2/g; + + # remove some characters which might look like conditionals + $src_line =~ s/(->|>>|<<|::)//g; + + return $src_line; +} + +sub balancedParens +{ + my $line = shift; + + my $open = 0; + my $close = 0; + + foreach my $char (split('', $line)) { + if ($char eq '(') { + ++$open; + } elsif ($char eq ')') { + ++$close; + } + } + return ($open == $close || + # lambda code may have trailing parens after the function... + ($close > $open && $line =~ /{lambda\(/) + ); # this is a C++-specific check +} + +# +# is_external(filename) +# +# Determine if a file is located outside of the specified data directories. +# + +sub is_external($) +{ + my $filename = shift; + + # nothing is 'external' unless the user has requested "--no-external" + return 0 unless (defined($opt_no_external) && $opt_no_external); + + foreach my $dir (@internal_dirs) { + return 0 + if (($lcovutil::case_insensitive && $filename =~ /^\Q$dir\E/i) || + (!$lcovutil::case_insensitive && $filename =~ /^\Q$dir\E/)); + } + return 1; +} + +# +# rate(hit, found[, suffix, precision, width]) +# +# Return the coverage rate [0..100] for HIT and FOUND values. 0 is only +# returned when HIT is 0. 
100 is only returned when HIT equals FOUND. +# PRECISION specifies the precision of the result. SUFFIX defines a +# string that is appended to the result if FOUND is non-zero. Spaces +# are added to the start of the resulting string until it is at least WIDTH +# characters wide. +# + +sub rate($$;$$$) +{ + my ($hit, $found, $suffix, $precision, $width) = @_; + + # Assign defaults if necessary + $precision = $default_precision + if (!defined($precision)); + $suffix = "" if (!defined($suffix)); + $width = 0 if (!defined($width)); + + return sprintf("%*s", $width, "-") if (!defined($found) || $found == 0); + my $rate = sprintf("%.*f", $precision, $hit * 100 / $found); + + # Adjust rates if necessary + if ($rate == 0 && $hit > 0) { + $rate = sprintf("%.*f", $precision, 1 / 10**$precision); + } elsif ($rate == 100 && $hit != $found) { + $rate = sprintf("%.*f", $precision, 100 - 1 / 10**$precision); + } + + return sprintf("%*s", $width, $rate . $suffix); +} + +# +# get_overall_line(found, hit, type) +# +# Return a string containing overall information for the specified +# found/hit data. +# + +sub get_overall_line($$$) +{ + my ($found, $hit, $name) = @_; + return "no data found" if (!defined($found) || $found == 0); + + my $plural = + ($found == 1) ? "" : (('ch' eq substr($name, -2, 2)) ? 'es' : 's'); + + return lcovutil::rate($hit, $found, "% ($hit of $found $name$plural)"); +} + +# Make sure precision is within valid range [1:4] +sub check_precision() +{ + die("specified precision is out of range (1 to 4)\n") + if ($default_precision < 1 || $default_precision > 4); +} + +# use vanilla color palette. +sub use_vanilla_color() +{ + for my $tla (('CBC', 'GNC', 'GIC', 'GBC')) { + $lcovutil::tlaColor{$tla} = "#CAD7FE"; + $lcovutil::tlaTextColor{$tla} = "#98A0AA"; + } + for my $tla (('UBC', 'UNC', 'UIC', 'LBC')) { + $lcovutil::tlaColor{$tla} = "#FF6230"; + $lcovutil::tlaTextColor{$tla} = "#AA4020"; + } + for my $tla (('EUB', 'ECB')) { + $lcovutil::tlaColor{$tla} = "#FFFFFF"; + $lcovutil::tlaTextColor{$tla} = "#AAAAAA"; + } +} + +my $didFirstExistenceCheck; + +sub fileExistenceBeforeCallbackError +{ + my $filename = shift; + if ($lcovutil::check_file_existence_before_callback && + !-e $filename) { + + my $explanation = + $didFirstExistenceCheck ? '' : + ' Use \'check_existence_before_callback = 0\' config file option to remove this check.'; + lcovutil::ignorable_error($lcovutil::ERROR_SOURCE, + "\"$filename\" does not exist." . 
$explanation); + $didFirstExistenceCheck = 1; + return 1; + } + return 0; +} + +# figure out what file version we see +sub extractFileVersion +{ + my $filename = shift; + + return undef + unless $versionCallback; + return $versionCache{$filename} if exists($versionCache{$filename}); + + return undef if fileExistenceBeforeCallbackError($filename); + + my $start = Time::HiRes::gettimeofday(); + my $version; + eval { $version = $versionCallback->extract_version($filename); }; + if ($@) { + my $context = MessageContext::context(); + lcovutil::ignorable_error($lcovutil::ERROR_CALLBACK, + "extract_version($filename) failed$context: $@"); + } + my $end = Time::HiRes::gettimeofday(); + if (exists($lcovutil::profileData{version}) && + exists($lcovutil::profileData{version}{$filename})) { + $lcovutil::profileData{version}{$filename} += $end - $start; + } else { + $lcovutil::profileData{version}{$filename} = $end - $start; + } + $versionCache{$filename} = $version; + return $version; +} + +sub checkVersionMatch +{ + my ($filename, $me, $you, $reason, $silent) = @_; + + return 1 + if defined($me) && defined($you) && $me eq $you; # simple string compare + + if ($versionCallback) { + # work harder + my $status; + eval { + $status = $versionCallback->compare_version($you, $me, $filename); + }; + if ($@) { + my $context = MessageContext::context(); + lcovutil::ignorable_error($lcovutil::ERROR_CALLBACK, + "compare_version($you, $me, $filename) failed$context: $@"); + $status = 1; + } + lcovutil::info(1, "compare_version: $status\n"); + return 1 unless $status; # match if return code was zero + } + lcovutil::ignorable_error($ERROR_VERSION, + (defined($reason) ? ($reason . ' ') : '') . + "$filename: revision control version mismatch: " . + (defined($me) ? $me : 'undef') . + ' <- ' . (defined($you) ? $you : 'undef')) + unless $silent; + # claim mismatch unless $me and $you are both undef + return !(defined($me) || defined($you)); +} + +# +# parse_w3cdtf(date_string) +# +# Parse date string in W3CDTF format into DateTime object. +# +my $have_w3cdtf; + +sub parse_w3cdtf($) +{ + if (!defined($have_w3cdtf)) { + # check to see if the package is here for us to use.. 
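+ # (we probe for DateTime::Format::W3CDTF lazily, on the first call;
+ # if the 'require' fails we fall back to the hand-rolled parser below,
+ # which accepts the W3CDTF profiles YYYY, YYYY-MM, YYYY-MM-DD and the
+ # date-time forms with optional fractional seconds and timezone -
+ # e.g. "2024-05-01T12:30:45.5+02:00")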
+ $have_w3cdtf = 1; + eval { + require DateTime::Format::W3CDTF; + DateTime::Format::W3CDTF->import(); + }; + if ($@) { + # package not there - fall back + lcovutil::ignorable_warning($lcovutil::ERROR_PACKAGE, + 'package DateTime::Format::W3CDTF is not available - falling back to local implementation' + ); + $have_w3cdtf = 0; + } + } + my $str = shift; + if ($have_w3cdtf) { + return DateTime::Format::W3CDTF->parse_datetime($str); + } + + my ($year, $month, $day, $hour, $min, $sec, $ns, $tz) = + (0, 1, 1, 0, 0, 0, 0, "Z"); + + if ($str =~ /^(\d\d\d\d)$/) { + # YYYY + $year = $1; + } elsif ($str =~ /^(\d\d\d\d)-(\d\d)$/) { + # YYYY-MM + $year = $1; + $month = $2; + } elsif ($str =~ /^(\d\d\d\d)-(\d\d)-(\d\d)$/) { + # YYYY-MM-DD + $year = $1; + $month = $2; + $day = $3; + } elsif ( + $str =~ /^(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d)(Z|[+-]\d\d:\d\d)?$/) { + # YYYY-MM-DDThh:mmTZD + $year = $1; + $month = $2; + $day = $3; + $hour = $4; + $min = $5; + $tz = $6 if defined($6); + } elsif ($str =~ + /^(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)(Z|[+-]\d\d:\d\d)?$/) { + # YYYY-MM-DDThh:mm:ssTZD + $year = $1; + $month = $2; + $day = $3; + $hour = $4; + $min = $5; + $sec = $6; + $tz = $7 if (defined($7)); + } elsif ($str =~ + /^(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)\.(\d+)(Z|[+-]\d\d:\d\d)?$/ + ) { + # YYYY-MM-DDThh:mm:ss.sTZD + $year = $1; + $month = $2; + $day = $3; + $hour = $4; + $min = $5; + $sec = $6; + $ns = substr($7 . "00000000", 0, 9); + $tz = $8 if (defined($8)); + } else { + die("Invalid W3CDTF date format: $str\n"); + } + + return + DateTime->new(year => $year, + month => $month, + day => $day, + hour => $hour, + minute => $min, + second => $sec, + nanosecond => $ns, + time_zone => $tz,); +} + +package HTML_fileData; + +use constant { + NAME => 0, + PARENT => 1, + HREFS => 2, + ANCHORS => 3, +}; + +sub new +{ + my ($class, $parentDir, $filename) = @_; + + my $self = [$parentDir, $filename, [], {}]; + + my $name = File::Spec->catfile($parentDir, $filename); + + open(HTML, '<', $name) or die("unable to open $name: $!"); + while () { + if (/<(a|span) .*id=\"([^\"]+)\"/) { + lcovutil::ignorable_error($lcovutil::ERROR_USAGE, + "\"$name\":$.: duplicate anchor '$2' original at " . + $self->[ANCHORS]->{$2} . '.') + if exists($self->[ANCHORS]->{$2}); + $self->[ANCHORS]->{$2} = $.; + } elsif (/[HREFS]}, [$., $1, $3]); # lineNo, filename, anchor + } elsif (/[HREFS]}, [$., $1, $3]); # lineNo, filename, anchor + } + } + close(HTML) or die("unable to close $name: $!"); + + return bless $self, $class; +} + +sub verifyAnchor +{ + my ($self, $anchor) = @_; + + return exists($self->[ANCHORS]->{$anchor}); +} + +sub hrefs +{ + my $self = shift; + return $self->[HREFS]; +} + +package ValidateHTML; + +sub new +{ + my ($class, $topDir, $htmlExt) = @_; + my $self = {}; + + $htmlExt = '.html' unless defined($htmlExt); + + my @dirstack = ($topDir); + my %visited; + while (@dirstack) { + my $top = pop(@dirstack); + die("unexpected link $top") if -l $top; + opendir(my $dh, $top) or die("can't open directory $top: $!"); + while (my $e = readdir($dh)) { + next if $e eq '.' 
|| $e eq '..'; + my $p = File::Spec->catfile($top, $e); + die("unexpected link $p") if -l $p; + if (-d $p) { + die("already visited $p") if exists($visited{$p}); + $visited{$p} = [$top, $e]; + push(@dirstack, $p); + } elsif (-f $p && + $p =~ /.+$htmlExt$/) { + die("duplicate file $p??") if exists($self->{$p}); + lcovutil::info(1, "schedule $p\n"); + $self->{$p} = HTML_fileData->new($top, $e); + } + } + closedir($dh); + } + my %fileReferred; + while (my ($filename, $data) = each(%$self)) { + my $dir = File::Basename::dirname($filename); + lcovutil::info(1, "verify $filename:\n"); + foreach my $href (@{$data->hrefs()}) { + my ($lineNo, $link, $anchor) = @$href; + my $path = File::Spec->catfile($dir, $link); + $path = File::Spec->abs2rel(Cwd::realpath($path), $main::cwd) + unless exists($self->{$path}); + lcovutil::info(1, + " $lineNo: $link" . ($anchor ? "#$anchor" : '') . "\n"); + unless (exists($self->{$path})) { + lcovutil::ignorable_error($lcovutil::ERROR_PATH, + "\"$filename\":$lineNo: non-existent file '$link'."); + next; + } + if (exists($fileReferred{$path})) { + # keep only one use + push(@{$fileReferred{$path}}, $filename) + if ($fileReferred{$path}->[-1] ne $filename); + } else { + $fileReferred{$path} = [$filename]; + } + + if (defined($anchor)) { + my $a = $self->{$path}; + unless ($a->verifyAnchor($anchor)) { + lcovutil::ignorable_error($lcovutil::ERROR_PATH, + "\"$filename\":$lineNo: \"$link#$anchor\" doesn't point to valid anchor." + ); + } + } + } + } + + while (my ($filename, $data) = each(%$self)) { + lcovutil::ignorable_error($lcovutil::ERROR_UNUSED, + "HTML file \"$filename\" is not referenced.") + unless (exists($fileReferred{$filename}) || + ($topDir eq File::Basename::dirname($filename) && + "index$htmlExt" eq File::Basename::basename($filename))); + } + return bless $self, $class; +} + +package CoverageCriteria; + +our @coverageCriteriaScript; +our $criteriaCallback; +our %coverageCriteria; # hash of name->(type, success 0/1, string) +our $coverageCriteriaStatus = 0; # set to non-zero if we see any errors +our @criteriaCallbackTypes; # include date, owner bin info +our @criteriaCallbackLevels; # call back at (top, directory, file) levels + +sub executeCallback +{ + my ($type, $name, $data) = @_; + + my ($status, $msgs); + eval { + ($status, $msgs) = + $criteriaCallback->check_criteria($name, $type, $data); + }; + if ($@) { + my $context = MessageContext::context(); + lcovutil::ignorable_error($lcovutil::ERROR_CALLBACK, + "check_criteria failed$context: $@"); + $status = 2; + $msgs = [$@]; + } + + $coverageCriteria{$name} = [$type, $status, $msgs] + if (0 != $status || + (defined $msgs && + 0 != scalar(@$msgs))); + $coverageCriteriaStatus = $status + if $status != 0; +} + +sub check_failUnder +{ + my $info = shift; + my $msg = $info->check_fail_under_criteria(); + if ($msg) { + $coverageCriteriaStatus |= 1; + $coverageCriteria{'top'} = ['top', 1, [$msg]]; + } +} + +sub summarize +{ + # print the criteria summary to stdout: + # all criteria fails + any non-empty messages + # In addition: print fails to stderr + # This way: Jenkins script can log failure if stderr is not empty + my $leader = ''; + if ($coverageCriteriaStatus != 0) { + print("Failed coverage criteria:\n"); + } else { + $leader = "Coverage criteria:\n"; + } + # sort to print top-level report first, then directories, then files. 
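+ # (the comparator below puts the 'top' entry first, orders 'file'
+ # entries after directory entries when the types differ, and falls
+ # back to lexical order by name within a type)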
+ foreach my $name (sort({ + my $da = $coverageCriteria{$a}; + my $db = $coverageCriteria{$b}; + my $ta = $da->[0]; + my $tb = $db->[0]; + return -1 if ($ta eq 'top'); + return 1 if ($tb eq 'top'); + if ($ta ne $tb) { + return $ta eq 'file' ? 1 : -1; + } + $a cmp $b + } + keys(%coverageCriteria)) + ) { + my $criteria = $coverageCriteria{$name}; + my $v = $criteria->[1]; + next if (!$v || $v == 0) && 0 == scalar(@{$criteria->[2]}); # passed + + my $msg = $criteria->[0]; + if ($criteria->[0] ne 'top') { + $msg .= " \"" . $name . "\""; + } + $msg .= ": \"" . join(' ', @{$criteria->[2]}) . "\"\n"; + print($leader); + $leader = ''; + print(" " . $msg); + if (0 != $criteria->[1]) { + print(STDERR $msg); + } + } +} + +package MessageContext; + +our @message_context; + +sub new +{ + my ($class, $str) = @_; + push(@message_context, $str); + my $self = [$str]; + return bless $self, $class; +} + +sub context +{ + my $context = join(' while ', @message_context); + $context = ' while ' . $context if $context; + return $context; +} + +sub DESTROY +{ + my $self = shift; + die('unbalanced context "' . $self->[0] . '" not head of ("' . + join('" "', @message_context) . '")') + unless scalar(@message_context) && $self->[0] eq $message_context[-1]; + pop(@message_context); +} + +package PipeHelper; + +sub new +{ + my $class = shift; + my $reason = shift; + + # backward compatibility: see if the arguments were passed in a + # one long string + my $args = \@_; + my $arglen = 'criteria' eq $reason ? 4 : 2; + if ($arglen == scalar(@_) && !-e $_[0]) { + # two arguments: a string (which seems not to be executable) and the + # file we are acting on + # After next release, issue 'deprecated' warning here. + my @args = split(' ', $_[0]); + push(@args, splice(@_, 1)); # append the rest of the args + $args = \@args; + } + + my $self = [$reason, join(' ', @$args)]; + bless $self, $class; + if (open(PIPE, "-|", @$args)) { + push(@$self, \*PIPE); + } else { + lcovutil::ignorable_error($lcovutil::ERROR_CALLBACK, + "$reason: 'open(-| " . $self->[1] . ")' failed: \"$!\""); + return undef; + } + return $self; +} + +sub next +{ + my $self = shift; + die("no handle") unless scalar(@$self) == 3; + my $hdl = $self->[2]; + return scalar <$hdl>; +} + +sub close +{ + # close pipe and return exit status + my ($self, $checkError) = @_; + close($self->[2]); + if (0 != $? && $checkError) { + # $reason: $cmd returned non-zero exit... + lcovutil::ignorable_error($lcovutil::ERROR_CALLBACK, + $self->[0] . ' \'' . $self->[1] . + "\' returned non-zero exit code: '$!'"); + } + pop(@$self); + return $?; +} + +sub DESTROY +{ + my $self = shift; + # FD can be undef if 'open' failed for any reason (e.g., filesystem issues) + # otherwise: don't close if FD was STDIN or STDOUT + CORE::close($self->[2]) + if 3 == scalar(@$self); +} + +package ScriptCaller; + +sub new +{ + my $class = shift; + my $self = [@_]; + return bless $self, $class; +} + +sub call +{ + my ($self, $reason, @args) = @_; + my $cmd = join(' ', @$self) . ' ' . join(' ', @args); + lcovutil::info(1, "$reason: \"$cmd\"\n"); + my $rtn = `$cmd`; + return $?; +} + +sub pipe +{ + my $self = shift; + my $reason = shift; + return PipeHelper->new($reason, @$self, @_); +} + +sub context +{ + my $self = shift; + lcovutil::info(1, 'context ' . join(' ', @$self) . "\n"); + my $iter = $self->pipe('context'); + return unless defined($iter); + my %context; + while (my $line = $iter->next()) { + chomp($line); + $line =~ s/\r//g; # remove CR from line-end + # first word on line is the key.. 
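+ # (each line of the callback's stdout is expected to look like
+ # "key value..." - the first whitespace-delimited token is taken as
+ # the key and the rest of the line as its value)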
+ my ($key, $value) = split(/ +/, $line, 2); + if (exists($context{key})) { + $context{key} .= "\n" . $value; + } else { + $context{key} = $value; + } + } + my $status = $iter->close(1); # check error return + + return \%context; +} + +sub extract_version +{ + my ($self, $filename) = @_; + my $version; + my $pipe = $self->pipe('extract_version', $filename); + if (defined $pipe && + ($version = $pipe->next())) { + chomp($version); + $version =~ s/\r//; + lcovutil::info(1, " version: $version\n"); + } + return $version; +} + +sub resolve +{ + my ($self, $filename) = @_; + my $path; + my $pipe = $self->pipe('resolve_filename', $filename); + if ($pipe && + ($path = $pipe->next())) { + chomp($path); + $path =~ s/\r//; + lcovutil::info(1, " resolve: $path\n"); + } + return $path; +} + +sub compare_version +{ + my ($self, $yours, $mine, $file) = @_; + return + $self->call('compare_version', '--compare', + "'$yours'", "'$mine'", + "'$file'"); +} + +# annotate callback is passed filename (as munged) - +# should return reference to array of line data, +# line data of the form list of: +# source_text: the content on that line +# abbreviated author name: (must be set to something - possibly NONE +# full author name: some string or undef +# date string: when this line was last changed +# commit ID: something meaningful to you +sub annotate +{ + my ($self, $filename) = @_; + lcovutil::info(1, 'annotate ' . join(' ', @$self) . ' ' . $filename . "\n"); + my $iter = $self->pipe('annotate', $filename); + return unless defined($iter); + my @lines; + while (my $line = $iter->next()) { + chomp $line; + $line =~ s/\r//g; # remove CR from line-end + + my ($commit, $author, $when, $text) = split(/\|/, $line, 4); + # semicolon is not a legal character in email address - + # so we use that to delimit the 'abbreviated name' and + # the 'full name' - in case they are different. + # this is an attempt to be backward-compatible with + # existing annotation scripts which return only one name + my ($abbrev, $full) = split(/;/, $author, 2); + push(@lines, [$text, $abbrev, $full, $when, $commit]); + } + my $status = $iter->close(); + + return ($status, \@lines); +} + +sub check_criteria +{ + my ($self, $name, $type, $data) = @_; + + my $iter = + $self->pipe('criteria', $name, $type, JsonSupport::encode($data)); + return (0) unless $iter; # constructor will have given error message + my @messages; + while (my $line = $iter->next()) { + chomp $line; + $line =~ s/\r//g; # remove CR from line-end + next if '' eq $line; + push(@messages, $line); + } + return ($iter->close(), \@messages); +} + +sub select +{ + my ($self, $lineData, $annotateData, $filename, $lineNo) = @_; + + my @params = ('select', + defined($lineData) ? + JsonSupport::encode($lineData->to_list()) : '', + defined($annotateData) ? + JsonSupport::encode($annotateData->to_list()) : '', + $filename, + $lineNo); + return $self->call(@params); +} + +sub simplify +{ + my ($self, $func) = @_; + + my $name; + my $pipe = $self->pipe('simplify', $func); + die("broken 'simplify' callback") + unless ($pipe && + ($name = $pipe->next())); + + chomp($name); + $name =~ s/\r//; + lcovutil::info(1, " simplify: $name\n"); + return $name; +} + +package JsonSupport; + +our $rc_json_module = 'auto'; + +our $did_init; + +# +# load_json_module(rc) +# +# If RC is "auto", load best available JSON module from a list of alternatives, +# otherwise load the module specified by RC. 
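+# For example, load_json_module('JSON::PP') forces the pure-Perl
+# implementation, while the default 'auto' probes JSON::XS,
+# Cpanel::JSON::XS, JSON::PP and JSON in that order and uses the first
+# module that is installed.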
+# +sub load_json_module($) +{ + my ($rc) = shift; + # List of alternative JSON modules to try + my @alternatives = ("JSON::XS", # Fast, but not always installed + "Cpanel::JSON::XS", # Fast, a more recent fork + "JSON::PP", # Slow, part of core-modules + "JSON", # Not available in all distributions + ); + + # Determine JSON module + if (lc($rc) eq "auto") { + for my $m (@alternatives) { + if (Module::Load::Conditional::check_install(module => $m)) { + $did_init = $m; + last; + } + } + + if (!defined($did_init)) { + die("No Perl JSON module found on your system. Please install of of the following supported modules: " + . join(" ", @alternatives) + . " - for example (as root):\n \$ perl -MCPAN -e 'install " + . $alternatives[0] + . "'\n"); + } + } else { + $did_init = $rc; + } + + eval "use $did_init qw(encode_json decode_json);"; + if ($@) { + die("Module is not installed: " . "'$did_init':$@\n"); + } + lcovutil::info(1, "Using JSON module $did_init\n"); + my ($index) = + grep { $alternatives[$_] eq $did_init } (0 .. @alternatives - 1); + warn( + "using JSON module \"$did_init\" - which is much slower than some alternatives. Consider installing one of " + . join(" or ", @alternatives[0 .. $index - 1])) + if (defined($index) && $index > 1); +} + +sub encode($) +{ + my $data = shift; + + load_json_module($rc_json_module) + unless defined($did_init); + + return encode_json($data); +} + +sub decode($) +{ + my $text = shift; + load_json_module($rc_json_module) + unless defined($did_init); + + return decode_json($text); +} + +sub load($) +{ + my $filename = shift; + my $f = InOutFile->in($filename); + my $h = $f->hdl(); + my @lines = <$h>; + return decode(join("\n", @lines)); +} + +package InOutFile; + +our $checkedGzipAvail; + +sub checkGzip +{ + # Check for availability of GZIP tool + lcovutil::system_no_output(1, "gzip", "-h") and + die("gzip command not available!\n"); + $checkedGzipAvail = 1; +} + +sub out +{ + my ($class, $f, $mode, $demangle) = @_; + $demangle = 0 unless defined($demangle); + + my $self = [undef, $f]; + bless $self, $class; + my $m = (defined($mode) && $mode eq 'append') ? ">>" : ">"; + + if (!defined($f) || + '-' eq $f) { + if ($demangle) { + open(HANDLE, '|-', $lcovutil::demangle_cpp_cmd) or + die("unable to demangle: $!\n"); + $self->[0] = \*HANDLE; + } else { + $self->[0] = \*STDOUT; + } + } else { + my $cmd = $demangle ? "$lcovutil::demangle_cpp_cmd " : ''; + if ($f =~ /\.gz$/) { + checkGzip() + unless defined($checkedGzipAvail); + $cmd .= '| ' if $cmd; + # Open compressed file + $cmd .= "gzip -c $m'$f'"; + open(HANDLE, "|-", $cmd) or + die("cannot start gzip to compress to file $f: $!\n"); + } else { + if ($demangle) { + $cmd .= "$m '$f'"; + } else { + $cmd .= $f; + } + open(HANDLE, $demangle ? '|-' : $m, $cmd) or + die("cannot write to $f: $!\n"); + } + $self->[0] = \*HANDLE; + } + return $self; +} + +sub in +{ + my ($class, $f, $demangle) = @_; + $demangle = 0 unless defined($demangle); + + my $self = [undef, $f]; + bless $self, $class; + + if (!defined($f) || + '-' eq $f) { + $self->[0] = \*STDIN; + } else { + if ($f =~ /\.gz$/) { + + checkGzip() + unless defined($checkedGzipAvail); + + die("file '$f' does not exist\n") + unless -f $f; + die("'$f': unsupported empty gzipped file\n") + if (-z $f); + # Check integrity of compressed file - fails for zero size file + lcovutil::system_no_output(1, "gzip", "-dt", $f) and + die("integrity check failed for compressed file $f!\n"); + + # Open compressed file + my $cmd = "gzip -cd '$f'"; + $cmd .= " | " . 
$lcovutil::demangle_cpp_cmd + if ($demangle); + open(HANDLE, "-|", $cmd) or + die("cannot start gunzip to decompress file $f: $!\n"); + + } elsif ($demangle && + defined($lcovutil::demangle_cpp_cmd)) { + open(HANDLE, "-|", "cat '$f' | $lcovutil::demangle_cpp_cmd") or + die("cannot start demangler for file $f: $!\n"); + } else { + # Open decompressed file + open(HANDLE, "<", $f) or + die("cannot read file $f: $!\n"); + } + $self->[0] = \*HANDLE; + } + return $self; +} + +sub DESTROY +{ + my $self = shift; + # FD can be undef if 'open' failed for any reason (e.g., filesystem issues) + # otherwise: don't close if FD was STDIN or STDOUT + close($self->[0]) + unless !defined($self->[1]) || + '-' eq $self->[1] || + !defined($self->[0]); +} + +sub hdl +{ + my $self = shift; + return $self->[0]; +} + +package SearchPath; + +sub new +{ + my $class = shift; + my $option = shift; + my $self = []; + bless $self, $class; + foreach my $p (@_) { + if (-d $p) { + push(@$self, [$p, 0]); + } else { + lcovutil::ignorable_error($lcovutil::ERROR_PATH, + "$option '$p' is not a directory"); + } + } + return $self; +} + +sub patterns +{ + my $self = shift; + return $self; +} + +sub resolve +{ + my ($self, $filename, $applySubstitutions) = @_; + $filename = lcovutil::subst_file_name($filename) if $applySubstitutions; + return $filename if -e $filename; + if (!File::Spec->file_name_is_absolute($filename)) { + foreach my $d (@$self) { + my $path = File::Spec->catfile($d->[0], $filename); + if (-e $path) { + lcovutil::info(1, "found $filename at $path\n"); + ++$d->[1]; + return $path; + } + } + } + return resolveCallback($filename, 0); +} + +sub resolveCallback +{ + my ($filename, $applySubstitutions, $returnCbValue) = @_; + $filename = lcovutil::subst_file_name($filename) if $applySubstitutions; + + if ($lcovutil::resolveCallback) { + return $lcovutil::resolveCache{$filename} + if exists($lcovutil::resolveCache{$filename}); + my $start = Time::HiRes::gettimeofday(); + my $path; + eval { $path = $resolveCallback->resolve($filename); }; + if ($@) { + my $context = MessageContext::context(); + lcovutil::ignorable_error($lcovutil::ERROR_CALLBACK, + "resolve($filename) failed$context: $@"); + } + # look up particular path at most once... + $lcovutil::resolveCache{$filename} = $path if $path; + my $cost = Time::HiRes::gettimeofday() - $start; + if (!$returnCbValue) { + $path = $filename unless $path; + } + my $p = $path ? $path : $filename; + if (exists($lcovutil::profileData{resolve}) && + exists($lcovutil::profileData{resolve}{$p})) { + # might see multiple aliases for the same source file + $lcovutil::profileData{resolve}{$p} += $cost; + } else { + $lcovutil::profileData{resolve}{$p} = $cost; + } + return $path; + } + return $filename; +} + +sub warn_unused +{ + my ($self, $optName) = @_; + foreach my $d (@$self) { + my $name = $d->[0]; + $name = "'$name'" if $name =~ /\s/; + if (0 == $d->[1]) { + lcovutil::ignorable_error($lcovutil::ERROR_UNUSED, + "\"$optName $name\" is unused."); + } else { + lcovutil::info(1, + "\"$optName $name\" used " . $d->[1] . " times\n"); + } + } +} + +sub reset +{ + my $self = shift; + foreach my $d (@$self) { + $d->[1] = 0; + } +} + +sub current_count +{ + my $self = shift; + my @rtn; + foreach my $d (@$self) { + push(@rtn, $d->[1]); + } + return \@rtn; +} + +sub update_count +{ + my $self = shift; + die("invalid update count: " . scalar(@$self) . ' ' . 
scalar(@_)) + unless ($#$self == $#_); + foreach my $d (@$self) { + $d->[1] += shift; + } +} + +package MapData; + +sub new +{ + my $class = shift; + my $self = {}; + bless $self, $class; + + return $self; +} + +sub is_empty +{ + my $self = shift; + return 0 == scalar(keys %$self); +} + +sub append_if_unset +{ + my $self = shift; + my $key = shift; + my $data = shift; + + if (!defined($self->{$key})) { + $self->{$key} = $data; + } + return $self; +} + +sub replace +{ + my $self = shift; + my $key = shift; + my $data = shift; + + $self->{$key} = $data; + + return $self; +} + +sub value +{ + my $self = shift; + my $key = shift; + + if (!exists($self->{$key})) { + return undef; + } + + return $self->{$key}; +} + +sub remove +{ + my ($self, $key, $check_is_present) = @_; + + if (!defined($check_is_present) || exists($self->{$key})) { + delete $self->{$key}; + return 1; + } + return 0; +} + +sub mapped +{ + my $self = shift; + my $key = shift; + + return defined($self->{$key}) ? 1 : 0; +} + +sub keylist +{ + my $self = shift; + return keys(%$self); +} + +sub entries +{ + my $self = shift; + return scalar(keys(%$self)); +} + +# Class definitions +package CountData; + +our $UNSORTED = 0; +our $SORTED = 1; + +use constant { + HASH => 0, + SORTABLE => 1, + FOUND => 2, + HIT => 3, + FILENAME => 4, +}; + +sub new +{ + my $class = shift; + my $filename = shift; + my $sortable = defined($_[0]) ? shift : $UNSORTED; + my $self = [{}, + $sortable, + 0, # found + 0, # hit + $filename, # for error messaging + ]; + bless $self, $class; + + return $self; +} + +sub filename +{ + my $self = shift; + return $self->[FILENAME]; +} + +sub append +{ + # return 1 if we hit something new, 0 if not (count was already non-zero) + # using $suppressErrMsg to avoid reporting same thing for bot the + # 'testcase' entry and the 'summary' entry + my ($self, $key, $count, $suppressErrMsg) = @_; + my $changed = 0; # hit something new or not + + if (!Scalar::Util::looks_like_number($count)) { + lcovutil::report_format_error($lcovutil::ERROR_FORMAT, 'hit', $count, + 'line "' . $self->filename() . ":$key\"") + unless $suppressErrMsg; + $count = 0; + } elsif ($count < 0) { + lcovutil::report_format_error($lcovutil::ERROR_NEGATIVE, + 'hit', + $count, + 'line ' . $self->filename() . ":$key\"" + ) unless $suppressErrMsg; + $count = 0; + } elsif (defined($lcovutil::excessive_count_threshold) && + $count > $lcovutil::excessive_count_threshold) { + lcovutil::report_format_error($lcovutil::ERROR_EXCESSIVE_COUNT, + 'hit', + $count, + 'line ' . $self->filename() . 
":$key\"" + ) unless $suppressErrMsg; + } + my $data = $self->[HASH]; + if (!exists($data->{$key})) { + $changed = 1; # something new - whether we hit it or not + $data->{$key} = $count; + ++$self->[FOUND]; # found + ++$self->[HIT] if ($count > 0); # hit + } else { + my $current = $data->{$key}; + if ($count > 0 && + $current == 0) { + ++$self->[HIT]; + $changed = 1; + } + $data->{$key} = $count + $current; + } + return $changed; +} + +sub value +{ + my $self = shift; + my $key = shift; + + my $data = $self->[HASH]; + if (!exists($data->{$key})) { + return undef; + } + return $data->{$key}; +} + +sub remove +{ + my ($self, $key, $check_if_present) = @_; + + my $data = $self->[HASH]; + if (!defined($check_if_present) || + exists($data->{$key})) { + + die("$key not found") + unless exists($data->{$key}); + --$self->[FOUND]; # found; + --$self->[HIT] # hit + if ($data->{$key} > 0); + + delete $data->{$key}; + return 1; + } + + return 0; +} + +sub found +{ + my $self = shift; + + return $self->[FOUND]; +} + +sub hit +{ + my $self = shift; + + return $self->[HIT]; +} + +sub keylist +{ + my $self = shift; + return keys(%{$self->[HASH]}); +} + +sub entries +{ + my $self = shift; + return scalar(keys(%{$self->[HASH]})); +} + +sub union +{ + my $self = shift; + my $info = shift; + + my $changed = 0; + foreach my $key ($info->keylist()) { + if ($self->append($key, $info->value($key))) { + $changed = 1; + } + } + return $changed; +} + +sub intersect +{ + my $self = shift; + my $you = shift; + my $changed = 0; + my $yourData = $you->[HASH]; + foreach my $key ($self->keylist()) { + if (exists($yourData->{$key})) { + # append your count to mine + if ($self->append($key, $you->value($key))) { + # returns true if appended count was not zero + $changed = 1; + } + } else { + $self->remove($key); + $changed = 1; + } + } + return $changed; +} + +sub difference +{ + my $self = shift; + my $you = shift; + my $changed = 0; + my $yourData = $you->[HASH]; + foreach my $key ($self->keylist()) { + if (exists($yourData->{$key})) { + $self->remove($key); + $changed = 1; + } + } + return $changed; +} + +# +# get_found_and_hit(hash) +# +# Return the count for entries (found) and entries with an execution count +# greater than zero (hit) in a hash (linenumber -> execution count) as +# a list (found, hit) +# +sub get_found_and_hit +{ + my $self = shift; + return ($self->[FOUND], $self->[HIT]); +} + +package BranchBlock; +# branch element: index, taken/not-taken count, optional expression +# for baseline or current data, 'taken' is just a number (or '-') +# for differential data: 'taken' is an array [$taken, tla] + +use constant { + ID => 0, + TAKEN => 1, + EXPR => 2, + EXCEPTION => 3, +}; + +sub new +{ + my ($class, $id, $taken, $expr, $is_exception) = @_; + # if branchID is not an expression - go back to legacy behaviour + my $self = [$id, $taken, + (defined($expr) && $expr eq $id) ? undef : $expr, + defined($is_exception) && $is_exception ? 1 : 0 + ]; + bless $self, $class; + my $c = $self->count(); + if (!Scalar::Util::looks_like_number($c)) { + lcovutil::report_format_error($lcovutil::ERROR_FORMAT, + 'taken', $c, 'branch ' . $self->id()); + $self->[TAKEN] = 0; + + } elsif ($c < 0) { + lcovutil::report_format_error($lcovutil::ERROR_NEGATIVE, + 'taken', $c, 'branch ' . $self->id()); + $self->[TAKEN] = 0; + } elsif (defined($lcovutil::excessive_count_threshold) && + $c > $lcovutil::excessive_count_threshold) { + lcovutil::report_format_error($lcovutil::ERROR_EXCESSIVE_COUNT, + 'taken', $c, 'branch ' . 
$self->id()); + } + return $self; +} + +sub isTaken +{ + my $self = shift; + return $self->[TAKEN] ne '-'; +} + +sub id +{ + my $self = shift; + return $self->[ID]; +} + +sub data +{ + my $self = shift; + return $self->[TAKEN]; +} + +sub count +{ + my $self = shift; + return $self->[TAKEN] eq '-' ? 0 : $self->[TAKEN]; +} + +sub expr +{ + my $self = shift; + return $self->[EXPR]; +} + +sub exprString +{ + my $self = shift; + my $e = $self->[EXPR]; + return defined($e) ? $e : 'undef'; +} + +sub is_exception +{ + my $self = shift; + return $self->[EXCEPTION]; +} + +sub merge +{ + # return 1 if something changed, 0 if nothing new covered or discovered + my ($self, $that, $filename, $line) = @_; + # should have called 'iscompatible' first + die('attempt to merge incompatible expressions for id' . + $self->id() . ', ' . $that->id() . + ": '" . $self->exprString() . "' -> '" . $that->exprString() . "'") + if ($self->exprString() ne $that->exprString()); + + if ($self->is_exception() != $that->is_exception()) { + my $loc = defined($filename) ? "\"$filename\":$line: " : ''; + lcovutil::ignorable_error($lcovutil::ERROR_MISMATCH, + "${loc}mismatched exception tag for id " . + $self->id() . ", " . $that->id() . + ": '" . $self->is_exception() . + "' -> '" . $that->is_exception() . "'"); + # set 'self' to 'not related to exception' - to give a consistent + # answer for the merge operation. Otherwise, we pick whatever + # was seen first - which is unpredictable during threaded execution. + $self->[EXCEPTION] = 0; + } + my $t = $that->[TAKEN]; + return 0 if $t eq '-'; # no new news + + my $count = $self->[TAKEN]; + my $changed; + if ($count ne '-') { + $count += $t; + $changed = $count == 0 && $t != 0; + } else { + $count = $t; + $changed = $t != 0; + } + $self->[TAKEN] = $count; + return $changed; +} + +package BranchEntry; +# hash of blockID -> array of BranchBlock refs for each sequential branch ID + +sub new +{ + my ($class, $line) = @_; + my $self = [$line, {}]; + bless $self, $class; + return $self; +} + +sub line +{ + my $self = shift; + return $self->[0]; +} + +sub hasBlock +{ + my ($self, $id) = @_; + return exists($self->[1]->{$id}); +} + +sub removeBlock +{ + my ($self, $id, $branchData) = @_; + $self->hasBlock($id) or die("unknown block $id"); + + # remove list of branches and adjust counts + $branchData->removeBranches($self->[1]->{$id}); + delete($self->[1]->{$id}); +} + +sub getBlock +{ + my ($self, $id) = @_; + $self->hasBlock($id) or die("unknown block $id"); + return $self->[1]->{$id}; +} + +sub blocks +{ + my $self = shift; + return keys %{$self->[1]}; +} + +sub addBlock +{ + my ($self, $blockId) = @_; + + !exists($self->[1]->{$blockId}) or die "duplicate block $blockId"; + my $blockData = []; + $self->[1]->{$blockId} = $blockData; + return $blockData; +} + +sub totals +{ + my $self = shift; + # return (found, hit) counts of coverpoints in this entry + my $found = 0; + my $hit = 0; + foreach my $blockId ($self->blocks()) { + my $bdata = $self->getBlock($blockId); + + foreach my $br (@$bdata) { + my $count = $br->count(); + ++$found; + ++$hit if (0 != $count); + } + } + return ($found, $hit); +} + +package MCDC_Block; + +# there may be more than one MCDC groups on a particular line - +# we hold the groups in a hash, keyed by size (number of MCDC_expressions) +# The particular group is a sorted list +use constant { + LINE => 0, + GROUPS => 1, +}; + +sub new +{ + my ($class, $line) = @_; + my $self = [$line, {}]; + + return bless $self, $class; +} + +sub insertExpr +{ + my ($self, 
$filename, $groupSize, $sense, $count, $idx, $expr) = @_; + my $groups = $self->[GROUPS]; + my $group; + if (exists($groups->{$groupSize})) { + $group = $groups->{$groupSize}; + } else { + $group = []; + $groups->{$groupSize} = $group; + } + my $cond; + if ($idx < scalar(@$group)) { + $cond = $group->[$idx]; + if ($cond->expression() ne $expr) { + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + "\"$filename\":" . $self->line() . + ": MC/DC group $groupSize expression $idx changed from '" . + $cond->expression() . "' to '$expr'"); + } + } else { + if ($idx != scalar(@$group)) { + lcovutil::ignorable_error($lcovutil::ERROR_FORMAT, + "\"$filename\":" . '":' . $self->line() . + ": MC/DC group $groupSize: non-contiguous expression '$idx' found - should be '" + . scalar(@$group) + . "'."); + } + $cond = MCDC_Expression->new($self, $groupSize, $idx, $expr); + push(@$group, $cond); + } + $cond->set($sense, $count); +} + +sub line +{ + return $_[0]->[LINE]; +} + +sub totals +{ + my $self = shift; + my $found = 0; + my $hit = 0; + while (my ($size, $group) = each(%{$self->groups()})) { + foreach my $expr (@$group) { + foreach my $sense (0, 1) { + my $count = $expr->count($sense); + if ('ARRAY' eq ref($count)) { + # differential number - report 'current' + next unless defined($count->[2]); # not in current + $count = $count->[2]; + } + ++$found; + ++$hit if 0 != $count; + } + } + } + return ($found, $hit); +} + +sub groups +{ + return $_[0]->[GROUPS]; +} + +sub num_groups +{ + return scalar(keys %{$_[0]->[GROUPS]}); +} + +sub expressions +{ + my ($self, $size) = @_; + return exists($self->[GROUPS]->{$size}) ? $self->[GROUPS]->{$size} : undef; +} + +sub expr +{ + my ($self, $groupSize, $idx) = @_; + return $self->[GROUPS]->{$groupSize}->[$idx]; +} + +sub is_compatible +{ + my ($self, $you) = @_; + + my $yours = $you->groups(); + my $groups = $self->groups(); + foreach my $size (keys %$groups) { + next unless exists($yours->{$size}); + my $idx = 0; + my $m = $groups->{$size}; + my $y = $yours->{$size}; + foreach my $e (@$m) { + my $ye = $y->[$idx++]; + return 0 if $e->expression() ne $ye->expression(); + } + } + return 1; +} + +sub merge +{ + # merge all groups from you into me + my ($self, $you) = @_; + + my $mine = $self->groups(); + my $yours = $you->groups(); + my $changed = 0; + while (my ($size, $group) = each(%$yours)) { + if (exists($mine->{$size})) { + my $m = $mine->{$size}; + my $idx = 0; + foreach my $e (@$m) { + my $y = $group->[$idx++]; + $changed += $e->set(1, $y->count(1)); + $changed += $e->set(0, $y->count(0)); + } + } else { + $mine->{$size} = Storable::dclone($group); + $changed = 1; + } + } + return $changed; +} + +package MCDC_Expression; + +use constant { + PARENT => 0, # MCDC_BLOCK + GROUP_SIZE => 1, # which group in parent + INDEX => 2, # index of this expression + + EXPRESSION => 3, + TRUE => 4, # hit count of sensitization of 'true' sense of expr + FALSE => 5, # hit count of sensitization of 'false' sense of expr +}; + +sub new +{ + my ($class, $parent, $groupSize, $idx, $expr) = @_; + + my $self = [$parent, $groupSize, $idx, $expr, 0, 0]; + return bless $self, $class; +} + +sub set +{ + # 'sense' should be 0 or 1 - for 'false' and 'true' sense, respectively + my ($self, $sense, $count) = @_; + return 0 if 0 == $count; + + if ('ARRAY' eq ref($count)) { + # recording a differential result + $self->[$sense ? TRUE : FALSE] = $count; + return 1; # assumed changed + } + my $changed = $count && $self->count($sense) == 0; + $self->[$sense ? 
TRUE : FALSE] += $count; + return $changed; +} + +sub parent +{ + return $_[0]->[PARENT]; +} + +sub groupSize +{ + return $_[0]->[GROUP_SIZE]; +} + +sub index +{ + return $_[0]->[INDEX]; +} + +sub expression +{ + return $_[0]->[EXPRESSION]; +} + +sub count +{ + my ($self, $sense) = @_; + return $_[0]->[$sense ? TRUE : FALSE]; +} + +package FunctionEntry; +# keep track of all the functions/all the function aliases +# at a particular line in the file. THey must all be the +# same function - perhaps just templatized differently. + +use constant { + NAME => 0, + ALIASES => 1, + MAP => 2, + FIRST => 3, # start line + COUNT => 4, + LAST => 5, +}; + +sub new +{ + my ($class, $name, $map, $startLine, $endLine) = @_; + die("unexpected type " . ref($map)) unless 'FunctionMap' eq ref($map); + my %aliases = ($name => 0); # not hit, yet + my $self = [$name, \%aliases, $map, $startLine, 0, $endLine]; + + bless $self, $class; + return $self; +} + +sub cloneWithEndLine +{ + my ($self, $withEnd, $cloneAliases) = @_; + my $fn = FunctionEntry->new($self->[NAME], $self->[MAP], $self->[FIRST], + $withEnd ? $self->[LAST] : undef); + if ($cloneAliases) { + my $count = 0; + while (my ($alias, $hit) = each(%{$self->aliases()})) { + $fn->[ALIASES]->{$alias} = $hit; + $count += $hit; + } + $fn->[COUNT] = $count; + } + return $fn; +} + +sub name +{ + my $self = shift; + return $self->[NAME]; +} + +sub filename +{ + my $self = shift; + return $self->[MAP]->filename(); +} + +sub hit +{ + # this is the hit count across all the aliases of the function + my $self = shift; + return $self->[COUNT]; +} + +sub isLambda +{ + my $self = shift; + # jacoco may show both a lambda and a function on the same line - which + # lcov then associates as an alias + # alias name selection above ensures that the 'master' name is lambda + # only if every alias is a lambda. + # -> this is a lambda only if there is only one alias + return ((TraceFile::is_language('c', $self->filename()) && + $self->name() =~ /{lambda\(/) || + (TraceFile::is_language('java', $self->filename()) && + $self->name() =~ /\.lambda\$/)); +} + +sub count +{ + my ($self, $alias, $merged) = @_; + + exists($self->aliases()->{$alias}) or + die("$alias is not an alias of " . $self->name()); + + return $self->[COUNT] + if (defined($merged) && $merged); + + return $self->aliases()->{$alias}; +} + +sub aliases +{ + my $self = shift; + return $self->[ALIASES]; +} + +sub numAliases +{ + my $self = shift; + return scalar(keys %{$self->[ALIASES]}); +} + +sub file +{ + my $self = shift; + return $self->[MAP]->filename(); +} + +sub line +{ + my $self = shift; + return $self->[FIRST]; +} + +sub set_line +{ + my ($self, $line) = @_; + return $self->[FIRST] = $line; +} + +sub end_line +{ + my $self = shift; + return $self->[LAST]; +} + +sub set_end_line +{ + my ($self, $line) = @_; + if ($line < $self->line()) { + my $suffix = + lcovutil::explain_once('derive_end_line', + " See lcovrc man entry for 'derive_function_end_line'."); + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + '"' . $self->file() . '":' . $self->line() . + ': function ' . $self->name() . + " end line $line less than start line " . + $self->line() . + ". Cannot derive function end line.$suffix" + ); + return; + } + $self->[LAST] = $line; +} + +sub _format_error +{ + my ($self, $errno, $name, $count) = @_; + my $alias = + $name ne $self->name() ? " (alias of '" . $self->name() . "'" : ""; + lcovutil::report_format_error($errno, 'hit', $count, + "function '$name'$alias in " . $self->file() . ':' . 
$self->line()); +} + +sub addAlias +{ + my ($self, $name, $count) = @_; + + if (!Scalar::Util::looks_like_number($count)) { + $self->_format_error($lcovutil::ERROR_FORMAT, $name, $count); + $count = 0; + } elsif ($count < 0) { + $self->_format_error($lcovutil::ERROR_NEGATIVE, $name, $count); + $count = 0; + } elsif (defined($lcovutil::excessive_count_threshold) && + $count > $lcovutil::excessive_count_threshold) { + $self->_format_error($lcovutil::ERROR_EXCESSIVE_COUNT, $name, $count) + unless grep({ $name =~ $_ || $self->name() =~ $_ } + @lcovutil::suppress_function_patterns); + } + my $changed; + my $aliases = $self->[ALIASES]; + if (exists($aliases->{$name})) { + $changed = 0 == $aliases->{$name} && 0 != $count; + $aliases->{$name} += $count; + } else { + $aliases->{$name} = $count; + $changed = 1; + # keep track of the shortest name as the function representative + my $curlen = length($self->[NAME]); + my $len = length($name); + # penalize lambda functions so that their name is not chosen + # (java workaround or ugly hack, depending on your perspective) + $curlen += 1000 if $self->[NAME] =~ /(\{lambda\(|\.lambda\$)/; + $len += 1000 if $name =~ /(\{lambda\(|\.lambda\$)/; + $self->[NAME] = $name + if ($len < $curlen || # alias is shorter + ($len == $curlen && # alias is same length but lexically first + $name lt $self->[NAME])); + } + $self->[COUNT] += $count; + # perhaps should remove lambda aliases, if they exist - + # - Issue is that jacoco will show normal function and lambda on the + # same line - which lcov takes to mean that they are aliases + # could just delete the lambda in that case..pretend it doesn't exist. + return $changed; +} + +sub merge +{ + my ($self, $that) = @_; + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + $self->name() . " has different location than " . + $that->name() . " during merge") + if ($self->line() != $self->line()); + while (my ($name, $count) = each(%{$that->[ALIASES]})) { + $self->addAlias($name, $count); + } +} + +sub removeAliases +{ + my $self = shift; + my $aliases = $self->[ALIASES]; + my $rename = 0; + foreach my $name (@_) { + exists($aliases->{$name}) or die("removing non-existent alias $name"); + + my $count = $aliases->{$name}; + delete($aliases->{$name}); + $self->[COUNT] -= $count; + if ($self->[NAME] eq $name) { + $rename = 1; + } + } + if ($rename && + %$aliases) { + my $name; + foreach my $alias (keys %$aliases) { + $name = $alias if !defined($name) || length($alias) < length($name); + } + $self->[NAME] = $name; + } + return %$aliases; # true if this function still exists +} + +sub addAliasDifferential +{ + my ($self, $name, $data) = @_; + die("alias $name exists") + if exists($self->[ALIASES]->{$name}) && $name ne $self->name(); + die("expected array") + unless ref($data) eq "ARRAY" && 2 == scalar(@$data); + $self->[ALIASES]->{$name} = $data; +} + +sub setCountDifferential +{ + my ($self, $data) = @_; + die("expected array") + unless ref($data) eq "ARRAY" && 2 == scalar(@$data); + $self->[COUNT] = $data; +} + +sub findMyLines +{ + # use my start/end location to find my list of line coverpoints within + # this function. + # return sorted list of [ [lineNo, hitCount], ...] 
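+ # e.g. [[10, 4], [11, 0], [13, 4]] for a function spanning lines
+ # 10..13 where line 12 has no line coverpoint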
+ my ($self, $lineData) = @_; + return undef unless $self->end_line(); + my @lines; + for (my $lineNo = $self->line(); $lineNo <= $self->end_line(); ++$lineNo) { + my $hit = $lineData->value($lineNo); + push(@lines, [$lineNo, $hit]) + if (defined($hit)); + } + return \@lines; +} + +sub _findConditionals +{ + my ($self, $data) = @_; + return undef unless $self->end_line(); + my @list; + for (my $lineNo = $self->line(); $lineNo <= $self->end_line(); ++$lineNo) { + my $entry = $data->value($lineNo); + push(@list, $entry) + if (defined($entry)); + } + return \@list; +} + +sub findMyBranches +{ + # use my start/end location to list of branch entries within this function + # return sorted list [ branchEntry, ..] sorted by line + my ($self, $branchData) = @_; + die("expected BranchData") unless ref($branchData) eq "BranchData"; + return $self->_findConditionals($branchData); +} + +sub findMyMcdc +{ + # use my start/end location to list of MC/DC entries within this function + # return list [ MCDC_Block, ..] sorted by line + my ($self, $mcdcData) = @_; + die("expected MCDC_Data") unless ref($mcdcData) eq "MCDC_Data"; + return $self->_findConditionals($mcdcData); +} + +package FunctionMap; + +sub new($$) +{ + my ($class, $filename) = @_; + my $self = [{}, {}, $filename]; # [locationMap, nameMap] + bless $self, $class; +} + +sub filename +{ + my $self = shift; + return $self->[2]; +} + +sub keylist +{ + # return list of file:lineNo keys.. + my $self = shift; + return keys(%{$self->[0]}); +} + +sub valuelist +{ + # return list of FunctionEntry elements we know about + my $self = shift; + return values(%{$self->[0]}); +} + +sub list_functions +{ + # return list of all the functions/function aliases that we know about + my $self = shift; + return keys(%{$self->[1]}); +} + +sub define_function +{ + my ($self, $fnName, $start_line, $end_line, $location) = @_; + #lcovutil::info("define: $fnName " . $self->$filename() . ":$start_line->$end_line\n"); + # could check that function ranges within file are non-overlapping + my ($locationMap, $nameMap) = @$self; + + my $data = $self->findName($fnName); + if (defined($data) && + #TraceFile::is_language('c', $self->filename()) && + $data->line() != $start_line + ) { + $location = '"' . $self->filename() . '":' . $start_line + unless defined($location); + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + "$location: duplicate function '$fnName' starts on line \"" . + $data->filename() . + "\":$start_line but previous definition started on " . + $data->line() . MessageContext::context() . '.') + unless + grep({ $fnName =~ $_ } @lcovutil::suppress_function_patterns); + # if ignored, just return the function we already have - + # record the function location as the smallest line number we saw + if ($start_line < $data->line()) { + delete $self->[0]->{$data->line()}; + $data->set_line($start_line); + $self->[0]->{$start_line} = $data; + } + return $data; + } + + if (exists($locationMap->{$start_line})) { + $data = $locationMap->{$start_line}; + unless ((defined($end_line) && + defined($data->end_line()) && + $end_line == $data->end_line()) || + (!defined($end_line) && !defined($data->end_line())) + ) { + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + "mismatched end line for $fnName at " . + $self->filename() . ":$start_line: " + . + (defined($data->end_line()) ? + $data->end_line() : 'undef') . + " -> " + . + (defined($end_line) ? $end_line : + 'undef') . 
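+ # (if the mismatch is ignored, the larger of the two end lines is
+ # kept - see the set_end_line() call just below)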
+ MessageContext::context()) + unless + grep({ $fnName =~ $_ } @lcovutil::suppress_function_patterns); + # pick the highest end line if we didn't error out + $data->set_end_line($end_line) + if (defined($end_line) && + (!defined($data->end_line()) || + $end_line > $data->end_line())); + } + } else { + $data = FunctionEntry->new($fnName, $self, $start_line, $end_line); + $locationMap->{$start_line} = $data; + } + if (!exists($nameMap->{$fnName})) { + $nameMap->{$fnName} = $data; + $data->addAlias($fnName, 0); + } + return $data; +} + +sub findName +{ + my ($self, $name) = @_; + my $nameMap = $self->[1]; + return exists($nameMap->{$name}) ? $nameMap->{$name} : undef; +} + +sub findKey +{ + my ($self, $key) = @_; # key is the start line of the function + my $locationMap = $self->[0]; + return exists($locationMap->{$key}) ? $locationMap->{$key} : undef; +} + +sub numFunc +{ + my ($self, $merged) = @_; + + if (defined($merged) && $merged) { + return scalar($self->keylist()); + } + my $n = 0; + foreach my $key ($self->keylist()) { + my $data = $self->findKey($key); + $n += $data->numAliases(); + } + return $n; +} + +sub numHit +{ + my ($self, $merged) = @_; + + my $n = 0; + foreach my $key ($self->keylist()) { + my $data = $self->findKey($key); + if (defined($merged) && $merged) { + ++$n + if $data->hit() > 0; + } else { + my $aliases = $data->aliases(); + foreach my $alias (keys(%$aliases)) { + my $c = $aliases->{$alias}; + ++$n if $c > 0; + } + } + } + return $n; +} + +sub get_found_and_hit +{ + my $self = shift; + my $merged = + defined($lcovutil::cov_filter[$lcovutil::FILTER_FUNCTION_ALIAS]); + return ($self->numFunc($merged), $self->numHit($merged)); +} + +sub add_count +{ + my ($self, $fnName, $count) = @_; + my $nameMap = $self->[1]; + if (exists($nameMap->{$fnName})) { + my $data = $nameMap->{$fnName}; + $data->addAlias($fnName, $count); + } else { + lcovutil::ignorable_error($lcovutil::ERROR_MISMATCH, + "unknown function '$fnName'"); + } +} + +sub union +{ + my ($self, $that) = @_; + + my $changed = 0; + my $myData = $self->[0]; + my $yourData = $that->[0]; + while (my ($key, $thatData) = each(%$yourData)) { + my $thisData; + if (!exists($myData->{$key})) { + $thisData = + $self->define_function($thatData->name(), + $thatData->line(), $thatData->end_line()); + $changed = 1; # something new... + } else { + $thisData = $myData->{$key}; + if (!($thisData->line() == $thatData->line() + && ($thisData->file() eq $thatData->file() || + ($lcovutil::case_insensitive && + lc($thisData->file()) eq lc($thatData->file()))) + )) { + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + "function data mismatch at " . + $thatData->file() . ":" . 
$thatData->line()); + next; + } + } + # merge in all the new aliases + while (my ($alias, $count) = each(%{$thatData->aliases()})) { + if ($thisData->addAlias($alias, $count)) { + $changed = 1; + } + } + } + return $changed; +} + +sub intersect +{ + my ($self, $that) = @_; + + my $changed = 0; + my $myData = $self->[0]; + my $myNames = $self->[1]; + my $yourData = $that->[0]; + my $yourNames = $that->[1]; + foreach my $key (keys %$myData) { + my $me = $myData->{$key}; + if (exists($yourData->{$key})) { + my $yourFn = $yourData->{$key}; + # intersect operation: keep only the common aliases + my @remove; + my $yourAliases = $yourFn->aliases(); + while (my ($alias, $count) = each(%{$me->aliases()})) { + if (exists($yourAliases->{$alias})) { + if ($me->addAlias($alias, $yourAliases->{$alias})) { + $changed = 1; + } + } else { + # remove this alias from me.. + push(@remove, $alias); + delete($myNames->{$alias}); + $changed = 1; + } + } + if (!$me->removeAliases(@remove)) { + # no aliases left (no common aliases) - so remove this function + delete($myData->{$key}); + } + } else { + $self->remove($me); + $changed = 1; + } + } + return $changed; +} + +sub difference +{ + my ($self, $that) = @_; + + my $changed = 0; + my $myData = $self->[0]; + my $yourData = $that->[0]; + foreach my $key (keys %$myData) { + if (exists($yourData->{$key})) { + # just remove the common aliases... + my $me = $myData->{$key}; + my $you = $yourData->{$key}; + my @remove; + while (my ($alias, $count) = each(%{$you->aliases()})) { + if (exists($me->aliases()->{$alias})) { + push(@remove, $alias); + $changed = 1; + } + } + if (!$me->removeAliases(@remove)) { + # no aliases left (no disjoint aliases) - so remove this function + delete($myData->{$key}); + } + } + } + return $changed; +} + +sub remove +{ + my ($self, $entry) = @_; + die("expected FunctionEntry - " . ref($entry)) + unless 'FunctionEntry' eq ref($entry); + my ($locationMap, $nameMap) = @$self; + my $key = $entry->line(); + foreach my $alias (keys %{$entry->aliases()}) { + delete($nameMap->{$alias}); + } + delete($locationMap->{$key}); +} + +package BranchMap; + +use constant { + DATA => 0, + FOUND => 1, + HIT => 2, +}; + +sub new +{ + my $class = shift; + my $self = [{}, # hash of lineNo -> BranchEntry/MCDC_Element + # BranchEntry: + # hash of blockID -> + # array of 'taken' entries for each sequential + # branch ID + # MCDC_Element: + 0, # branches found + 0, # branches executed + ]; + return bless $self, $class; +} + +sub remove +{ + my ($self, $line, $check_if_present) = @_; + my $data = $self->[DATA]; + + return 0 if ($check_if_present && !exists($data->{$line})); + + my $branch = $data->{$line}; + my ($f, $h) = $branch->totals(); + $self->[FOUND] -= $f; + $self->[HIT] -= $h; + + delete($data->{$line}); + return 1; +} + +sub found +{ + my $self = shift; + + return $self->[FOUND]; +} + +sub hit +{ + my $self = shift; + + return $self->[HIT]; +} + +# return BranchEntry struct (or undef) +sub value +{ + my ($self, $lineNo) = @_; + + my $map = $self->[DATA]; + return exists($map->{$lineNo}) ? 
$map->{$lineNo} : undef; +} + +# return list of lines which contain branch data +sub keylist +{ + my $self = shift; + return keys(%{$self->[DATA]}); +} + +sub get_found_and_hit +{ + my $self = shift; + + return ($self->[FOUND], $self->[HIT]); +} + +package BranchData; + +use base 'BranchMap'; + +sub new +{ + my $class = shift; + my $self = $class->SUPER::new(); + return $self; +} + +sub append +{ + my ($self, $line, $block, $br, $filename) = @_; + # HGC: might be good idea to pass filename so we could give better + # error message if the data is inconsistent. + # OTOH: unclear what a normal user could do about it anyway. + # Maybe exclude that file? + my $data = $self->[BranchMap::DATA]; + $filename = '' if (defined($filename) && $filename eq '-'); + if (!defined($br)) { + lcovutil::ignorable_error($lcovutil::ERROR_BRANCH, + (defined $filename ? "\"$filename\":$line: " : "") + . "expected 'BranchEntry' or 'integer, BranchBlock'" + ) unless ('BranchEntry' eq ref($block)); + + die("line $line already contains element") + if exists($data->{$line}); + # this gets called from 'apply_diff' method: the new line number + # which was assigned might be different than the original - so we + # have to fix up the branch entry. + $block->[0] = $line; + my ($f, $h) = $block->totals(); + $self->[BranchMap::FOUND] += $f; + $self->[BranchMap::HIT] += $h; + $data->{$line} = $block; + return 1; # we added something + } + + # this cannot happen unless inconsistent branch data was generated by gcov + die((defined $filename ? "\"$filename\":$line: " : "") . + "BranchData::append expected BranchBlock got '" . + ref($br) . + "'.\nThis may be due to mismatched 'gcc' and 'gcov' versions.\n") + unless ('BranchBlock' eq ref($br)); + + my $branch = $br->id(); + my $branchElem; + my $changed = 0; + if (exists($data->{$line})) { + $branchElem = $data->{$line}; + $line == $branchElem->line() or die("wrong line mapping"); + } else { + $branchElem = BranchEntry->new($line); + $data->{$line} = $branchElem; + $changed = 1; # something new + } + + if (!$branchElem->hasBlock($block)) { + $branch == 0 + or + lcovutil::ignorable_error($lcovutil::ERROR_BRANCH, + "unexpected non-zero initial branch"); + $branch = 0; + my $l = $branchElem->addBlock($block); + push(@$l, + BranchBlock->new($branch, $br->data(), + $br->expr(), $br->is_exception())); + ++$self->[BranchMap::FOUND]; # found one + ++$self->[BranchMap::HIT] if 0 != $br->count(); # hit one + $changed = 1; # something new.. + } else { + $block = $branchElem->getBlock($block); + + if ($branch > scalar(@$block)) { + lcovutil::ignorable_error($lcovutil::ERROR_BRANCH, + (defined $filename ? "\"$filename\":$line: " : "") . + "unexpected non-sequential branch ID $branch for block $block" + . (defined($filename) ? "" : " of line $line: ") + . ": found " . + scalar(@$block) . 
" blocks"); + $branch = scalar(@$block); + } + + if (!exists($block->[$branch])) { + $block->[$branch] = + BranchBlock->new($branch, $br->data(), $br->expr(), + $br->is_exception()); + ++$self->[BranchMap::FOUND]; # found one + ++$self->[BranchMap::HIT] if 0 != $br->count(); # hit one + + $changed = 1; + } else { + my $me = $block->[$branch]; + if (0 == $me->count() && 0 != $br->count()) { + ++$self->[BranchMap::HIT]; # hit one + $changed = 1; + } + if ($me->merge($br, $filename, $line)) { + $changed = 1; + } + } + } + return $changed; +} + +sub removeBranches +{ + my ($self, $branchList) = @_; + + foreach my $b (@$branchList) { + --$self->[BranchMap::FOUND]; + --$self->[BranchMap::HIT] if 0 != $b->count(); + } +} + +sub _checkCounts +{ + # some consistency checking + my $self = shift; + + my $data = $self->[BranchMap::DATA]; + my $found = 0; + my $hit = 0; + + while (my ($line, $branch) = each(%$data)) { + $line == $branch->line() or die("lost track of line"); + my ($f, $h) = $branch->totals(); + $found += $f; + $hit += $h; + } + die("invalid counts: found:" . $self->[BranchMap::FOUND] . + "->$found, hit:" . $self->[BranchMap::HIT] . "->$hit") + unless ($self->[BranchMap::FOUND] == $found && + $self->[BranchMap::HIT] == $hit); +} + +sub compatible($$) +{ + my ($myBr, $yourBr) = @_; + + # same number of branches + return 0 unless ($#$myBr == $#$yourBr); + for (my $i = 0; $i <= $#$myBr; ++$i) { + my $me = $myBr->[$i]; + my $you = $yourBr->[$i]; + if ($me->exprString() ne $you->exprString()) { + # this one doesn't match + return 0; + } + } + return 1; +} + +sub union +{ + my ($self, $info, $filename) = @_; + my $changed = 0; + + my $mydata = $self->[BranchMap::DATA]; + while (my ($line, $yourBranch) = each(%{$info->[BranchMap::DATA]})) { + # check if self has corresponding line: + # no: just copy all the data for this line, from 'info' + # yes: check for matching blocks + my $myBranch = $self->value($line); + if (!defined($myBranch)) { + $mydata->{$line} = Storable::dclone($yourBranch); + my ($f, $h) = $yourBranch->totals(); + $self->[BranchMap::FOUND] += $f; + $self->[BranchMap::HIT] += $h; + $changed = 1; + next; + } + # keep track of which 'myBranch' blocks have already been merged in + # this pass. We don't want to merge multiple distinct blocks from $info + # into the same $self block (even if it appears compatible) - because + # those blocks were distinct in the input data + my %merged; + + # we don't expect there to be a huge number of distinct blocks + # in each branch: most often, just one - + # Thus, we simply walk the list to find a matching block, if one exists + # The matching block will have the same number of branches, and the + # branch expressions will be the same. + # - expression only used in Verilog code at the moment - + # other languages will just have a (matching) integer + # branch index + + # first: merge your blocks which seem to exist in me: + my @yourBlocks = sort($yourBranch->blocks()); + foreach my $yourId (@yourBlocks) { + my $yourBr = $yourBranch->getBlock($yourId); + + # Do I have a block with matching name, which is compatible? 
+ my $myBr = $myBranch->getBlock($yourId) + if $myBranch->hasBlock($yourId); + if (defined($myBr) && # I have this one + compatible($myBr, $yourBr) + ) { + foreach my $br (@$yourBr) { + if ($self->append($line, $yourId, $br, $filename)) { + $changed = 1; + } + } + $merged{$yourId} = 1; + $yourId = undef; + } + } + # now look for compatible blocks that aren't identical + BLOCK: foreach my $yourId (@yourBlocks) { + next unless defined($yourId); + my $yourBr = $yourBranch->getBlock($yourId); + + # See if we can find a compatible block in $self + # if found: merge. + # no match: this is a different block - assign new ID + + foreach my $myId ($myBranch->blocks()) { + next if exists($merged{$myId}); + + my $myBr = $myBranch->getBlock($myId); + if (compatible($myBr, $yourBr)) { + # we match - so merge our data + $merged{$myId} = 1; # used this one + foreach my $br (@$yourBr) { + if ($self->append($line, $myId, $br, $filename)) { + $changed = 1; + } + } + next BLOCK; # merged this one - go to next + } + } # end search for your block in my blocklist + # we didn't find a match - so this needs to be a new block + my $newID = scalar($myBranch->blocks()); + $merged{$newID} = 1; # used this one + foreach my $br (@$yourBr) { + if ($self->append($line, $newID, $br, $filename)) { + $changed = 1; + } + } + } + } + if ($lcovutil::debug) { + $self->_checkCounts(); # some paranoia + } + return $changed; +} + +sub intersect +{ + my ($self, $info, $filename) = @_; + my $changed = 0; + + my $mydata = $self->[BranchMap::DATA]; + my $yourdata = $info->[BranchMap::DATA]; + foreach my $line (keys %$mydata) { + if (exists($yourdata->{$line})) { + # look at all my blocks. If you have a compatible block, merge them + # - else delete mine + my $myBranch = $mydata->{$line}; + my $yourBranch = $yourdata->{$line}; + my @myBlocks = $myBranch->blocks(); + foreach my $myId (@myBlocks) { + my $myBr = $myBranch->getBlock($myId); + + # Do you have a block with matching name, which is compatible? + my $yourBlock = $yourBranch->getBlock($myId) + if $yourBranch->hasBlock($myId); + if (defined($yourBlock) && # you have this one + compatible($myBr, $yourBlock) + ) { + foreach my $br (@$yourBlock) { + if ($self->append($line, $myId, $br, $filename)) { + $changed = 1; + } + } + } else { + # block not found...remove this one + $myBranch->removeBlock($myId, $self); + $changed = 1; + } + } # foreach block + } else { + # my line not found in your data - so remove this one + $changed = 1; + $self->remove($line); + } + } + return $changed; +} + +sub difference +{ + my ($self, $info, $filename) = @_; + my $changed = 0; + + my $mydata = $self->[BranchMap::DATA]; + my $yourdata = $info->[BranchMap::DATA]; + foreach my $line (keys %$mydata) { + # keep everything here if you don't have this line + next unless exists($yourdata->{$line}); + + # look at all my blocks. If you have a compatible block, remove it: + my $myBranch = $mydata->{$line}; + my $yourBranch = $yourdata->{$line}; + my @myBlocks = $myBranch->blocks(); + foreach my $myId (@myBlocks) { + my $myBr = $myBranch->getBlock($myId); + + # Do you have a block with matching name, which is compatible? 
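+            # Editorial note: unlike intersect(), difference() removes any of
+            # my blocks that also appear (compatibly) in the other data set,
+            # keeping only what is unique to this side.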
+ my $yourBlock = $yourBranch->getBlock($myId) + if $yourBranch->hasBlock($myId); + if (defined($yourBlock) && # you have this one + compatible($myBr, $yourBlock) + ) { + # remove common block + $myBranch->removeBlock($myId, $self); + $changed = 1; + } + } # foreach block + } + return $changed; +} + +package MCDC_Data; + +use base 'BranchMap'; + +sub new +{ + my $class = shift; + my $self = $class->SUPER::new(); + return $self; +} + +sub append_mcdc +{ + my ($self, $mcdc) = @_; + my $line = $mcdc->line(); + die("MCDC already defined for $line") + if exists($self->[BranchMap::DATA]->{$line}); + $self->[BranchMap::DATA]->{$line} = $mcdc; +} + +sub new_mcdc +{ + my ($self, $fileData, $line) = @_; + + return $self->[BranchMap::DATA]->{$line} + if exists($self->[BranchMap::DATA]->{$line}); + + my $mcdc = MCDC_Block->new($line); + $self->[BranchMap::DATA]->{$line} = $mcdc; + return $mcdc; +} + +sub close_mcdcBlock +{ + my ($self, $mcdc) = @_; + my $found = 0; + my $hit = 0; + while (my ($groupSize, $exprs) = each(%{$mcdc->groups()})) { + foreach my $e (@$exprs) { + $found += 2; + ++$hit if $e->count(0); + ++$hit if $e->count(1); + } + } + $self->[BranchMap::FOUND] += $found; + $self->[BranchMap::HIT] += $hit; +} + +sub _calculate_counts +{ + my $self = shift; + my $found = 0; + my $hit = 0; + while (my ($line, $block) = each(%{$self->[BranchMap::DATA]})) { + my ($f, $h) = $block->totals(); + $found += $f; + $hit += $h; + } + $self->[BranchMap::FOUND] = $found; + $self->[BranchMap::HIT] = $hit; +} + +sub union +{ + my ($self, $info) = @_; + my $changed = 0; + + my $mydata = $self->[BranchMap::DATA]; + while (my ($line, $yourBranch) = each(%{$info->[BranchMap::DATA]})) { + # check if self has corresponding line: + # no: just copy all the data for this line, from 'info' + # yes: check for matching blocks + my $myBranch = $self->value($line); + if (!defined($myBranch)) { + my $c = Storable::dclone($yourBranch); + $mydata->{$line} = $c; + $self->close_mcdcBlock($c); + $changed = 1; + next; + } + + # check if we are compatible. + if ($myBranch->is_compatible($yourBranch)) { + $changed += $myBranch->merge($yourBranch); + } else { + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + "cannot merge iconsistent MC/DC record"); + # possibly remove this record? + } + } + $self->_calculate_counts(); + return $changed; +} + +sub intersect +{ + my ($self, $info, $filename) = @_; + my $changed = 0; + + my $yourData = $info->[BranchMap::DATA]; + my $mydata = $self->[BranchMap::DATA]; + foreach my $line (keys %$mydata) { + if (exists($yourData->{$line})) { + # append your count to mine + my $yourBranch = $yourData->{$line}; + my $myBranch = $mydata->{$line}; + + if ($myBranch->is_compatible($yourBranch)) { + $changed += $myBranch->merge($yourBranch); + } else { + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + "cannot merge iconsistent MC/DC record"); + # possibly remove this record? 
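+                # Editorial note: an incompatible record is currently left in
+                # place after the warning; found/hit totals are recomputed by
+                # _calculate_counts() once the loop finishes.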
+ } + } else { + $self->remove($line); + $changed = 1; + } + } + $self->_calculate_counts(); + return $changed; +} + +sub difference +{ + my ($self, $info, $filename) = @_; + my $changed = 0; + + my $yourData = $info->[BranchMap::DATA]; + my $mydata = $self->[BranchMap::DATA]; + foreach my $line (keys %$mydata) { + if (exists($yourData->{$line})) { + $self->remove($line); + $changed = 1; + } + } + $self->_calculate_counts(); + return $changed; +} + +package FilterBranchExceptions; + +use constant { + EXCEPTION_f => 0, + ORPHAN_f => 1, + REGION_f => 2, + BRANCH_f => 3 # branch filter +}; + +sub new +{ + my $class = shift; + my $self = [$lcovutil::cov_filter[$lcovutil::FILTER_EXCEPTION_BRANCH], + $lcovutil::cov_filter[$lcovutil::FILTER_ORPHAN_BRANCH], + $lcovutil::cov_filter[$lcovutil::FILTER_EXCLUDE_REGION], + $lcovutil::cov_filter[$lcovutil::FILTER_EXCLUDE_BRANCH] + ]; + bless $self, $class; + return grep({ defined($_) } @$self) ? $self : undef; +} + +sub removeBranches +{ + my ($self, $line, $branches, $filter, $unreachable, $isMasterData) = @_; + + my $brdata = $branches->value($line); + return 0 unless defined($brdata); + # 'unreachable' and 'excluded' branches have already been removed + # by 'region' filter along with their parent line - so no need to + # do anything here + die("unexpected unreachable branch") + if ($unreachable && 0 != $brdata->count()); + my $modified = 0; + my $count = 0; + foreach my $block_id ($brdata->blocks()) { + my $blockData = $brdata->getBlock($block_id); + my @replace; + foreach my $br (@$blockData) { + if (defined($filter) && $br->is_exception()) { + --$branches->[BranchMap::FOUND]; + --$branches->[BranchMap::HIT] if 0 != $br->count(); + #lcovutil::info($srcReader->fileanme() . ": $line: remove exception branch\n"); + $modified = 1; + ++$count; + } else { + push(@replace, $br); + } + } + if ($count) { + @$blockData = @replace; + ++$filter->[-2] if $isMasterData; + lcovutil::info(2, + "$line: remove $count exception branch" . + (1 == $count ? '' : 'es') . "\n") + if $isMasterData; + $filter->[-1] += $count; + } + # If there is only one branch left - then this is not a conditional + if (0 == scalar(@replace)) { + lcovutil::info(2, "$line: remove exception block $block_id\n"); + lcovutil::info("$line: remove exception block $block_id\n"); + $brdata->removeBlock($block_id, $branches); + } elsif (1 == scalar(@replace) && + defined($self->[ORPHAN_f])) { # filter orphan + lcovutil::info(2, + "$line: remove orphan exception block $block_id\n"); + $brdata->removeBlock($block_id, $branches); + + ++$self->[ORPHAN_f]->[-2] + if $isMasterData; + ++$self->[ORPHAN_f]->[-1]; + } + } + if (0 == scalar($brdata->blocks())) { + lcovutil::info(2, "$line: no branches remain\n"); + $branches->remove($line); + $modified = 1; + } + return $modified; +} + +sub applyFilter +{ + my ($self, $filter, $line, $branches, $perTestBranches, $unreachable) = @_; + my $modified = + $self->removeBranches($line, $branches, $filter, $unreachable, 1); + foreach my $tn ($perTestBranches->keylist()) { + # want to remove matching branches everytwhere - so we don't want short-circuit evaluation + my $m = $self->removeBranches($line, $perTestBranches->value($tn), + $filter, $unreachable, 0); + $modified ||= $m; + } + return $modified; +} + +sub filter +{ + my ($self, $line, $srcReader, $branches, $perTestBranches) = @_; + + my $reason; + if (0 != ($reason = $srcReader->isExcluded($line, $srcReader->e_EXCEPTION))) + { + # exception branch excluded.. 
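+        # Editorial note: when both filters are configured, the 'region'
+        # filter takes precedence over the 'branch' filter, and only the
+        # region filter honours the 'unreachable' flag.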
+ if (defined($self->[REGION_f])) { # exclude region + # don't filter out if this line is "unreachable" and + # some branch here is hit + return + $self->applyFilter($self->[REGION_f], + $line, + $branches, + $perTestBranches, + 0 != ($reason & $srcReader->e_UNREACHABLE)); + } elsif (defined($self->[BRANCH_f])) { # exclude branches + # filter out bogus branches - even if this region is unreachable + return + $self->applyFilter($self->[BRANCH_f], $line, $branches, + $perTestBranches, 0); + } + } + # apply if filtering exceptions, orphans, or both + if (defined($self->[EXCEPTION_f]) || defined($self->[ORPHAN_f])) { + # filter exceptions and orphans - even if the region is "unreachable" + return + $self->applyFilter($self->[EXCEPTION_f], $line, $branches, + $perTestBranches, 0); + } + return 0; +} + +package TraceInfo; +# coveage data for a particular source file +use constant { + VERSION => 0, + LOCATION => 1, + FILENAME => 2, + CHECKSUM => 3, + LINE_DATA => 4, # per-testcase data + BRANCH_DATA => 5, + FUNCTION_DATA => 6, + MCDC_DATA => 7, + + UNION => 0, + INTERSECT => 1, + DIFFERENCE => 2, +}; + +sub new +{ + my ($class, $filename) = @_; + my $self = []; + bless $self, $class; + + $self->[VERSION] = undef; # version ID from revision control (if any) + + # keep track of location in .info file that this file data was found + # - useful in error messages + $self->[LOCATION] = []; # will fill with file/line + + $self->[FILENAME] = $filename; + # _checkdata : line number -> source line checksum + $self->[CHECKSUM] = MapData->new(); + # each line/branch/function element is a list of [summaryData, perTestcaseData] + + # line: [ line number -> execution count - merged over all testcases, + # testcase_name -> CountData -> line_number -> execution_count ] + $self->[LINE_DATA] = + [CountData->new($filename, $CountData::SORTED), MapData->new()]; + + # branch: [ BranchData: line number -> branch coverage - for all tests + # testcase_name -> BranchData] + $self->[BRANCH_DATA] = [BranchData->new(), MapData->new()]; + + # function: [FunctionMap: function_name->FunctionEntry, + # tescase_name -> FucntionMap ] + $self->[FUNCTION_DATA] = [FunctionMap->new($filename), MapData->new()]; + + $self->[MCDC_DATA] = [MCDC_Data->new(), MapData->new()]; + + return $self; +} + +sub filename +{ + my $self = shift; + return $self->[FILENAME]; +} + +sub set_filename +{ + my ($self, $name) = @_; + $self->[FILENAME] = $name; +} + +# return true if no line, branch, or function coverage data +sub is_empty +{ + my $self = shift; + return ($self->test()->is_empty() && # line cov + $self->testbr()->is_empty() && $self->testfnc()->is_empty()); +} + +sub location +{ + my ($self, $filename, $lineNo) = @_; + my $l = $self->[LOCATION]; + if (defined($filename)) { + $l->[0] = $filename; + $l->[1] = $lineNo; + } + return $l; +} + +sub version +{ + # return the version ID that we found + my ($self, $version) = @_; + (!defined($version) || !defined($self->[VERSION])) or + die("expected to set version ID at most once: " . + (defined($version) ? $version : "undef") . " " . + (defined($self->[VERSION]) ? 
$self->[VERSION] : "undef")); + $self->[VERSION] = $version + if defined($version); + return $self->[VERSION]; +} + +# line coverage data +sub test +{ + my ($self, $testname) = @_; + + my $data = $self->[LINE_DATA]->[1]; + if (!defined($testname)) { + return $data; + } + + if (!$data->mapped($testname)) { + $data->append_if_unset($testname, CountData->new($self->filename(), 1)); + } + + return $data->value($testname); +} + +sub sum +{ + # return MapData of line -> hit count + # data merged over all testcases + my $self = shift; + return $self->[LINE_DATA]->[0]; +} + +sub func +{ + # return FunctionMap of function name or location -> FunctionEntry + # data is merged over all testcases + my $self = shift; + return $self->[FUNCTION_DATA]->[0]; +} + +sub found +{ + my $self = shift; + return $self->sum()->found(); +} + +sub hit +{ + my $self = shift; + return $self->sum()->hit(); +} + +sub function_found +{ + my $self = shift; + return $self->func() + ->numFunc( + defined($lcovutil::cov_filter[$lcovutil::FILTER_FUNCTION_ALIAS])); +} + +sub function_hit +{ + my $self = shift; + return $self->func() + ->numHit( + defined($lcovutil::cov_filter[$lcovutil::FILTER_FUNCTION_ALIAS])); +} + +sub branch_found +{ + my $self = shift; + return $self->sumbr()->found(); +} + +sub branch_hit +{ + my $self = shift; + return $self->sumbr()->hit(); +} + +sub mcdc_found +{ + return $_[0]->mcdc()->found(); +} + +sub mcdc_hit +{ + return $_[0]->mcdc()->hit(); +} + +sub check +{ + my $self = shift; + return $self->[CHECKSUM]; +} + +# function coverage +sub testfnc +{ + my ($self, $testname) = @_; + + my $data = $self->[FUNCTION_DATA]->[1]; + if (!defined($testname)) { + return $data; + } + + if (!$data->mapped($testname)) { + $data->append_if_unset($testname, FunctionMap->new($self->filename())); + } + + return $data->value($testname); +} + +# branch coverage +sub testbr +{ + my ($self, $testname) = @_; + + my $data = $self->[BRANCH_DATA]->[1]; + if (!defined($testname)) { + return $data; + } + + if (!$data->mapped($testname)) { + $data->append_if_unset($testname, BranchData->new()); + } + + return $data->value($testname); +} + +sub sumbr +{ + # return BranchData map of line number -> BranchEntry + # data is merged over all testcases + my $self = shift; + return $self->[BRANCH_DATA]->[0]; +} + +# MCDC coverage +sub testcase_mcdc +{ + my ($self, $testname) = @_; + + my $data = $self->[MCDC_DATA]->[1]; + if (!defined($testname)) { + return $data; + } + + if (!$data->mapped($testname)) { + $data->append_if_unset($testname, MCDC_Data->new()); + } + + return $data->value($testname); +} + +sub mcdc +{ + # return MCDC_Data map of line number -> MCDC_Block + # data is merged over all testcases + my $self = shift; + return $self->[MCDC_DATA]->[0]; +} + +# +# check_data +# some paranoia checks + +sub check_data($) +{ + my $self = shift; + + # some paranoia checking... + if (1 || $lcovutil::debug) { + my ($brSum, $brTest) = @{$self->[BRANCH_DATA]}; + $brSum->_checkCounts(); + foreach my $t ($brTest->keylist()) { + $brTest->value($t)->_checkCounts(); + } + } +} + +# +# get_info(hash_ref) +# +# Retrieve data from an entry of the structure generated by TraceFile::_read_info(). 
+# Return a list of references to hashes: +# (test data hash ref, sum count hash ref, funcdata hash ref, checkdata hash +# ref, testfncdata hash ref, testbranchdata hash ref, branch summary hash ref) +# + +sub get_info($) +{ + my $self = shift; + my ($sumcount_ref, $testdata_ref) = @{$self->[LINE_DATA]}; + my ($funcdata_ref, $testfncdata) = @{$self->[FUNCTION_DATA]}; + my ($sumbrcount, $testbrdata) = @{$self->[BRANCH_DATA]}; + my ($mcdccount, $testcasemcdc) = @{$self->[MCDC_DATA]}; + my $checkdata_ref = $self->[CHECKSUM]; + + return ($testdata_ref, $sumcount_ref, $funcdata_ref, + $checkdata_ref, $testfncdata, $testbrdata, + $sumbrcount, $mcdccount, $testcasemcdc); +} + +sub _merge_checksums +{ + my $self = shift; + my $info = shift; + my $filename = shift; + + my $mine = $self->check(); + my $yours = $info->check(); + foreach my $line ($yours->keylist()) { + if ($mine->mapped($line) && + $mine->value($line) ne $yours->value($line)) { + lcovutil::ignorable_error($lcovutil::ERROR_MISMATCH, + "checksum mismatch at $filename:$line: " . + $mine->value($line), + ' -> ' . $yours->value($line)); + } + $mine->replace($line, $yours->value($line)); + } +} + +sub merge +{ + my ($self, $info, $op, $filename) = @_; + + my $me = defined($self->version()) ? $self->version() : ""; + my $you = defined($info->version()) ? $info->version() : ""; + + my ($countOp, $funcOp, $brOp, $mcdcOp); + + if ($op == UNION) { + $countOp = \&CountData::union; + $funcOp = \&FunctionMap::union; + $brOp = \&BranchData::union; + $mcdcOp = \&MCDC_Data::union; + } elsif ($op == INTERSECT) { + $countOp = \&CountData::intersect; + $funcOp = \&FunctionMap::intersect; + $brOp = \&BranchData::intersect; + $mcdcOp = \&MCDC_Data::intersect; + } else { + die("unexpected op $op") unless $op == DIFFERENCE; + $countOp = \&CountData::difference; + $funcOp = \&FunctionMap::difference; + $brOp = \&BranchData::difference; + $mcdcOp = \&MCDC_Data::difference; + } + + lcovutil::checkVersionMatch($filename, $me, $you, 'merge'); + my $changed = 0; + + foreach my $name ($info->test()->keylist()) { + if (&$countOp($self->test($name), $info->test($name))) { + $changed = 1; + } + } + # if intersect and I contain some test that you don't, need to remove my data + if (&$countOp($self->sum(), $info->sum())) { + $changed = 1; + } + + if (&$funcOp($self->func(), $info->func())) { + $changed = 1; + } + $self->_merge_checksums($info, $filename); + + foreach my $name ($info->testfnc()->keylist()) { + if (&$funcOp($self->testfnc($name), $info->testfnc($name))) { + $changed = 1; + } + } + + foreach my $name ($info->testbr()->keylist()) { + if (&$brOp($self->testbr($name), $info->testbr($name), $filename)) { + $changed = 1; + } + } + if (&$brOp($self->sumbr(), $info->sumbr(), $filename)) { + $changed = 1; + } + + foreach my $name ($info->testcase_mcdc()->keylist()) { + if ( + &$mcdcOp($self->testcase_mcdc($name), $info->testcase_mcdc($name), + $filename) + ) { + $changed = 1; + } + } + if (&$mcdcOp($self->mcdc(), $info->mcdc(), $filename)) { + $changed = 1; + } + return $changed; +} + +# this package merely reads sourcefiles as they are found on the current +# filesystem - ie., the baseline version might have been modified/might +# have diffs - but the current version does not. 
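+# Illustrative usage sketch (editorial addition, not part of the change itself;
+# the calls below use only methods defined in this package):
+#     my $reader = ReadCurrentSource->new($filename); # load + record exclusion markers
+#     if ($reader->notEmpty() && !$reader->isExcluded($lineNo)) {
+#         my $src = $reader->getLine($lineNo);
+#     }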
+package ReadCurrentSource; + +our @source_directories; +our $searchPath; +our @dirs_used; +use constant { + FILENAME => 0, + PATH => 1, + SOURCE => 2, + EXCLUDE => 3, + + # reasons: (bitfield) + EXCLUDE_REGION => 0x10, + EXCLUDE_BRANCH_REGION => 0x20, + EXCLUDE_DIRECTIVE => 0x40, + OMIT_LINE => 0x80, + + # recorded exclusion markers + e_LINE => 0x1, + e_BRANCH => 0x2, + e_EXCEPTION => 0x4, + e_UNREACHABLE => 0x8, +}; + +sub new +{ + my ($class, $filename) = @_; + + # additional layer of indirection so derived class can hold its own data + my $self = [[]]; + bless $self, $class; + + $self->open($filename) if defined($filename); + return $self; +} + +sub close +{ + my $self = shift; + my $data = $self->[0]; + while (scalar(@$data)) { + pop(@$data); + } +} + +sub resolve_path +{ + my ($filename, $applySubstitutions) = @_; + $filename = lcovutil::subst_file_name($filename) if $applySubstitutions; + return $filename + if (-e $filename || + (!@lcovutil::resolveCallback && + (File::Spec->file_name_is_absolute($filename) || + 0 == scalar(@source_directories)))); + + # don't pass 'applySubstitutions' flag as we already did that, above + return $searchPath->resolve($filename, 0); +} + +sub warn_sourcedir_patterns +{ + $searchPath->warn_unused( + @source_directories ? '--source-directory' : 'source_directory = '); +} + +sub _load +{ + my ($self, $filename, $version) = @_; + my $data = $self->[0]; + + $version = "" unless defined($version); + my $path = resolve_path($filename); + if (open(SRC, "<", $path)) { + lcovutil::info(1, + "read $version$filename" . + ($path ne $filename ? " (at $path)" : '') . "\n"); + $data->[PATH] = $path; + my @sourceLines = ; + CORE::close(SRC) or die("unable to close $filename: $!\n"); + $data->[FILENAME] = $filename; + return \@sourceLines; + } else { + lcovutil::ignorable_error($lcovutil::ERROR_SOURCE, + "unable to open $filename: $!\n"); + $self->close(); + return undef; + } +} + +sub open +{ + my ($self, $filename, $version) = @_; + + my $srcLines = $self->_load($filename, $version); + if (defined($srcLines)) { + return $self->parseLines($filename, $srcLines); + } + return undef; +} + +sub path +{ + my $self = shift; + return $self->[0]->[PATH]; +} + +sub parseLines +{ + my ($self, $filename, $sourceLines) = @_; + + my @excluded; + my $exclude_region; + my $exclude_br_region; + my $exclude_exception_region; + my $line = 0; + my $excl_start = qr(\b$lcovutil::EXCL_START\b); + my $excl_stop = qr(\b$lcovutil::EXCL_STOP\b); + my $excl_line = qr(\b$lcovutil::EXCL_LINE\b); + my $excl_br_start = qr(\b$lcovutil::EXCL_BR_START\b); + my $excl_br_stop = qr(\b$lcovutil::EXCL_BR_STOP\b); + my $excl_br_line = qr(\b$lcovutil::EXCL_BR_LINE\b); + my $excl_ex_start = qr(\b$lcovutil::EXCL_EXCEPTION_BR_START\b); + my $excl_ex_stop = qr(\b$lcovutil::EXCL_EXCEPTION_BR_STOP\b); + my $excl_ex_line = qr(\b$lcovutil::EXCL_EXCEPTION_LINE\b); + my $unreachable_start = qr(\b$lcovutil::UNREACHABLE_START\b); + my $unreachable_stop = qr(\b$lcovutil::UNREACHABLE_STOP\b); + my $unreachable_line = qr(\b$lcovutil::UNREACHABLE_LINE\b); + # @todo: if we had annotated data here, then we could whine at the + # author of the unmatched start, extra end, etc. 
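+    # Editorial note: the start/stop patterns above are built from the
+    # configurable marker strings in lcovutil (by default the familiar
+    # LCOV_EXCL_START / LCOV_EXCL_STOP spellings); every line inside such a
+    # region is tagged below with the corresponding e_*/EXCLUDE_* bits in
+    # the @excluded array.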
+ + my $exclude_directives = + qr/^\s*#\s*((else|endif)|((ifdef|ifndef|if|elif|include|define|undef)\s+))/ + if (TraceFile::is_language('c', $filename) && + defined($lcovutil::cov_filter[$lcovutil::FILTER_DIRECTIVE])); + + my @excludes; + if (defined($lcovutil::cov_filter[$lcovutil::FILTER_EXCLUDE_REGION])) { + push(@excludes, + [$excl_start, $excl_stop, + \$exclude_region, e_LINE | e_BRANCH | EXCLUDE_REGION, + $lcovutil::EXCL_START, $lcovutil::EXCL_STOP + ]); + push(@excludes, + [$unreachable_start, $unreachable_stop, + \$exclude_region, e_UNREACHABLE | EXCLUDE_REGION, + $lcovutil::UNREACHABLE_START, $lcovutil::UNREACHABLE_STOP + ]); + } else { + $excl_line = undef; + $unreachable_line = undef; + } + + if (defined($lcovutil::cov_filter[$lcovutil::FILTER_EXCLUDE_BRANCH])) { + push(@excludes, + [$excl_ex_start, $excl_ex_stop, + \$exclude_exception_region, e_EXCEPTION | EXCLUDE_BRANCH_REGION, + $lcovutil::EXCL_BR_START, $lcovutil::EXCL_BR_STOP, + ], + [$excl_br_start, + $excl_br_stop, + \$exclude_br_region, + e_BRANCH | EXCLUDE_BRANCH_REGION, + $lcovutil::EXCL_EXCEPTION_BR_START, + $lcovutil::EXCL_EXCEPTION_BR_STOP, + ]); + } else { + $excl_br_line = undef; + $excl_ex_line = undef; + } + LINES: foreach (@$sourceLines) { + $line += 1; + my $exclude_branch_line = 0; + my $exclude_exception_branch_line = 0 + ; # per-line exception excludion not implemented at present. Probably unnecessary. + chomp($_); + s/\r//; # remove carriage return + if (defined($exclude_directives) && + $_ =~ $exclude_directives) { + # line contains compiler directive - exclude everything + push(@excluded, e_LINE | e_BRANCH | EXCLUDE_DIRECTIVE); + lcovutil::info(2, "directive '#$1' on $filename:$line\n"); + next; + } + + foreach my $d (@excludes) { + # note: $d->[3] is the exclude reason (mask) + # $d->[4] is the 'start' string (not converted to perl regexp) + # $d->[5] is the 'stop' string + my ($start, $stop, $ref, $reason) = @$d; + if ($_ =~ $start) { + lcovutil::ignorable_error($lcovutil::ERROR_MISMATCH, + "$filename: overlapping exclude directives. Found " . + $d->[4] . + " at line $line - but no matching " . $d->[5] . + ' for ' . $d->[4] . ' at line ' . $$ref->[0]) + if $$ref; + $$ref = [$line, $reason, $d->[4], $d->[5]]; + last; + } elsif ($_ =~ $stop) { + lcovutil::ignorable_error($lcovutil::ERROR_MISMATCH, + "$filename: found " . $d->[5] . + " directive at line $line without matching " . + ($$ref ? $$ref->[2] : $d->[4]) . ' directive') + unless $$ref && + $$ref->[2] eq $d->[4] && + $$ref->[3] eq $d->[5]; + $$ref = undef; + last; + } + } + if (defined($excl_line) && + $_ =~ $excl_line) { + push(@excluded, e_LINE | e_BRANCH | EXCLUDE_REGION) + ; #everything excluded + next; + } elsif (defined($unreachable_line) && + $_ =~ $unreachable_line) { + push(@excluded, e_UNREACHABLE | EXCLUDE_REGION) + ; #everything excluded + next; + } elsif (defined($excl_br_line) && + $_ =~ $excl_br_line) { + $exclude_branch_line = e_BRANCH | EXCLUDE_BRANCH_REGION; + } elsif (defined($excl_ex_line) && + $_ =~ $excl_ex_line) { + $exclude_branch_line = e_EXCEPTION | EXCLUDE_BRANCH_REGION; + } elsif (0 != scalar(@lcovutil::omit_line_patterns)) { + foreach my $p (@lcovutil::omit_line_patterns) { + my $pat = $p->[0]; + if ($_ =~ $pat) { + push(@excluded, e_LINE | e_BRANCH | OMIT_LINE) + ; #everything excluded + #lcovutil::info("'" . $p->[-2] . "' matched \"$_\", line \"$filename\":"$line\n"); + ++$p->[-1]; + next LINES; + } + } + } + push(@excluded, + ($exclude_region ? $exclude_region->[1] : 0) | + ($exclude_br_region ? 
$exclude_br_region->[1] : 0) | ( + $exclude_exception_region ? $exclude_exception_region->[1] : 0 + ) | $exclude_branch_line | $exclude_exception_branch_line); + } + my @dangling; + if ($exclude_region) { + if ($exclude_region->[1] & e_UNREACHABLE) { + push(@dangling, + [$exclude_region, $lcovutil::UNREACHABLE_START, + $lcovutil::UNREACHABLE_STOP + ]); + } else { + push(@dangling, + [$exclude_region, $lcovutil::EXCL_START, $lcovutil::EXCL_STOP] + ); + } + } + foreach my $t (@dangling, + [$exclude_br_region, $lcovutil::EXCL_BR_START, + $lcovutil::EXCL_BR_STOP + ], + [$exclude_exception_region, + $lcovutil::EXCL_EXCEPTION_BR_START, + $lcovutil::EXCL_EXCEPTION_BR_STOP + ] + ) { + my ($key, $start, $stop) = @$t; + lcovutil::ignorable_error($lcovutil::ERROR_MISMATCH, + "$filename: unmatched $start at line " . + $key->[0] . + " - saw EOF while looking for matching $stop" + ) if ($key); + } + my $data = $self->[0]; + $data->[FILENAME] = $filename; + $data->[SOURCE] = $sourceLines; + $data->[EXCLUDE] = \@excluded; + return $self; +} + +sub notEmpty +{ + my $self = shift; + return 0 != scalar(@{$self->[0]}); +} + +sub filename +{ + return $_[0]->[0]->[FILENAME]; +} + +sub numLines +{ + my $self = shift; + return scalar(@{$self->[0]->[SOURCE]}); +} + +sub getLine +{ + my ($self, $line) = @_; + + return $self->isOutOfRange($line) ? + undef : + $self->[0]->[SOURCE]->[$line - 1]; +} + +sub getExpr +{ + my ($self, $startLine, $startCol, $endLine, $endCol) = @_; + die("bad range [$startLine:$endLine]") unless $endLine >= $startLine; + return 'NA' unless $endLine <= $self->numLines(); + + my $line = $self->getLine($startLine); + my $expr; + if ($startLine == $endLine) { + $expr = substr($line, $startCol - 1, $endCol - $startCol); + } else { + $expr = substr($line, $startCol - 1); + for (my $l = $startLine + 1; $l < $endLine; ++$l) { + $expr .= $self->getLine($l); + } + $line = $self->getLine($endLine); + $expr .= substr($line, 0, $endCol); + } + $expr =~ /^\s*(.+?)\s*$/; + return $1; +} + +sub isOutOfRange +{ + my ($self, $lineNo, $context) = @_; + my $data = $self->[0]; + if (defined($data->[EXCLUDE]) && + scalar(@{$data->[EXCLUDE]}) < $lineNo) { + + # Can happen due to version mismatches: data extracted with + # version N of the file, then generating HTML with version M + # "--version-script callback" option can be used to detect this. + # Another case happens due to apparent bugs in some old 'gcov' + # versions - which sometimes inserts out-of-range line numbers + # when macro is used as last line in file. + + my $filt = $lcovutil::cov_filter[$lcovutil::FILTER_LINE_RANGE]; + if (defined($filt)) { + my $c = ($context eq 'line') ? 'line' : "$context at line"; + lcovutil::info(2, + "filter out-of-range $c $lineNo in " . + $self->filename() . " (" . + scalar(@{$data->[EXCLUDE]}) . + " lines in file)\n"); + ++$filt->[-2]; # applied in 1 location + ++$filt->[-1]; # one coverpoint suppressed + return 1; + } + my $key = $self->filename(); + $key .= $lineNo unless $lcovutil::warn_once_per_file; + if (lcovutil::warn_once($lcovutil::ERROR_RANGE, $key)) { + my $c = ($context eq 'line') ? 'line' : "$context at line"; + my $msg = + "unknown $c '$lineNo' in " . + $self->filename() . ": there are only " . + scalar(@{$data->[EXCLUDE]}) . 
" lines in the file."; + if ($lcovutil::verbose || + 0 == lcovutil::message_count($lcovutil::ERROR_RANGE)) { + # only print verbose addition on first message + $msg .= lcovutil::explain_once( + 'version_script', + [ "\n Issue can be caused by code changes/version mismatch: see the \"--version-script script_file\" discussion in the genhtml man page.", + $lcovutil::tool_name ne 'geninfo' + ], + "\n Use '$lcovutil::tool_name --filter range' to remove out-of-range lines." + ); + } + # some versions of gcov seem to make up lines that do not exist - + # this appears to be related to macros on last line in file + lcovutil::store_deferred_message($lcovutil::ERROR_RANGE, + 1, $key, $msg); + } + # Note: if user ignored the error, then we return 'not out of range'. + # The line is out of range/something is wrong - but the user did not + # ask us to filter it out. + } + return 0; +} + +sub excludeReason +{ + my ($self, $lineNo) = @_; + my $data = $self->[0]; + die("missing data at $lineNo") + unless (defined($data->[EXCLUDE]) && + scalar(@{$data->[EXCLUDE]}) >= $lineNo); + return $data->[EXCLUDE]->[$lineNo - 1] & 0xFF0; +} + +sub isExcluded +{ + # returns: the value of the matched flags + # - non-zero if the line is excluded (in an excluded or unreachable + # region), or if '$flags" is set and the exclusion reason includes + # at least one of the flags. + # - The latter condition is used to check for branch-only or execption- + # only exclusions, as well as to check whether this line is + # unreachable (as opposed to excluded). + my ($self, $lineNo, $flags, $skipRangeCheck) = @_; + my $data = $self->[0]; + if (!defined($data->[EXCLUDE]) || scalar(@{$data->[EXCLUDE]}) < $lineNo) { + # this can happen due to version mismatches: data extracted with + # version N of the file, then generating HTML with version M + # "--version-script callback" option can be used to detect this + + # if we are just checking whether this line in in an unreachable region, + # then don't check for out-of-range (that check happens later) + return 0 + if $skipRangeCheck; + my $key = $self->filename(); + $key .= $lineNo unless ($lcovutil::warn_once_per_file); + my $suffix = lcovutil::explain_once( + 'version-script', + [ "\n Issue can be caused by code changes/version mismatch; see the \"--version-script script_file\" discussion in the genhtml man page.", + $lcovutil::verbose || + lcovutil::message_count($lcovutil::ERROR_RANGE) == 0 + ]); + lcovutil::store_deferred_message( + $lcovutil::ERROR_RANGE, + 1, $key, + "unknown line '$lineNo' in " . $self->filename() + . + ( + defined($data->[EXCLUDE]) ? + (" there are only " . + scalar(@{$data->[EXCLUDE]}) . " lines in the file.") : + "") . 
+ $suffix) if lcovutil::warn_once($lcovutil::ERROR_RANGE, $key); + return 0; # even though out of range - this is not excluded by filter + } + my $reason; + if ($flags && + 0 != ($reason = ($data->[EXCLUDE]->[$lineNo - 1] & $flags))) { + return $reason; + } + return $data->[EXCLUDE]->[$lineNo - 1] & (e_LINE | e_UNREACHABLE); +} + +sub removeComments +{ + my $line = shift; + $line =~ s|//.*$||; + $line =~ s|/\*.*\*/||g; + return $line; +} + +sub isCharacter +{ + my ($self, $line, $char) = @_; + + my $code = $self->getLine($line); + return 0 + unless defined($code); + $code = removeComments($code); + return ($code =~ /^\s*${char}\s*$/); +} + +# is line empty +sub isBlank +{ + my ($self, $line) = @_; + + my $code = $self->getLine($line); + return 0 + unless defined($code); + $code = removeComments($code); + return ($code =~ /^\s*$/); +} + +sub is_initializerList +{ + my ($self, $line) = @_; + return 0 unless defined($self->[0]->[SOURCE]) && $line < $self->numLines(); + my $code = ''; + my $l = $line; + my $foundExpr = 0; + while ($l < $self->numLines()) { + my $src = $self->getLine($l); + # append to string until we find close brace...then look for next one... + $code = removeComments($code . $src); + # believe that initialization expressions are either numeric or C strings + while ($code =~ + s/\s+("[^"]*"|0x[0-9a-fA-F]+|[-+]?[0-9]+((\.[0-9]+)([eE][-+][0-9]+)?)?)\s*,?// + ) { + $foundExpr = 1; + } + # remove matching {} brace pairs - assume a sub-object initializer + $code =~ s/\s*{\s*,?\s*}\s*,?\s*//; + last if $code =~ /[};]/; # unmatched close or looks like statement + last unless $code =~ /^\s*([{}]\s*)*$/; + ++$l; + } + return $foundExpr ? $l - $line : 0; # return number of consecutive lines +} + +sub containsConditional +{ + my ($self, $line) = @_; + + # special case - maybe C++ exception handler on close brace at end of function? + return 0 + if $self->isCharacter($line, '}'); + my $src = $self->getLine($line); + return 1 + unless defined($src); + + my $code = ""; + for (my $next = $line + 1; + defined($src) && ($next - $line) < $lcovutil::source_filter_lookahead; + ++$next) { + + $src = lcovutil::simplifyCode($src); + + my $bitwiseOperators = + $lcovutil::source_filter_bitwise_are_conditional ? '&|~' : ''; + + return 1 + if ($src =~ + /([?!><$bitwiseOperators]|&&|\|\||==|!=|\b(if|switch|case|while|for)\b)/ + ); + $code = $code . $src; + + if (lcovutil::balancedParens($code)) { + return 0; # got to the end and didn't see conditional + } elsif ($src =~ /[{;]\s*$/) { + # assume we got to the end of the statement if we see semicolon + # or brace. + # parens weren't balanced though - so assume this might be + # a conditional + return 1; + } + $src = $self->getLine($next); + $src = '' unless defined($src); + } + return 1; # not sure - so err on side of caution +} + +sub containsTrivialFunction +{ + my ($self, $start, $end) = @_; + return 0 + if (1 + $end - $start >= $lcovutil::trivial_function_threshold); + my $text = ''; + for (my $line = $start; $line <= $end; ++$line) { + my $src = $self->getLine($line); + $src = '' unless defined($src); + chomp($src); + $src =~ s/\s+$//; # whitespace + $src =~ s#//.*$##; # remove end-of-line comments + $text .= $src; + } + # remove any multiline comments that were present: + $text =~ s#/\*.*\*/##g; + # remove whitespace + $text =~ s/\s//g; + # remove :: C++ separator + $text =~ s/:://g; + if ($text =~ /:/) { + return 0; + } + + # does code end with '{}', '{;}' or '{};'? + # Or: is this just a close brace? 
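+    # Illustration (hypothetical input): "void f() { }" reduces to "voidf(){}"
+    # after the stripping above and matches the pattern below, whereas a
+    # function body containing a real statement does not.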
+ if ($text =~ /(\{;?|^)\};?$/) { + return 1; + } + return 0; +} + +# check if this line is a close brace with zero hit count that should be +# suppressed. We want to ignore spurious zero on close brace; depending +# on what gcov did the last time (zero count, no count, nonzero count) - +# it might be interpreted as UIC - which will violate our coverage criteria. +# We want to ignore this line if: +# - the line contain only a closing brace and +# - previous line is hit, OR +# - previous line is not an open-brace which has no associated +# count - i.e., this is not an empty block where the zero +# count is tagged to the closing brace, OR +# is line empty (no code) and +# - count is zero, and +# - either previous or next non-blank lines have an associated count +# +sub suppressCloseBrace +{ + my ($self, $lineNo, $count, $lineCountData) = @_; + + my $suppress = 0; + if ($self->isCharacter($lineNo, '}')) { + for (my $prevLine = $lineNo - 1; $prevLine >= 0; --$prevLine) { + my $prev = $lineCountData->value($prevLine); + if (defined($prev)) { + # previous line was executable + $suppress = 1 + if ($prev == $count || + ($count == 0 && + $prev > 0)); + + lcovutil::info(3, + "not skipping brace line $lineNo because previous line $prevLine hit count didn't match: $prev != $count" + ) unless $suppress; + last; + } elsif ($count == 0 && + # previous line not executable - was it an open brace? + $self->isCharacter($prevLine, '{') + ) { + # look 'up' from the open brace to find the first + # line which has an associated count - + my $code = ""; + for (my $l = $prevLine - 1; $l >= 0; --$l) { + $code = $self->getLine($l) . $code; + my $prevCount = $lineCountData->value($l); + if (defined($prevCount)) { + # don't suppress if previous line not hit either + last + if $prevCount == 0; + # if first non-whitespace character is a colon - + # then this looks like a C++ initialization list. + # suppress. 
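+                        # Illustration (hypothetical C++ source): given
+                        #     Foo::Foo()
+                        #         : member_(0)
+                        #     {
+                        #     }    <- spurious zero count here
+                        # the collected code starts with ':', so the zero on
+                        # the closing brace is treated as an initializer-list
+                        # artifact and suppressed (provided the ':' line itself
+                        # was hit).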
+ if ($code =~ /^\s*:(\s|[^:])/) { + $suppress = 1; + } else { + $code = lcovutil::filterStringsAndComments($code); + $code = lcovutil::simplifyCode($code); + # don't suppress if this looks like a conditional + $suppress = 1 + unless ( + $code =~ /\b(if|switch|case|while|for)\b/); + } + last; + } + } # for each prior line (looking for statement before block) + last; + } # if (line was an open brace) + } # foreach prior line + } # if line was close brace + return $suppress; +} + +package TraceFile; + +our $ignore_testcase_name; # use default name, if set +use constant { + FILES => 0, + COMMENTS => 1, + STATE => 2, # operations performed: don't do them again + + DID_FILTER => 1, + DID_DERIVE => 2, +}; + +sub load +{ + my ($class, $tracefile, $readSource, $verify_checksum, + $ignore_function_exclusions) + = @_; + my $self = $class->new(); + my $context = MessageContext->new("loading $tracefile"); + + $self->_read_info($tracefile, $readSource, $verify_checksum); + + $self->applyFilters($readSource); + return $self; +} + +sub new +{ + my $class = shift; + my $self = [{}, [], 0]; + bless $self, $class; + + return $self; +} + +sub serialize +{ + my ($self, $filename) = @_; + + my $data = Storable::store($self, $filename); + die("serialize failed") unless defined($data); +} + +sub deserialize +{ + my ($class, $file) = @_; + my $self = Storable::retrieve($file) or + die("unable to deserialize $file\n"); + ref($self) eq $class or die("did not deserialize a $class"); + return $self; +} + +sub empty +{ + my $self = shift; + + return !keys(%{$self->[FILES]}); +} + +sub files +{ + my $self = shift; + + # for case-insensitive support: need to store the file keys in + # lower case (so they can be found) - but return the actual + # names of the files (mixed case) + + return keys %{$self->[FILES]}; +} + +sub directories +{ + my $self = shift; + # return hash of directories which contain source files + my %dirs; + foreach my $f ($self->files()) { + my $d = File::Basename::dirname($f); + $dirs{$d} = [] unless exists($dirs{$d}); + push(@{$dirs{$d}}, $f); + } + return \%dirs; +} + +sub file_exists +{ + my ($self, $name) = @_; + $name = lc($name) if $lcovutil::case_insensitive; + return exists($self->[FILES]->{$name}); +} + +sub count_totals +{ + my $self = shift; + # return list of (number files, [#lines, #hit], [#branches, #hit], [#functions,#hit]) + my @data = (0, [0, 0], [0, 0], [0, 0], [0, 0]); + foreach my $filename ($self->files()) { + my $entry = $self->data($filename); + ++$data[0]; + $data[1]->[0] += $entry->found(); # lines + $data[1]->[1] += $entry->hit(); + $data[2]->[0] += $entry->branch_found(); # branch + $data[2]->[1] += $entry->branch_hit(); + $data[3]->[0] += $entry->function_found(); # function + $data[3]->[1] += $entry->function_hit(); + + if ($lcovutil::mcdc_coverage) { + $data[4]->[0] += $entry->mcdc_found(); # mcdc + $data[4]->[1] += $entry->mcdc_hit(); + } + } + return @data; +} + +sub check_fail_under_criteria +{ + my ($self, $type) = @_; + my @types; + if (!defined($type)) { + push(@types, 'line'); + push(@types, 'branch', 'condition') if $lcovutil::br_coverage; + } else { + push(@types, $type); + } + + foreach my $t (@types) { + my ($rate, $plural, $idx); + if ($t eq 'line') { + next unless defined($lcovutil::fail_under_lines); + $rate = $lcovutil::fail_under_lines; + $idx = 1; # lines + $plural = 'lines'; + } else { + next unless defined($lcovutil::fail_under_branches); + $rate = $lcovutil::fail_under_branches; + $idx = 2; + $plural = 'branches'; + } + next if $rate <= 0; + my 
@counts = $self->count_totals(); + my ($found, $hit) = @{$counts[$idx]}; + if ($found == 0) { + lcovutil::info(1, "No $plural found\n"); + return "No $plural found"; + } + my $actual_rate = ($hit / $found); + my $expected_rate = $rate / 100; + if ($actual_rate < $expected_rate) { + my $msg = + sprintf("Failed '$t' coverage criteria: %0.2f < %0.2f", + $actual_rate, $expected_rate); + lcovutil::info("$msg\n"); + return $msg; + } + } + return 0; +} + +sub checkCoverageCriteria +{ + my $self = shift; + + CoverageCriteria::check_failUnder($self); + + return unless defined($CoverageCriteria::criteriaCallback); + + my $perFile = 0 == scalar(@CoverageCriteria::criteriaCallbackLevels) || + grep(/file/, @CoverageCriteria::criteriaCallbackLevels); + my %total = ('line' => { + 'found' => 0, + 'hit' => 0 + }, + 'branch' => { + 'found' => 0, + 'hit' => 0 + }, + 'condition' => { + 'found' => 0, + 'hit' => 0 + }, + 'function' => { + 'found' => 0, + 'hit' => 0 + }); + my %data; + foreach my $filename ($self->files()) { + my $entry = $self->data($filename); + my @data = ($entry->found(), $entry->hit(), + $entry->branch_found(), $entry->branch_hit(), + $entry->function_found(), $entry->function_hit()); + my $idx = 0; + foreach my $t ('line', 'branch', 'function') { + foreach my $x ('found', 'hit') { + $data{$t}->{$t} = $data[$idx] if $perFile; + $total{$t}->{$x} += $data[$idx++]; + } + } + if ($perFile) { + CoverageCriteria::executeCallback('file', $filename, \%data); + } + } + CoverageCriteria::executeCallback('top', 'top', \%total); +} + +# +# print_summary(fn_do, br_do) +# +# Print overall coverage rates for the specified coverage types. +# $countDat is the array returned by 'TraceFile->count_totals()' + +sub print_summary +{ + my ($self, $fn_do, $br_do, $mcdc_do) = @_; + + $br_do = $lcovutil::br_coverage unless defined($br_do); + $mcdc_do = $lcovutil::mcdc_coverage unless defined($mcdc_do); + $fn_do = $lcovutil::func_coverage unless defined($fn_do); + my @counts = $self->count_totals(); + lcovutil::info("Summary coverage rate:\n"); + lcovutil::info(" source files: %d\n", $counts[0]); + lcovutil::info(" lines.......: %s\n", + lcovutil::get_overall_line( + $counts[1]->[0], $counts[1]->[1], "line" + )); + lcovutil::info(" functions...: %s\n", + lcovutil::get_overall_line( + $counts[3]->[0], $counts[3]->[1], "function" + )) if ($fn_do); + lcovutil::info(" branches....: %s\n", + lcovutil::get_overall_line( + $counts[2]->[0], $counts[2]->[1], "branch" + )) if ($br_do); + lcovutil::info(" conditions..: %s\n", + lcovutil::get_overall_line( + $counts[4]->[0], $counts[4]->[1], "conditions" + )) if ($mcdc_do); +} + +sub skipCurrentFile +{ + my $filename = shift; + + my $filt = $lcovutil::cov_filter[$lcovutil::FILTER_MISSING_FILE]; + if ($filt) { + my $missing = !-r $filename; + if ($missing && + $lcovutil::resolveCallback) { + + my $path = SearchPath::resolveCallback($filename, 0, 1); + $missing = !defined($path) || '' eq $path; + } + + if ($missing) { + lcovutil::info( + "Excluding \"$filename\": does not exist/is not readable\n"); + ++$filt->[-2]; + ++$filt->[-1]; + return 1; + } + } + + # check whether this file should be excluded or not... + foreach my $p (@lcovutil::exclude_file_patterns) { + my $pattern = $p->[0]; + if ($filename =~ $pattern) { + lcovutil::info(1, "exclude $filename: matches '" . $p->[1] . 
"\n"); + ++$p->[-1]; + return 1; # all done - explicitly excluded + } + } + if (@lcovutil::include_file_patterns) { + foreach my $p (@lcovutil::include_file_patterns) { + my $pattern = $p->[0]; + if ($filename =~ $pattern) { + lcovutil::info(1, + "include: $filename: matches '" . $p->[1] . "\n"); + ++$p->[-1]; + return 0; # explicitly included + } + } + lcovutil::info(1, "exclude $filename: no include matches\n"); + return 1; # not explicitly included - so exclude + } + return 0; +} + +sub comments +{ + my $self = shift; + return @{$self->[COMMENTS]}; +} + +sub add_comments +{ + my $self = shift; + foreach (@_) { + push(@{$self->[COMMENTS]}, $_); + } +} + +sub data +{ + my $self = shift; + my $file = shift; + my $checkMatchingBasename = shift; + + my $key = $lcovutil::case_insensitive ? lc($file) : $file; + my $files = $self->[FILES]; + if (!exists($files->{$key})) { + if (defined $checkMatchingBasename) { + # check if there is a file in the map that has the same basename + # as the lone we are looking for. + # this can happen if the 'udiff' file refers to paths in the repo + # whereas the .info files refer to paths in the build area. + my $base = File::Basename::basename($file); + $base = lc($base) if $lcovutil::case_insensitive; + my $count = 0; + my $found; + foreach my $f (keys %$files) { + my $b = File::Basename::basename($f); + $b = lc($b) if $lcovutil::case_insensitive; + if ($b eq $base) { + $count++; + $found = $files->{$f}; + } + } + return $found + if $count == 1; + } + $files->{$key} = TraceInfo->new($file); + } + + return $files->{$key}; +} + +sub contains +{ + my ($self, $file) = @_; + my $key = $lcovutil::case_insensitive ? lc($file) : $file; + my $files = $self->[FILES]; + return exists($files->{$key}); +} + +sub remove +{ + my ($self, $filename) = @_; + $filename = lc($filename) if $lcovutil::case_insensitive; + $self->file_exists($filename) or + die("remove nonexistent file $filename"); + delete($self->[FILES]->{$filename}); +} + +sub insert +{ + my ($self, $filename, $data) = @_; + $filename = lc($filename) if $lcovutil::case_insensitive; + die("insert existing file $filename") + if $self->file_exists($filename); + die("expected TraceInfo got '" . ref($data) . "'") + unless (ref($data) eq 'TraceInfo'); + $self->[FILES]->{$filename} = $data; +} + +sub merge_tracefile +{ + my ($self, $trace, $op) = @_; + die("expected TraceFile") + unless (defined($trace) && 'TraceFile' eq ref($trace)); + + my $changed = 0; + my $mine = $self->[FILES]; + my $yours = $trace->[FILES]; + foreach my $filename (keys %$mine) { + + if (exists($yours->{$filename})) { + # this file in both me and you...merge as appropriate + #lcovutil::info(1, "merge common $filename\n"); + if ($self->data($filename) + ->merge($yours->{$filename}, $op, $filename)) { + $changed = 1; + } + } else { + # file in me and not you - remove mine if intersect operation + if ($op == TraceInfo::INTERSECT) { + #lcovutil::info(1, "removing my $filename: intersect\n"); + delete $mine->{$filename}; + $changed = 1; + } + } + } + if ($op == TraceInfo::UNION) { + # now add in any files from you that are not present in me... 
+ while (my ($filename, $data) = each(%$yours)) { + if (!exists($mine->{$filename})) { + $mine->{$filename} = $data; + $changed = 1; + } + } + } + $self->add_comments($trace->comments()); + return $changed; +} + +sub _eraseFunction +{ + my ($fcn, $name, $end_line, $source_file, $functionMap, + $lineData, $branchData, $mcdcData, $checksum) = @_; + if (defined($end_line)) { + for (my $line = $fcn->line(); $line <= $end_line; ++$line) { + + if (defined($checksum)) { + $checksum->remove($line, 1); # remove if present + } + if ($lineData->remove($line, 1)) { + lcovutil::info(2, + "exclude DA in FN '$name' on $source_file:$line\n"); + } + if (defined($branchData) && $branchData->remove($line, 1)) { + lcovutil::info(2, + "exclude BRDA in FN '$name' on $source_file:$line\n"); + } + if (defined($mcdcData) && $mcdcData->remove($line, 1)) { + lcovutil::info(2, + "exclude MCDC in FN '$name' on $source_file:$line\n"); + } + } # foreach line + } + # remove this function and all its aliases... + $functionMap->remove($fcn); +} + +sub _eraseFunctions +{ + my ($source_file, $srcReader, $functionMap, $lineData, $branchData, + $mcdcData, $checksum, $state, $isMasterList) = @_; + + my $modified = 0; + my $removeTrivial = $cov_filter[$FILTER_TRIVIAL_FUNCTION]; + FUNC: foreach my $key ($functionMap->keylist()) { + my $fcn = $functionMap->findKey($key); + my $end_line = $fcn->end_line(); + my $name = $fcn->name(); + if (!defined($end_line)) { + ++$state->[0]->[1]; # mark that we don't have an end line + # we can skip out of processing if we don't know the end line + # - there is no way for us to remove line and branch points in + # the function region + # Or we can keep going and at least remove the matched function + # coverpoint. + #last; # at least for now: keep going + lcovutil::info(1, "no end line for '$name' at $key\n"); + } elsif ( + defined($removeTrivial) && + is_language('c', $source_file) && + (defined($srcReader) && + $srcReader->containsTrivialFunction($fcn->line(), $end_line)) + ) { + # remove single-line functions which has no body + # Only count what we removed from the top level/master list - + # - otherwise, we double count for every testcase. + ++$removeTrivial->[-2] if $isMasterList; + foreach my $alias (keys %{$fcn->aliases()}) { + lcovutil::info(1, + "\"$source_file\":$end_line: filter trivial FN $alias\n"); + _eraseFunction($fcn, $alias, $end_line, + $source_file, $functionMap, $lineData, + $branchData, $mcdcData, $checksum); + ++$removeTrivial->[-1] if $isMasterList; + } + $modified = 1; + next FUNC; + } + foreach my $p (@lcovutil::exclude_function_patterns) { + my $pat = $p->[0]; + my $a = $fcn->aliases(); + foreach my $alias (keys %$a) { + if ($alias =~ $pat) { + ++$p->[-1] if $isMasterList; + if (defined($end_line)) { + # if user ignored the unsupported message, then the + # best we can do is to remove the matched function - + # and leave the lines and branches in place + lcovutil::info( + 1 + (0 == $isMasterList), + "exclude FN $name line range $source_file:[" . + $fcn->line() . + ":$end_line] due to '" . $p->[-2] . 
"'\n" + ); + } + _eraseFunction($fcn, $alias, $end_line, + $source_file, $functionMap, $lineData, + $branchData, $mcdcData, $checksum); + $modified = 1; + next FUNC; + } # if match + } # foreach alias + } # foreach pattern + # warn if the function is in an unreachable region but is hit - + # easiest to check here so we emit only one message per function + my $line; + my $reason; + if ($srcReader && + 0 != ($reason = + $srcReader->isExcluded(($line = $fcn->line()), + $srcReader->e_UNREACHABLE, 1)) && + 0 != ($reason & $srcReader->e_UNREACHABLE) && + 0 != $fcn->hit() + ) { + + lcovutil::ignorable_error($lcovutil::ERROR_UNREACHABLE, + "\"$source_file\":$line: function $name is executed but was marked unreachable." + ); + next + if $lcovutil::retainUnreachableCoverpointIfHit; + } + + } # foreach function + return $modified; +} + +sub _deriveFunctionEndLines +{ + my $traceInfo = shift; + my $modified = 0; + + my $start = Time::HiRes::gettimeofday(); + my $lineData = $traceInfo->sum(); + my @lines = sort { $a <=> $b } $lineData->keylist(); + # sort functions by start line number + # ignore lambdas - which we don't process correctly at the moment + # (would need to do syntactic search for the end line) + my @functions = sort { $a->line() <=> $b->line() } + grep({ !$_->isLambda() } $traceInfo->func()->valuelist()); + + my $currentLine = @lines ? shift(@lines) : 0; + my $funcData = $traceInfo->testfnc(); + FUNC: while (@functions) { + my $func = shift(@functions); + my $first = $func->line(); + my $end = $func->end_line(); + #unless (defined($lineData->value($first))) { + # lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + # '"' . $func->filename() . + # "\":$first: first line of function has no linecov."); + # $lineData->append($first, $func->hit()); + #} + while ($first > $currentLine) { + if (@lines) { + last if $lines[0] > $first; + $currentLine = shift @lines; + } else { + if (!defined($end)) { + my $suffix = + lcovutil::explain_once('derive_end_line', + " See lcovrc man entry for 'derive_function_end_line'." + ); + lcovutil::ignorable_error( + $lcovutil::ERROR_INCONSISTENT_DATA, + '"' . $traceInfo->filename() . + "\":$first: function " . $func->name() . + " found on line but no corresponding 'line' coverage data point. Cannot derive function end line." + . $suffix); + } + next FUNC; + } + } + if (!defined($end)) { + # where is the next function? Find the last 'line' coverpoint + # less than the start line of that function.. + if (@lines) { + # if there are no more lines in this file - then everything + # must be ending on the last line we saw + if (@functions) { + my $next_func = $functions[0]; + my $start = $next_func->line(); + while (@lines && + $lines[0] < $start) { + $currentLine = shift @lines; + } + } else { + # last line in the file must be the last line + # of this function + if (@lines) { + $currentLine = $lines[-1]; + } else { + my $suffix = lcovutil::explain_once('derive_end_line', + " See lcovrc man entry for 'derive_function_end_line'." + ); + lcovutil::ignorable_error( + $lcovutil::ERROR_INCONSISTENT_DATA, + '"' . $traceInfo->filename() . + "\":$first: function " . $func->name() . + ": last line in file is not last line of function.$suffix" + ); + next FUNC; + } + } + } elsif ($currentLine < $first) { + # we ran out of lines in the data...check for inconsistency + my $suffix = + lcovutil::explain_once('derive_end_line', + " See lcovrc man entry for 'derive_function_end_line'."); + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + '"' . 
$traceInfo->filename() . + "\":$first: function " . $func->name() . + " found on line but no corresponding 'line' coverage data point. Cannot derive function end line." + . $suffix); + + # last FUNC; # quit looking here - all the other functions after this one will have same issue + next FUNC; # warn about them all + } + lcovutil::info(1, + '"' . $traceInfo->filename() . + "\":$currentLine: assign end_line " . + $func->name() . "\n"); + # warn that we are deriving end lines + _generate_end_line_message(); + $func->set_end_line($currentLine); + $modified = 1; + } + # we may not have set the end line above due to inconsistency + # but we also might not have line data + # - see .../tests/lcov/extract with gcc/4.8 + if (!defined($func->end_line())) { + my $suffix = + lcovutil::explain_once('derive_end_line', + " See lcovrc man entry for 'derive_function_end_line'."); + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + '"' . $func->filename() . '":' . $func->line() . + ': failed to set end line for function ' . + $func->name() . '.' . $suffix); + next FUNC; + } + + # now look for this function in each testcase - + # set the same endline (if not already set) + my $key = $first; + foreach my $tn ($funcData->keylist()) { + my $d = $funcData->value($tn); + my $f = $d->findKey($key); + if (defined($f)) { + if (!defined($f->end_line())) { + $f->set_end_line($func->end_line()); + $modified = 1; + } else { + if ($f->end_line() != $func->end_line()) { + lcovutil::ignorable_error( + $lcovutil::ERROR_INCONSISTENT_DATA, + '"' . $func->file() . + '":' . $first . ': function \'' . + $func->name() . ' last line is ' . + $func->end_line() . ' but is ' . + $f->end_line() . " in testcase '$tn'" + ); + } + } + } + } #foreach testcase + } # for each function + my $end = Time::HiRes::gettimeofday(); + $lcovutil::profileData{derive_end}{$traceInfo->filename()} = $end - $start; + return $modified; +} + +sub _consistencySuffix +{ + return lcovutil::explain_once('consistency_check', + "\n\tTo skip consistency checks, see the 'check_data_consistency' section in man lcovrc(5)." + ); +} + +sub _fixFunction +{ + my ($traceInfo, $func, $count) = @_; + + my @fix = ($func); + my $line = $func->line(); + my $per_testcase = $traceInfo->testfnc(); + foreach my $testname ($per_testcase->keylist()) { + my $data = $traceInfo->testfnc($testname); + my $f = $data->findKey($line); + push(@fix, $f) if defined($f); + } + + foreach my $f (@fix) { + $f->[FunctionEntry::COUNT] = $count; + + # and mark that each alias was hit... + my $aliases = $f->aliases(); + foreach my $alias (keys %$aliases) { + $aliases->{$alias} += $count; + } + } +} + +sub _checkConsistency +{ + return unless $lcovutil::check_data_consistency; + my $traceInfo = shift; + my $modified = 0; + + my $start = Time::HiRes::gettimeofday(); + + my @functions = sort { $a->line() <=> $b->line() } + grep({ defined($_->end_line()) } $traceInfo->func()->valuelist()); + my $lineData = $traceInfo->sum(); + my @lines = sort { $a <=> $b } $lineData->keylist() + if @functions; + my $currentLine = @lines ? 
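To summarize the derivation above: functions are visited in start-line order, and a function with an unknown end line is assumed to end at the last covered line before the next function starts (or at the last covered line in the file, for the final function). Below is a compact sketch under that assumption, ignoring lambdas and the inconsistency diagnostics; all names are illustrative.

    use strict;
    use warnings;

    # @$starts: sorted function start lines; @$lines: sorted DA line numbers.
    # Returns a map of start_line -> derived end_line.
    sub derive_end_lines {
        my ($starts, $lines) = @_;
        my %end;
        for my $i (0 .. $#$starts) {
            my $start = $starts->[$i];
            my $limit = $i < $#$starts ? $starts->[$i + 1] : undef;
            my @candidates = grep { $_ >= $start &&
                                    (!defined($limit) || $_ < $limit) } @$lines;
            $end{$start} = $candidates[-1] if @candidates;
        }
        return \%end;
    }

    my $end = derive_end_lines([10, 30], [10, 12, 18, 30, 31, 35]);
    # $end->{10} == 18, $end->{30} == 35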
shift(@lines) : 0; + FUNC: while (@functions) { + my $func = shift(@functions); + my $first = $func->line(); + my $end = $func->end_line(); + my $imHit = $func->hit() != 0; # I'm hit if any aliases is hit + my $lineHit = 0; + while ($first > $currentLine) { + # skip until we find the first line of the current function + if (@lines) { + $currentLine = shift(@lines); + } else { + # can only get here with really inconsistent data...would have + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + '"' . $func->filename() . + "\":$first: file linecov does not match function cov data - skipping checks." + ); + last FUNC; + } + } + while ($end >= $currentLine) { + # look for first covered line in this function - + # sufficient to just look at the such line + die("bug: " . $func->filename() . " [$first:$end]: $currentLine") + unless $first <= $currentLine && $currentLine <= $end; + my $hit = $lineData->value($currentLine); + $lineHit = 1 if $hit; + if ($hit && !$imHit) { + # don't warn about the first line of a lambda: + # - the decl may executed even if the lambda function itself is + # not called + # - if no other lines are hit, then then the function is not + # covered, but the coverage DB is consistent + # - if some other line _is_ hit, then, the data is inconsistent + if ($func->isLambda() && $currentLine == $first) { + $lineHit = 0; + last unless @lines; + $currentLine = shift(@lines); + next; + } + my $suffix = + ($lcovutil::fix_inconsistency && lcovutil::is_ignored( + $lcovutil::ERROR_INCONSISTENT_DATA) + ) ? ": function marked 'hit'" : + ''; + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + '"' . $func->filename() . + "\":$first: function '" . $func->name() . + "' is not hit but line $currentLine is$suffix." . + _consistencySuffix()); + if ($lcovutil::fix_inconsistency) { + # if message was ignored, then mark the function and all + # its aliases hit + $imHit = 1; + $modified = 1; + _fixFunction($traceInfo, $func, $hit); + } + last; # only warn on the first hit line in the function + } + last if $lineHit && $hit; # can stop looking at this function now + last unless (@lines); + $currentLine = shift @lines; + } + if ($imHit && !$lineHit) { + my $suffix = + ($lcovutil::fix_inconsistency && + lcovutil::is_ignored($lcovutil::ERROR_INCONSISTENT_DATA)) ? + ": function marked 'not hit'" : + ''; + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + '"' . $traceInfo->filename() . + "\":$first: function '" . $func->name() . + "' is hit but no contained lines are hit$suffix." . + _consistencySuffix()); + if ($lcovutil::fix_inconsistency) { + # if message was ignored, then mark the function and its aliases + # not hit + $modified = 1; + _fixFunction($traceInfo, $func, 0); + } + } + } + + # check MC/DC consistency - + # Note that we might have an MC/DC block on a line which has no + # linecov data + # This can happen for template functions (and similar) where the + # expression is statically determned to be true or false - and elided + # by the compiler. In that case, generate a new line coverpoint + if ($lcovutil::mcdc_coverage) { + my $mcdc = $traceInfo->mcdc(); + my $testcase_mcdc = $traceInfo->testcase_mcdc(); + foreach my $line ($mcdc->keylist()) { + my $lineHit = $lineData->value($line); + next if defined($lineHit); + + lcovutil::info(1, + '"' . $traceInfo->filename() . 
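The per-function check above enforces a simple invariant: a function should be recorded as hit exactly when at least one line in its [start, end] range is hit (setting aside lambdas and the fix_inconsistency repair path). A compact standalone sketch of that invariant, using plain hashes with invented names:

    use strict;
    use warnings;

    # $funcs: name -> [start_line, end_line, fn_hit]; $line_hits: line -> count.
    # Returns one warning string per disagreement.
    sub check_func_line_consistency {
        my ($funcs, $line_hits) = @_;
        my @warnings;
        for my $name (sort keys %$funcs) {
            my ($start, $end, $fn_hit) = @{$funcs->{$name}};
            my $line_hit = grep { ($line_hits->{$_} // 0) > 0 } ($start .. $end);
            push(@warnings, "$name is not hit but a contained line is hit")
                if $line_hit && !$fn_hit;
            push(@warnings, "$name is hit but no contained line is hit")
                if $fn_hit && !$line_hit;
        }
        return @warnings;
    }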
+ "\":$line: generating DA entry for orphan MC/DC\n" + ); + my $block = $mcdc->value($line); + my ($found, $hit) = $block->totals(); + $lineData->append($line, $hit); + + # create the entry in the per-testcase data + foreach my $testcase ($testcase_mcdc->keylist()) { + my $m = $testcase_mcdc->value($testcase); + if ($m->value($line)) { + $traceInfo->test($testcase)->append($line, $hit); + } + } + } + } + + # also check branch data consistency...should not have non-zero branch hit + # count if line is not hit - and vice versa + my $checkBranchConsistency = + !TraceFile::is_language('perl', $traceInfo->filename()); + if ($lcovutil::br_coverage) { + my $brData = $traceInfo->sumbr(); + + foreach my $line ($brData->keylist()) { + # we expect to find a line everywhere there is a branch + + my $lineHit = $lineData->value($line); + unless (defined($lineHit)) { + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + '"' . $traceInfo->filename() . + "\":$line: location has branchcov but no linecov data" + . _consistencySuffix()); + } + + my $brHit = 0; + my $brd = $brData->value($line); + BLOCK: foreach my $id ($brd->blocks()) { + my $block = $brd->getBlock($id); + foreach my $br (@$block) { + if (0 != $br->count()) { + $brHit = 1; + last BLOCK; + } + } + } + if (!defined($lineHit)) { + # must have ignored the above error - so build fake line data here + # (maybe should delete the branch instead?) + $lineData->append($line, $brHit); + next; + } + if ($lineHit && !$brHit) { + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + '"' . $traceInfo->filename() . + "\":$line: line is hit but no branches on line have been evaluated." + . _consistencySuffix()) + if $checkBranchConsistency; + } elsif (!$lineHit && $brHit) { + lcovutil::ignorable_error($lcovutil::ERROR_INCONSISTENT_DATA, + '"' . $traceInfo->filename() . + "\":$line: line is not hit but at least one branch on line has been evaluated." + . _consistencySuffix()); + } + } + } + + # @todo expect to have a branch everywhere we have an MCDC - + # further, expect the number of branches and conditions to match + + my $end = Time::HiRes::gettimeofday(); + $lcovutil::profileData{check_consistency}{$traceInfo->filename()} = + $end - $start; + return $modified; +} + +sub _filterFile +{ + my ($traceInfo, $source_file, $actions, $srcReader, $state) = @_; + + my $modified = 0; + if (0 != ($actions & DID_DERIVE)) { + $modified = _deriveFunctionEndLines($traceInfo); + $modified ||= _checkConsistency($traceInfo); + if (0 == ($actions & DID_FILTER)) { + return [$traceInfo, $modified]; + } + } + # @todo: if MCDC has just one expression, then drop it - + # it is equivalent to branch coverage. 
+ my $region = $cov_filter[$FILTER_EXCLUDE_REGION]; + my $branch_region = $cov_filter[$FILTER_EXCLUDE_BRANCH]; + my $range = $cov_filter[$lcovutil::FILTER_LINE_RANGE]; + my $branch_histogram = $cov_filter[$FILTER_BRANCH_NO_COND] + if (is_language('c', $source_file)); + my $brace_histogram = $cov_filter[$FILTER_LINE_CLOSE_BRACE] + if (is_language('c', $source_file)); + my $blank_histogram = $cov_filter[$FILTER_BLANK_LINE]; + my $function_alias_histogram = $cov_filter[$FILTER_FUNCTION_ALIAS]; + my $trivial_histogram = $cov_filter[$FILTER_TRIVIAL_FUNCTION]; + my $filter_initializer_list = $cov_filter[$FILTER_INITIALIZER_LIST] + if (is_language('c', $source_file)); + my $directive = $cov_filter[$FILTER_DIRECTIVE]; + my $omit = $cov_filter[$FILTER_OMIT_PATTERNS] + if defined($FILTER_OMIT_PATTERNS); + my $mcdc_single = $cov_filter[$FILTER_MCDC_SINGLE] + if defined($FILTER_MCDC_SINGLE && $lcovutil::mcdc_coverage); + + my $context = MessageContext->new("filtering $source_file"); + if (lcovutil::is_filter_enabled()) { + lcovutil::info(1, "reading $source_file for lcov filtering\n"); + $srcReader->open($source_file); + } else { + $srcReader->close(); + } + my $path = ReadCurrentSource::resolve_path($source_file); + lcovutil::info(1, "extractVersion($path) for $source_file\n") + if $path ne $source_file; + # Note: this is checking the version of the 'current' file - even if + # we are actually reading the baseline version. + # - This is what we want, as the 'baseline read' is actually recovering/ + # recreating the baseline source from the current source and the diff. + # - We already checked that the diff and the coverage DB baseline/current + # version data is consistent - so filtering will be accurate as long as + # we see the right 'current' source version. + my $fileVersion = lcovutil::extractFileVersion($path) + if $srcReader->notEmpty(); + if (defined($fileVersion) && + defined($traceInfo->version()) + && + !lcovutil::checkVersionMatch($source_file, $traceInfo->version(), + $fileVersion, 'filter') + ) { + lcovutil::info(1, + '$source_file: skip filtering due to version mismatch\n'); + return ($traceInfo, 0); + } + + if (defined($lcovutil::func_coverage) && + (0 != scalar(@lcovutil::exclude_function_patterns) || + defined($trivial_histogram) || + defined($region)) + ) { + # filter excluded function line ranges + my $funcData = $traceInfo->testfnc(); + my $lineData = $traceInfo->test(); + my $branchData = $traceInfo->testbr(); + my $mcdcData = $traceInfo->testcase_mcdc(); + my $checkData = $traceInfo->check(); + my $reader = (defined($trivial_histogram) || defined($region)) && + $srcReader->notEmpty() ? 
$srcReader : undef; + + foreach my $tn ($lineData->keylist()) { + my $m = + _eraseFunctions($source_file, $reader, + $funcData->value($tn), $lineData->value($tn), + $branchData->value($tn), $mcdcData->value($tn), + $checkData->value($tn), $state, + 0); + $modified ||= $m; + } + my $m = + _eraseFunctions($source_file, $reader, + $traceInfo->func(), $traceInfo->sum(), + $traceInfo->sumbr(), $traceInfo->mcdc(), + $traceInfo->check(), $state, + 1); + $modified ||= $m; + } + + return + unless ($srcReader->notEmpty() && + lcovutil::is_filter_enabled()); + + my $filterExceptionBranches = FilterBranchExceptions->new(); + + my ($testdata, $sumcount, $funcdata, $checkdata, $testfncdata, + $testbrdata, $sumbrcount, $mcdc, $testmcdc) = $traceInfo->get_info(); + + foreach my $testname (sort($testdata->keylist())) { + my $testcount = $testdata->value($testname); + my $testfnccount = $testfncdata->value($testname); + my $testbrcount = $testbrdata->value($testname); + my $mcdc_count = $testmcdc->value($testname); + + my $reason; + my $functionMap = $testfncdata->{$testname}; + if ($lcovutil::func_coverage && + $functionMap && + ($region || $range)) { + # Write function related data - sort by line number + + foreach my $key ($functionMap->keylist()) { + my $data = $functionMap->findKey($key); + my $line = $data->line(); + + my $remove; + if ($srcReader->isOutOfRange($line, 'line')) { + $remove = 1; + lcovutil::info(1, + "filter FN " . $data->name() . + ' ' . $data->file() . ":$line\n"); + ++$range->[-2]; # one location where this applied + } elsif (0 != ($reason = $srcReader->isExcluded($line))) { + # we already warned about this one + next + if (0 != ($reason & $srcReader->e_UNREACHABLE) && + 0 != $data->hit() && + $lcovutil::retainUnreachableCoverpointIfHit); + + $remove = 1; + my $r = $srcReader->excludeReason($line); + foreach my $f ([ReadCurrentSource::EXCLUDE_REGION, $region], + [ReadCurrentSource::OMIT_LINE, $omit]) { + if ($r & $f->[0]) { + $f->[1]->[-2] += scalar(keys %{$data->aliases()}); + last; + } + } + } + if ($remove) { + #remove this function from everywhere + foreach my $tn ($testfncdata->keylist()) { + my $d = $testfncdata->value($tn); + my $f = $d->findKey($key); + next unless $f; + $d->remove($f); + } + # and remove from the master table + $funcdata->remove($funcdata->findKey($key)); + $modified = 1; + next; + } # if excluded + } # foreach function + } # if func_coverage + # $testbrcount is undef if there are no branches in the scope + if (($lcovutil::br_coverage || $lcovutil::mcdc_coverage) && + (defined($testbrcount) || + defined($mcdc_count)) && + ($branch_histogram || + $region || + $branch_region || + $range || + $filterExceptionBranches || + $omit) + ) { + my %uniq; + # check MC/DC lines which are not also branch lines + foreach + my $line (defined($mcdc_count) ? $mcdc_count->keylist() : (), + defined($testbrcount) ? 
$testbrcount->keylist() : ()) { + next if exists($uniq{$line}); + $uniq{$line} = 1; + + # for counting: keep track filter which triggered exclusion - + my $remove; + # omit if line excluded or branches excluded on this line + if ($srcReader->isOutOfRange($line, 'branch')) { + # only counting line coverpoints that got excluded + die("inconsistent state") unless $range; + $remove = $range; + } elsif ( + 0 != ( + $reason = + $srcReader->isExcluded($line, $srcReader->e_BRANCH) + ) + ) { + # all branches here + my $r = $srcReader->excludeReason($line); + foreach my $f ([ReadCurrentSource::EXCLUDE_REGION, $region], + [ReadCurrentSource::OMIT_LINE, $omit], + [ReadCurrentSource::EXCLUDE_DIRECTIVE, + $directive + ], + [ReadCurrentSource::EXCLUDE_BRANCH_REGION, + $branch_region + ] + ) { + if ($r & $f->[0]) { + $remove = $f->[1]; + last; + } + } + die("inconsistent reason $reason") unless $remove; + } elsif ($branch_histogram && + !$srcReader->containsConditional($line)) { + $remove = $branch_histogram; + } + if ($remove) { + foreach my $t ([$testbrdata, $sumbrcount, 'BRDA'], + [$testmcdc, $mcdc, 'MCDC']) { + my ($testCount, $sumCount, $str) = @$t; + next unless $sumCount; + my $brdata = $sumCount->value($line); + # might not be MCDC here, even if there is a branch + next unless $brdata; + + if ($reason && + 0 != ($reason & $srcReader->e_UNREACHABLE) && + 0 != ($brdata->totals())[1]) { + lcovutil::ignorable_error( + $lcovutil::ERROR_UNREACHABLE, + "\"$source_file\":$line: $str record in 'unreachable' region has non-zero hit count." + ); + next + if $lcovutil::retainUnreachableCoverpointIfHit; + } + ++$remove->[-2]; # one line where we skip + $remove->[-1] += ($brdata->totals())[0]; + lcovutil::info(2, + "filter $str '" + . + ($line < $srcReader->numLines() ? + $srcReader->getLine($line) : + '<-->') . + "' $source_file:$line\n"); + # now remove this branch everywhere... 
+ foreach my $tn ($testCount->keylist()) { + my $d = $testCount->value($tn); + $d->remove($line, 1); # remove if present + } + # remove at top + $sumCount->remove($line); + $modified = 1; + } + } elsif (defined($filterExceptionBranches) && + defined($sumbrcount) && + defined($sumbrcount->value($line))) { + # exclude exception branches here + my $m = + $filterExceptionBranches->filter($line, $srcReader, + $sumbrcount, $testbrdata, $mcdc, $mcdc_count); + $modified ||= $m; + } + } # foreach line + } # if branch_coverage + if ($mcdc_single) { + # find single-expression MC/DC's - if there is a matching branch + # expression on the same line, then remove the MC/DC + foreach my $line ($mcdc_count->keylist()) { + my $block = $mcdc_count->value($line); + my $groups = $block->groups(); + if (exists($groups->{1}) && + scalar(keys %$groups) == 1) { + my $branch = $testbrcount->value($line); + next unless $branch && ($branch->totals())[0] == 2; + $mcdc_count->remove($line); + ++$mcdc_single->[-2]; # one MC/DC skipped + + $mcdc->remove($line); # remove at top + $modified = 1; + } + } + } + next + unless $region || + $range || + $brace_histogram || + $branch_histogram || + $directive || + $omit || + $filter_initializer_list; + + # Line related data + my %initializerListRange; + foreach my $line ($testcount->keylist()) { + + # warn about inconsistency if executed line is marked unreachable + my $l_hit = $testcount->value($line); + if ($l_hit && + 0 != ($reason = + $srcReader->isExcluded( + $line, $srcReader->e_UNREACHABLE, 1 + )) && + 0 != ($reason & $srcReader->e_UNREACHABLE) + ) { + lcovutil::ignorable_error($lcovutil::ERROR_UNREACHABLE, + "\"$source_file\":$line: 'unreachable' line has non-zero hit count." + ); + next + if $lcovutil::retainUnreachableCoverpointIfHit; + } + + # don't suppresss if this line has associated branch or MC/DC data + next + if ( + (defined($sumbrcount) && defined($sumbrcount->value($line))) || + (defined($mcdc_count) && + defined($mcdc_count->value($line)))); + + my $is_initializer; + my $is_filtered = undef; + if (exists($initializerListRange{$line})) { + $is_initializer = 1; + $is_filtered = $filter_initializer_list; + delete $initializerListRange{$line}; + } elsif ($filter_initializer_list) { + # check if this line looks like a complete statement (balanced + # parens, ending with semicolon, etc - + # or whether subsequent lines are required for completion. 
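The mcdc_single filter above rests on the observation that an MC/DC group with a single condition carries the same information as an ordinary two-way branch on the same line, so it can be dropped when exactly such a branch pair exists. A sketch of just the decision (container plumbing omitted, names invented):

    use strict;
    use warnings;

    # $mcdc_group_sizes: arrayref of condition-group sizes recorded on the line
    # $branch_count:     number of branches recorded on the same line
    sub mcdc_is_redundant {
        my ($mcdc_group_sizes, $branch_count) = @_;
        my %sizes = map { $_ => 1 } @$mcdc_group_sizes;
        # a lone single-condition group, shadowed by exactly one branch pair
        return exists($sizes{1}) && keys(%sizes) == 1 && $branch_count == 2;
    }

    print mcdc_is_redundant([1], 2) ? "drop\n" : "keep\n";    # drop
    print mcdc_is_redundant([3], 6) ? "drop\n" : "keep\n";    # keep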
+ # If those subsequent lines have associated coverpoints, + # then those points should be filtered out (see issue #1222) + my $count = $srcReader->is_initializerList($line); + if (0 != $count) { + $is_initializer = 1; + $is_filtered = $filter_initializer_list; + for (my $l = $line + $count - 1; $l > $line; --$l) { + # record start of range + $initializerListRange{$l} = $line; + } + } + } + + my $outOfRange = $srcReader->isOutOfRange($line, 'line') + unless $is_filtered; + $is_filtered = $lcovutil::cov_filter[$lcovutil::FILTER_LINE_RANGE] + if !defined($is_filtered) && + defined($outOfRange) && + $outOfRange; + my $excluded = $srcReader->isExcluded($line) + unless $is_filtered; + if (defined($excluded) && $excluded) { + my $reason = $srcReader->excludeReason($line); + foreach my $f ([ReadCurrentSource::EXCLUDE_REGION, $region], + [ReadCurrentSource::OMIT_LINE, $omit], + [ReadCurrentSource::EXCLUDE_DIRECTIVE, + $directive + ] + ) { + if ($reason & $f->[0]) { + $is_filtered = $f->[1]; + last; + } + } + } + my $isCloseBrace = + ($brace_histogram && + $srcReader->suppressCloseBrace($line, $l_hit, $testcount)) + unless $is_filtered; + $is_filtered = $brace_histogram + if !defined($is_filtered) && + defined($isCloseBrace) && + $isCloseBrace; + my $isBlank = + ($blank_histogram && + ($lcovutil::filter_blank_aggressive || $l_hit == 0) && + $srcReader->isBlank($line)) + unless $is_filtered; + $is_filtered = $blank_histogram + if !defined($is_filtered) && defined($isBlank) && $isBlank; + + next unless $is_filtered; + + $modified = 1; + lcovutil::info(2, + 'filter DA (' . $is_filtered->[0] . ') ' + . + ($line < $srcReader->numLines() ? + ("'" . $srcReader->getLine($line) . "'") : + "") . + " $source_file:$line\n"); + + unless (defined($outOfRange) && $outOfRange) { + # some filters already counted... + ++$is_filtered->[-2]; # one location where this applied + ++$is_filtered->[-1]; # one coverpoint suppressed + } + + # now remove everywhere + foreach my $tn ($testdata->keylist()) { + my $d = $testdata->value($tn); + $d->remove($line, 1); # remove if present + } + $sumcount->remove($line); + if (exists($checkdata->{$line})) { + delete($checkdata->{$line}); + } + } # foreach line + } #foreach test + # count the number of function aliases.. + if ($function_alias_histogram) { + $function_alias_histogram->[-2] += $funcdata->numFunc(1); + $function_alias_histogram->[-1] += $funcdata->numFunc(0); + } + return ($traceInfo, $modified); +} + +sub _mergeParallelChunk +{ + # called from parent + my ($self, $tmp, $child, $children, $childstatus, $store, $worklist, + $childRetryCounts) + = @_; + + my ($chunk, $forkAt, $chunkId) = @{$children->{$child}}; + my $dumped = File::Spec->catfile($tmp, "dumper_$child"); + my $childLog = File::Spec->catfile($tmp, "filter_$child.log"); + my $childErr = File::Spec->catfile($tmp, "filter_$child.err"); + + lcovutil::debug(1, "merge:$child ID $chunkId\n"); + my $start = Time::HiRes::gettimeofday(); + foreach my $f ($childLog, $childErr) { + if (!-f $f) { + $f = ''; # there was no output + next; + } + if (open(RESTORE, "<", $f)) { + # slurp into a string and eval.. 
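The statement that follows slurps each child log file in one read. For readers unfamiliar with the idiom, a minimal self-contained version is shown here: localizing $/ makes the readline inside the do block return the whole file at once.

    use strict;
    use warnings;

    sub slurp {
        my ($path) = @_;
        open(my $fh, '<', $path) or die("unable to open $path: $!\n");
        my $str = do { local $/; <$fh> };    # undef $/ => read entire file
        close($fh) or die("unable to close $path: $!\n");
        return $str;
    }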
+ my $str = do { local $/; }; # slurp whole thing + close(RESTORE) or die("unable to close $f: $!\n"); + unlink $f; + $f = $str; + } else { + $f = "unable to open $f: $!"; + if (0 == $childstatus) { + lcovutil::report_parallel_error('filter', + $ERROR_PARALLEL, $child, 0, $f, keys(%$children)); + } + } + } + my $signal = $childstatus & 0xFF; + print(STDOUT $childLog) + if ((0 != $childstatus && + $signal != POSIX::SIGKILL && + $lcovutil::max_fork_fails != 0) || + $lcovutil::verbose); + print(STDERR $childErr); + my $data = Storable::retrieve($dumped) + if (-f $dumped && $childstatus == 0); + if (defined($data)) { + my ($updates, $save, $state, $childFinish, $update) = @$data; + + lcovutil::update_state(@$update); + #my $childCpuTime = $lcovutil::profileData{filt_child}{$chunkId}; + #$totalFilterCpuTime += $childCpuTime; + #$intervalFilterCpuTime += $childCpuTime; + + my $now = Time::HiRes::gettimeofday(); + $lcovutil::profileData{filt_undump}{$chunkId} = $now - $start; + + foreach my $patType (@{$store->[0]}) { + my $svType = shift(@{$save->[0]}); + foreach my $p (@$patType) { + $p->[-1] += shift(@$svType); + } + } + for (my $i = scalar(@{$store->[1]}) - 1; $i >= 0; --$i) { + $store->[1]->[$i]->[-2] += $save->[1]->[$i]->[0]; + $store->[1]->[$i]->[-1] += $save->[1]->[$i]->[1]; + } + foreach my $d (@$updates) { + $self->_updateModifiedFile(@$d, $state); + } + + my $final = Time::HiRes::gettimeofday(); + $lcovutil::profileData{filt_merge}{$chunkId} = $final - $now; + $lcovutil::profileData{filt_queue}{$chunkId} = $start - $childFinish; + + #$intervalMonitor->checkUpdate($processedFiles); + + } else { + if (!-f $dumped || + POSIX::SIGKILL == $signal) { + + if (exists($childRetryCounts->{$chunkId})) { + $childRetryCounts->{$chunkId} += 1; + } else { + $childRetryCounts->{$chunkId} = 1; + } + lcovutil::report_fork_failure( + "filter segment $chunkId", + (POSIX::SIGKILL == $signal ? + "killed by OS - possibly due to out-of-memory" : + "serialized data $dumped not found"), + $childRetryCounts->{$chunkId}); + push(@$worklist, $chunk); + } else { + lcovutil::report_parallel_error('filter', + $ERROR_PARALLEL, $child, $childstatus, + "unable to filter segment $chunkId: $@", + keys(%$children)); + } + } + foreach my $f ($dumped) { + unlink $f + if -f $f; + } + my $to = Time::HiRes::gettimeofday(); + $lcovutil::profileData{filt_chunk}{$chunkId} = $to - $forkAt; +} + +sub _generate_end_line_message +{ + # don't generate gcov warnings for tools that don't use gcov + return if grep({ /(llvm|perl|py|xml)2lcov/ } $lcovutil::tool_name); + if (lcovutil::warn_once('compiler_version', 1)) { + my $msg = + 'Function begin/end line exclusions not supported with this version of GCC/gcov; require gcc/9 or newer'; + if ((defined($lcovutil::derive_function_end_line) && + $lcovutil::derive_function_end_line != 0) || + (defined($lcovutil::derive_function_end_line_all_files) && + $lcovutil::derive_function_end_line_all_files != 0) + ) { + lcovutil::ignorable_warning($lcovutil::ERROR_UNSUPPORTED, + $msg . + ": attempting to derive function end lines - see lcovrc man entry for 'derive_function_end_line'." + ); + } else { + lcovutil::ignorable_error($lcovutil::ERROR_UNSUPPORTED, + $msg . + ". See lcovrc man entry for 'derive_function_end_line'." 
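The handoff between parent and child above is built on Storable: the child writes an array reference of its results to a temporary file, and the parent reads it back once wait() returns. A minimal round trip with an invented payload:

    use strict;
    use warnings;
    use Storable qw(store retrieve);
    use File::Temp qw(tempdir);
    use File::Spec;

    my $tmp  = tempdir(CLEANUP => 1);
    my $file = File::Spec->catfile($tmp, 'dumper_example');

    # child side: serialize a reference to any nested Perl structure
    store([['fileA', { modified => 1 }], 'profile-data'], $file)
        or die("serialize failed\n");

    # parent side: read it back after the child has exited
    my $data = retrieve($file);
    my ($updates, $profile) = @$data;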
+ ); + } + } +} + +sub _updateModifiedFile +{ + my ($self, $name, $traceFile, $state) = @_; + $self->[FILES]->{$name} = $traceFile; + + _generate_end_line_message() + if $state->[0]->[1] != 0; +} + +sub _processParallelChunk +{ + # called from child + my $childStart = Time::HiRes::gettimeofday(); + my ($tmp, $chunk, $srcReader, $save, $state, $forkAt, $chunkId) = @_; + # clear profile - want only my contribution + my $currentState = lcovutil::initial_state(); + my $stdout_file = File::Spec->catfile($tmp, "filter_$$.log"); + my $stderr_file = File::Spec->catfile($tmp, "filter_$$.err"); + my $childInfo; + # set count to zero so we know how many got created in + # the child process + my $now = Time::HiRes::gettimeofday(); + my $status = 0; + + # clear current status so we see updates from this child + # pattern counts + foreach my $l (@{$save->[0]}) { + foreach my $p (@$l) { + $p->[-1] = 0; + } + } + # filter counts + foreach my $f (@{$save->[1]}) { + $f->[-1] = 0; + $f->[-2] = 0; + } + # using 'capture' here so that we can both capture/redirect geninfo + # messages from a child process during parallel execution AND + # redirect stdout/stderr from gcov calls. + # It does not work to directly open/reopen the STDOUT and STDERR + # descriptors due to interactions between the child and parent + # processes (see the Capture::Tiny doc for some details) + my $start = Time::HiRes::gettimeofday(); + my @updates; + my ($stdout, $stderr, $code) = Capture::Tiny::capture { + + eval { + foreach my $d (@$chunk) { + # could keep track of individual file time if we wanted to + my ($data, $modified) = _filterFile(@$d, $srcReader, $state); + + lcovutil::info(1, + $d->[1] . ' is ' . + ($modified ? '' : 'NOT ') . "modified\n"); + if ($modified) { + push(@updates, [$d->[1], $data]); + } + } + }; + if ($@) { + print(STDERR $@); + $status = 1; + } + }; + my $end = Time::HiRes::gettimeofday(); + # collect pattern counts + my @pcounts; + foreach my $l (@{$save->[0]}) { + my @c = map({ $_->[-1] } @$l); # grab the counts + push(@pcounts, \@c); + } + $save->[0] = \@pcounts; + # filter counts + foreach my $f (@{$save->[1]}) { + $f = [$f->[-2], $f->[-1]]; + } + + # parent might have already caught an error, cleaned up and + # removed the tempdir and exited. + lcovutil::check_parent_process(); + + # print stdout and stderr ... + foreach my $d ([$stdout_file, $stdout], [$stderr_file, $stderr]) { + next unless ($d->[1]); # only print if there is something to print + my $f = InOutFile->out($d->[0]); + my $h = $f->hdl(); + print($h $d->[1]); + } + my $dumpf = File::Spec->catfile($tmp, "dumper_$$"); + my $then = Time::HiRes::gettimeofday(); + $lcovutil::profileData{filt_proc}{$chunkId} = $then - $forkAt; + $lcovutil::profileData{filt_child}{$chunkId} = $end - $start; + my $data; + eval { + $data = Storable::store([\@updates, $save, $state, $then, + lcovutil::compute_update($currentState) + ], + $dumpf); + }; + if ($@ || !defined($data)) { + lcovutil::ignorable_error($lcovutil::ERROR_PARALLEL, + "Child $$ serialize failed" . ($@ ? ": $@" : '')); + } + return $status; +} + +# chunkID is only used for uniquification and as a key in profile data. 
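As the comment in _processParallelChunk explains, child output is collected with Capture::Tiny rather than by reopening STDOUT/STDERR. The basic calling pattern, which returns the captured stdout, the captured stderr, and then the block's own return values, is:

    use strict;
    use warnings;
    use Capture::Tiny qw(capture);

    my ($stdout, $stderr, $exit) = capture {
        print "normal output\n";
        print STDERR "diagnostic output\n";
        system('true');        # output of external commands is captured too
        return $? >> 8;        # the block's return value reaches the caller
    };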
+# We want this umber to be unique - even if we process more than one TraceFile +our $masterChunkID = 0; + +sub _processFilterWorklist +{ + my ($self, $srcReader, $fileList) = @_; + + my $chunkSize; + my $parallel = $lcovutil::lcov_filter_parallel; + # not much point in parallel calculation if the number of files is small + my $workList = $fileList; + if (exists($ENV{LCOV_FORCE_PARALLEL}) || + (scalar(@$fileList) > 50 && + $parallel && + 1 < $lcovutil::maxParallelism) + ) { + + $parallel = $lcovutil::maxParallelism; + + if (defined($lcovutil::lcov_filter_chunk_size)) { + if ($lcovutil::lcov_filter_chunk_size =~ /^(\d+)\s*(%?)$/) { + if (defined($2) && $2) { + # a percentage + $chunkSize = int(scalar(@$fileList) * $1 / 100); + } else { + # an absolute value + $chunkSize = $1; + } + } else { + lcovutil::ignorable_warning($lcovutil::ERROR_FORMAT, + "lcov_filter_chunk_size '$lcovutil::lcov_filter_chunk_size not recognized - ignoring\n" + ); + } + } + + if (!defined($chunkSize)) { + $chunkSize = + $maxParallelism ? + (int(0.8 * scalar(@$fileList) / $lcovutil::maxParallelism)) : + 1; + if ($chunkSize > 100) { + $chunkSize = 100; + } elsif ($chunkSize < 2) { + $chunkSize = 1; + } + } + if ($chunkSize != 1 || + exists($ENV{LCOV_FORCE_PARALLEL})) { + $workList = []; + my $idx = 0; + my $current = []; + # maybe sort files by number of lines, then distribute larger ones + # across chunks? Or sort so total number of lines is balanced + foreach my $f (@$fileList) { + push(@$current, $f); + if (++$idx == $chunkSize) { + $idx = 0; + push(@$workList, $current); + $current = []; + } + } + push(@$workList, $current) if (@$current); + lcovutil::info("Filter: chunkSize $chunkSize nChunks " . + scalar(@$workList) . "\n"); + } + } + + my @state = (['saw_unsupported_end_line', 0],); + # keep track of patterns application counts before we fork children + my @pats = grep { @$_ } + (\@lcovutil::exclude_function_patterns, \@lcovutil::omit_line_patterns); + # and also filter application counts + my @filters = grep { defined($_) } @lcovutil::cov_filter; + my @save = (\@pats, \@filters); + + my $processedChunks = 0; + my $currentParallel = 0; + my %children; + my $tmp = File::Temp->newdir( + "filter_datXXXX", + DIR => $lcovutil::tmp_dir, + CLEANUP => !defined($lcovutil::preserve_intermediates) + ) + if (exists($ENV{LCOV_FORCE_PARALLEL}) || + $parallel > 1); + + my $failedAttempts = 0; + my %childRetryCounts; + do { + CHUNK: while (@$workList) { + my $d = pop(@$workList); + ++$processedChunks; + # save current counts... + $state[0]->[1] = 0; + if (ref($d->[0]) eq 'TraceInfo') { + # serial processing... + my ($data, $modified) = _filterFile(@$d, $srcReader, \@state); + $self->_updateModifiedFile($d->[1], $data, \@state) + if $modified; + } else { + + my $currentSize = 0; + if (0 != $lcovutil::maxMemory) { + $currentSize = lcovutil::current_process_size(); + } + while ($currentParallel >= $lcovutil::maxParallelism || + ($currentParallel > 1 && + (($currentParallel + 1) * $currentSize) > + $lcovutil::maxMemory) + ) { + lcovutil::info(1, + "memory constraint ($currentParallel + 1) * $currentSize > $lcovutil::maxMemory violated: waiting. " + . (scalar(@$workList) - $processedChunks + 1) + . 
" remaining\n") + if ((($currentParallel + 1) * $currentSize) > + $lcovutil::maxMemory); + my $child = wait(); + my $childstatus = $?; + unless (exists($children{$child})) { + lcovutil::report_unknown_child($child); + next; + } + eval { + $self->_mergeParallelChunk($tmp, $child, \%children, + $childstatus, \@save, $workList, + \%childRetryCounts); + }; + if ($@) { + $childstatus = 1 << 8 unless $childstatus; + lcovutil::report_parallel_error('filter', + $lcovutil::ERROR_CHILD, $child, $childstatus, $@); + } + --$currentParallel; + } + + # parallel processing... + $lcovutil::deferWarnings = 1; + my $now = Time::HiRes::gettimeofday(); + my $pid = fork(); + if (!defined($pid)) { + # fork failed + ++$failedAttempts; + lcovutil::report_fork_failure('process filter chunk', + $!, $failedAttempts); + --$processedChunks; + push(@$workList, $d); + next CHUNK; + } + $failedAttempts = 0; + if (0 == $pid) { + # I'm the child + my $status = + _processParallelChunk($tmp, $d, $srcReader, \@save, + \@state, $now, $masterChunkID); + exit($status); # normal return + } else { + # parent + $children{$pid} = [$d, $now, $masterChunkID]; + lcovutil::debug(1, "fork:$pid ID $masterChunkID\n"); + ++$currentParallel; + } + ++$masterChunkID; + } + + } # while (each segment in worklist) + while ($currentParallel != 0) { + my $child = wait(); + my $childstatus = $?; + unless (exists($children{$child})) { + lcovutil::report_unknown_child($child); + next; + } + --$currentParallel; + eval { + $self->_mergeParallelChunk($tmp, $child, \%children, + $childstatus, \@save, $workList, \%childRetryCounts); + }; + if ($@) { + $childstatus = 1 << 8 unless $childstatus; + lcovutil::report_parallel_error('filter', + $lcovutil::ERROR_CHILD, $child, $childstatus, $@); + } + + } + } while (@$workList); # outer do/while - to catch spaceouts + lcovutil::info("Finished filter file processing\n"); +} + +sub applyFilters +{ + my $self = shift; + my $srcReader = shift; + + $srcReader = ReadCurrentSource->new() + unless defined($srcReader); + + my $mask = DID_FILTER; + $mask |= DID_DERIVE + if (defined($lcovutil::derive_function_end_line) && + $lcovutil::derive_function_end_line != 0); + return + if ($mask == ($self->[STATE] & $mask)); + + # have to look through each file in each testcase; they may be different + # due to differences in #ifdefs when the corresponding tests were compiled. + my @filter_workList; + + my $computeEndLine = + (0 == ($self->[STATE] & DID_DERIVE) && + defined($lcovutil::derive_function_end_line) && + $lcovutil::derive_function_end_line != 0 && + defined($lcovutil::func_coverage)); + + foreach my $name ($self->files()) { + + my $traceInfo = $self->data($name); + die("expected TraceInfo, got '" . ref($traceInfo) . "'") + unless ('TraceInfo' eq ref($traceInfo)); + my $source_file = $traceInfo->filename(); + if (TraceFile::skipCurrentFile($source_file)) { + $self->remove($source_file); + next; + } + if (lcovutil::is_external($source_file)) { + lcovutil::info("excluding 'external' file '$source_file'\n"); + $self->remove($source_file); + next; + } + # derive function end line for C/C++ and java code if requested + # (not trying to handle python nested functions, etc.) + # However, see indent handling in the py2lcov script. Arguably, that + # could/should be done here/in Perl rather than in Python.) + # Jacoco pretends to report function end line - but it appears + # to be the last line executed - not the actual last line of + # the function - so broken/completely useless. 
+ my $actions = 0; + if ($computeEndLine && + ($lcovutil::derive_function_end_line_all_files || + is_language('c|java|perl', $source_file)) + ) { + # try to derive end lines if at least one is unknown. + # can't compute for lambdas because we can't distinguish + # the last line reliably. + $actions = DID_DERIVE + if grep({ !($_->isLambda() || defined($_->end_line())) } + $traceInfo->func()->valuelist()); + } + + if ((defined($lcovutil::func_coverage) && + (0 != scalar(@lcovutil::exclude_function_patterns) || + defined($lcovutil::cov_filter[$FILTER_TRIVIAL_FUNCTION]))) || + (is_language('c|perl|python|java', $source_file) && + lcovutil::is_filter_enabled()) + ) { + # we are forking anyway - so also compute end lines there + $actions |= DID_FILTER; + push(@filter_workList, [$traceInfo, $name, $actions]); + } else { + if (0 != $actions) { + # all we are doing is deriving function end lines - which doesn't + # take long enough to be worth forking + TraceFile::_deriveFunctionEndLines($traceInfo); + } + TraceFile::_checkConsistency($traceInfo); + } + + } # foreach file + $self->[STATE] |= DID_DERIVE; + + if (@filter_workList) { + lcovutil::info("Apply filtering..\n"); + $self->_processFilterWorklist($srcReader, \@filter_workList); + # keep track - so we don't do this again + $self->[STATE] |= DID_FILTER; + } +} + +sub is_language +{ + my ($lang, $filename) = @_; + my $idx = index($filename, '.'); + my $ext = $idx == -1 ? '' : substr($filename, $idx); + foreach my $l (split('\|', $lang)) { + die("unknown language '$l'") + unless exists($lcovutil::languageExtensions{$l}); + my $extensions = $lcovutil::languageExtensions{$l}; + return 1 if ($ext =~ /\.($extensions)$/); + } + return 0; +} + +# Read in the contents of the .info file specified by INFO_FILENAME. Data will +# be returned as a reference to a hash containing the following mappings: +# +# %result: for each filename found in file -> \%data +# +# %data: "test" -> \%testdata +# "sum" -> \%sumcount +# "func" -> \%funcdata +# "found" -> $lines_found (number of instrumented lines found in file) +# "hit" -> $lines_hit (number of executed lines in file) +# "function_found" -> $fn_found (number of instrumented functions found in file) +# "function_hit" -> $fn_hit (number of executed functions in file) +# "branch_found" -> $br_found (number of instrumented branches found in file) +# "branch_hit" -> $br_hit (number of executed branches in file) +# "check" -> \%checkdata +# "testfnc" -> \%testfncdata +# "testbr" -> \%testbrdata +# "sumbr" -> \%sumbrcount +# +# %testdata : name of test affecting this file -> \%testcount +# %testfncdata: name of test affecting this file -> \%testfnccount +# %testbrdata: name of test affecting this file -> \%testbrcount +# +# %testcount : line number -> execution count for a single test +# %testfnccount: function name -> execution count for a single test +# %testbrcount : line number -> branch coverage data for a single test +# %sumcount : line number -> execution count for all tests +# %sumbrcount : line number -> branch coverage data for all tests +# %funcdata : FunctionMap: function name -> FunctionEntry +# %checkdata : line number -> checksum of source code line +# $brdata : BranchData vector of items: block, branch, taken +# +# Note that .info file sections referring to the same file and test name +# will automatically be combined by adding all execution counts. +# +# Note that if INFO_FILENAME ends with ".gz", it is assumed that the file +# is compressed using GZIP. 
If available, GUNZIP will be used to decompress +# this file. +# +# Die on error. +# +sub _read_info +{ + my ($self, $tracefile, $readSourceCallback, $verify_checksum) = @_; + $verify_checksum = 0 unless defined($verify_checksum); + + if (!defined($readSourceCallback)) { + $readSourceCallback = ReadCurrentSource->new(); + } + + # per file data + my %perfile; + my $sumcount; # line total counts in this file + my $funcdata; # function total counts in this file + my $sumbrcount; # branch total counts + my $mcdcCount; # MD/DC total counts + + my $checkdata; # line checksums + my %perTestData; + my %summaryData; + # hash of per-testcase coverage data per testcase, in this file + my $testdata; # hash of testname -> line coverage + my $testfncdata; # hash of testname -> function coverage + my $testbrdata; # hash of testname -> branch data + my $testMcdc; # -> MC/DC data + + my $testcount; # line coverage for particular testcase + my $testfnccount; # func coverage " " + my $testbrcount; # branch coverage " " + my $testcase_mcdc; # MC/DC coverage " " + + my $testname; # Current test name + my $filename; # Current filename + my $current_mcdc; + my $changed_testname; # If set, warn about changed testname + + lcovutil::info(1, "Reading data file $tracefile\n"); + + # Check if file exists and is readable + stat($tracefile); + if (!(-r _)) { + die("cannot read file $tracefile!\n"); + } + + # Check if this is really a plain file + if (!(-f _)) { + die("not a plain file: $tracefile!\n"); + } + + # Check for .gz extension + my $inFile = InOutFile->in($tracefile, $lcovutil::demangle_cpp_cmd); + my $infoHdl = $inFile->hdl(); + + $testname = ""; + my $fileData; + # HGC: somewhat of a hack. + # There are duplicate lines in the geninfo output result - for example, + # line '2095' may have multiple DA (line) entries, and may have multiple + # 'BRDA' entries - each with a different number of branches and different + # count + # The hack is to put branches into a hash keyed by branch ID - and + # merge elements with the same key if we run into them in the multiple + # times in the same 'file' data (within an SF entry). + my %nextBranchId; # line -> integer ID + my ($currentBranchLine, $skipBranch); + my $functionMap; + my %excludedFunction; + my $skipCurrentFile = 0; + my %fnIdxMap; + while (<$infoHdl>) { + chomp($_); + my $line = $_; + $line =~ s/\s+$//; # whitespace + + next if $line =~ /^#/; # skip comment + + if ($line =~ /^[SK]F:(.*)/) { + # Filename information found + if ($1 =~ /^\s*$/) { + lcovutil::ignorable_error($lcovutil::ERROR_FORMAT, + "\"$tracefile\":$.: unexpected empty file name in record '$line'" + ); + $skipCurrentFile = 1; + next; + } + #if ($self->contains($filename)) { + # # we expect there to be only one entry for each source file in each section + # lcovutil::ignorable_warning($lcovutil::ERROR_FORMAT, + # "Duplicate entries for \"$filename\"" + # . ($testname ? " in testcase '$testname'" : '') . '.'); + #} + $filename = ReadCurrentSource::resolve_path($1, 1); + # should this one be skipped? + $skipCurrentFile = skipCurrentFile($filename); + if ($skipCurrentFile) { + if (!exists($lcovutil::excluded_files{$filename})) { + $lcovutil::excluded_files{$filename} = 1; + lcovutil::info("Excluding $filename\n"); + } + next; + } + + # Retrieve data for new entry + %nextBranchId = (); + %excludedFunction = (); + %fnIdxMap = (); + + if ($verify_checksum) { + # unconditionally 'close' the current file - in case we don't + # open a new one. 
If that happened, then we would be looking + # at the source for some previous file. + $readSourceCallback->close(); + undef $currentBranchLine; + if (is_language('c', $filename)) { + $readSourceCallback->open($filename); + } + } + $fileData = $self->data($filename); + # record line number where file entry found - can use it in error messages + $fileData->location($tracefile, $.); + ($testdata, $sumcount, $funcdata, + $checkdata, $testfncdata, $testbrdata, + $sumbrcount, $mcdcCount, $testMcdc) = $fileData->get_info(); + + if (defined($testname)) { + $testcount = $fileData->test($testname); + $functionMap = $fileData->testfnc($testname); + $testbrcount = $fileData->testbr($testname); + $testcase_mcdc = $fileData->testcase_mcdc($testname); + } else { + $testcount = CountData->new($filename, 1); + $testfnccount = CountData->new($filename, 0); + $testbrcount = BranchData->new(); + $testcase_mcdc = MCDC_Data->new(); + $functionMap = FunctionMap->new($filename); + } + next; + } + next if $skipCurrentFile; + + # Switch statement + # Please note: if you add or change something here (lcov info file format) - + # then please make corresponding changes to the 'write_info' method, below + # and update the format description found in .../man/geninfo.1. + foreach ($line) { + next if $line =~ /^#/; # skip comment + + /^VER:(.+)$/ && do { + # revision control version string found + # we might try to set the version multiple times if the + # file appears multiple times in the .info file + if (defined($fileData->version()) && + $fileData->version() eq $1) { + # this is OK - + # we might try to set the version multiple times if the + # file appears multiple times in the .info file. + # This can happen, with some translators + last; + } + $fileData->version($1); + last; + }; + + /^TN:([^,]*)(,diff)?/ && do { + # Test name information found + $testname = defined($1) ? $1 : ""; + my $orig = $testname; + if ($testname =~ s/\W/_/g) { + $changed_testname = $orig; + } + $testname .= $2 if (defined($2)); + if (defined($ignore_testcase_name) && + $ignore_testcase_name) { + lcovutil::debug(1, + "using default testcase rather than $testname at $tracefile:$.\n" + ); + + $testname = ''; + } + last; + }; + + /^DA:(\d+),([^,]+)(,([^,\s]+))?/ && do { + my ($line, $count, $checksum) = ($1, $2, $4); + if ($line <= 0) { + lcovutil::ignorable_error($lcovutil::ERROR_FORMAT, + "\"$tracefile\":$.: unexpected line number '$line' in .info file record '$_'" + ); + # just keep invalid number - if error ignored + # last; + } + if ($readSourceCallback->notEmpty()) { + # does the source checksum match the recorded checksum? + if ($verify_checksum) { + if (defined($checksum)) { + my $content = $readSourceCallback->getLine($line); + my $chk = + defined($content) ? + Digest::MD5::md5_base64($content) : + 0; + if ($chk ne $checksum) { + lcovutil::ignorable_error( + $lcovutil::ERROR_VERSION, + "checksum mismatch at between source $filename:$line and $tracefile: $checksum -> $chk" + ); + } + } else { + # no checksum there + lcovutil::ignorable_error($lcovutil::ERROR_VERSION, + "no checksum for $filename:$line in $tracefile" + ); + } + } + } + + # hold line, count and testname for postprocessing? 
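For reference while reading the record handlers here, a minimal .info section covering the TN/SF/DA records above and the FN/FNDA function records handled a little further on might look like the following. Line numbers, counts, the checksum and the file path are purely illustrative, and the checksum field on DA lines is optional.

    TN:mytest
    SF:/build/src/foo.c
    FN:3,9,main
    FNDA:1,main
    FNF:1
    FNH:1
    DA:3,1
    DA:5,1,mBlcPzYOLZp0UbHbetvSNA
    DA:7,0
    LF:3
    LH:2
    end_of_record

With the newer indexed function encoding parsed below, the FN/FNDA pair is replaced by a leader plus one record per alias, e.g. FNL:0,3,9 followed by FNA:0,1,main.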
+ my $linesum = $fileData->sum(); + + # Execution count found, add to structure + # Add summary counts + $linesum->append($line, $count); + + # Add test-specific counts + if (defined($testname)) { + $fileData->test($testname)->append($line, $count, 1); + } + + # Store line checksum if available + if (defined($checksum) && + $lcovutil::verify_checksum) { + # Does it match a previous definition + if ($fileData->check()->mapped($line) && + ($fileData->check()->value($line) ne $checksum)) { + lcovutil::ignorable_error($lcovutil::ERROR_VERSION, + "checksum mismatch at $filename:$line in $tracefile" + ); + } + $fileData->check()->replace($line, $checksum); + } + last; + }; + + /^FN:(\d+),((\d+),)?(.+)$/ && do { + last if (!$lcovutil::func_coverage); + # Function data found, add to structure + my $lineNo = $1; + my $fnName = $4; + my $end_line = $3; + if (!grep({ $fnName =~ $_ } + @lcovutil::suppress_function_patterns) && + ($lineNo <= 0 || + (defined($end_line) && $end_line <= 0)) + ) { + lcovutil::ignorable_error($lcovutil::ERROR_FORMAT, + "\"$tracefile\":$.: unexpected function line '$lineNo' in .info file record '$_'" + ) if $lineNo <= 0; + lcovutil::ignorable_error($lcovutil::ERROR_FORMAT, + "\"$tracefile\":$.: unexpected function end line '$end_line' in .info file record '$_'" + ) if defined($end_line) && $end_line <= 0; + } + # the function may already be defined by another testcase + # (for the same file) + $functionMap->define_function($fnName, $lineNo, + $end_line ? $end_line : undef, + , "\"$tracefile\":$."); + last; + }; + + # Hit count may be float if Perl decided to convert it + /^FNDA:([^,]+),(.+)$/ && do { + last if (!$lcovutil::func_coverage); + my $fnName = $2; + my $hit = $1; + # error checking is in the addAlias method + $functionMap->add_count($fnName, $hit); + last; + }; + + # new format... + /^FNL:(\d+),(\d+)(,(\d+))?$/ && do { + last if (!$lcovutil::func_coverage); + my $fnIndex = $1; + my $lineNo = $2; + my $end_line = $4; + die("unexpected duplicate index $fnIndex") + if exists($fnIdxMap{$fnIndex}); + $fnIdxMap{$fnIndex} = [$lineNo, $end_line]; + last; + }; + + /^FNA:(\d+),([^,]+),(.+)$/ && do { + last if (!$lcovutil::func_coverage); + my $fnIndex = $1; + my $hit = $2; + my $alias = $3; + die("unknown index $fnIndex") + unless exists($fnIdxMap{$fnIndex}); + my ($lineNo, $end_line) = @{$fnIdxMap{$fnIndex}}; + my $fn = + $functionMap->define_function($alias, $lineNo, $end_line, + "\"$tracefile\":$."); + $fn->addAlias($alias, $hit); + last; + }; + + /^BRDA:(\d+),(e?)(\d+),(.+)$/ && do { + last if (!$lcovutil::br_coverage); + + # Branch coverage data found + # line data is "lineNo,blockId,(branchIdx|branchExpr),taken + # - so grab the last two elements, split on the last comma, + # and check whether we found an integer or an expression + my ($line, $is_exception, $block, $d) = + ($1, defined($2) && 'e' eq $2, $3, $4); + + if ($line <= 0) { + # Python coverage.py emits line number 0 (zero) for branches + # - which is bogus, as there is no line number zero, + # and the corresponding branch expression is not there in + # any case. + # Meantime: this confuses the lcov DB - so we simply skip + # such data. + # Note that we only need to check while reading .info files. + # - if we wrote one from geninfo, then we will not have + # produced bogus data - so no need to check. 
+ # - only some (broken) external tool could have the issue + lcovutil::ignorable_error($lcovutil::ERROR_FORMAT, + "\"$tracefile\":$.: unexpected line number '$line' in .info file record '$_'" + ); + # just keep invalid line number if error ignored + # last; + } + + last if $is_exception && $lcovutil::exclude_exception_branch; + my $comma = rindex($d, ','); + my $taken = substr($d, $comma + 1); + my $expr = substr($d, 0, $comma); + + # Notes: + # - there may be other branches on the same line (..the next + # contiguous BRDA entry). + # There should always be at least 2. + # - $block is generally '0' - but is used to distinguish cases + # where different branch constructs appear on the same line - + # e.g., due to template instantiation or funky macro usage - + # see .../tests/lcov/branch + # - $taken can be a number or '-' + # '-' means that the first clause of the branch short-circuited - + # so this branch was not evaluated at all. + # In any branch pair, either all should have a 'taken' of '-' + # or at least one should have a non-zero taken count and + # the others should be zero. + # - in order to support Verilog expressions, we treat the + # 'branchId' as an arbitrary string (e.g., ModelSim will + # generate an CNF or truth-table like entry corresponding + # to the branch. + + my $key = "$line,$block"; + my $branch = + exists($nextBranchId{$key}) ? $nextBranchId{$key} : + 0; + $nextBranchId{$key} = $branch + 1; + + my $br = + BranchBlock->new($branch, $taken, $expr, $is_exception); + $fileData->sumbr()->append($line, $block, $br, $filename); + + # Add test-specific counts + if (defined($testname)) { + $fileData->testbr($testname) + ->append($line, $block, $br, $filename); + } + last; + }; + + /^MCDC:(\d+),(\d+),([tf]),(\d+),(\d+),(.+)$/ && do { + # line number, groupSize, sense, count, index, expression + # 'sense' is t/f: was this expression sensitized + last unless $lcovutil::mcdc_coverage; + + my ($line, $groupSize, $sense, $count, $idx, $expr) = + ($1, $2, $3, $4, $5, $6); + if ($line <= 0) { + lcovutil::ignorable_error($lcovutil::ERROR_FORMAT, + "\"$tracefile\":$.: unexpected line number '$line' in condition data record '$_'." 
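Because the branch field of a BRDA record may itself be an expression containing commas (the Verilog case noted above), the code splits each record on the last comma rather than on every comma. A standalone sketch of that parse, mirroring the rindex/substr approach:

    use strict;
    use warnings;

    # Parse 'BRDA:<line>,[e]<block>,<branch-or-expr>,<taken>' where the branch
    # field may contain commas and <taken> is either a count or '-'.
    sub parse_brda {
        my ($record) = @_;
        my ($line, $exc, $block, $rest) =
            $record =~ /^BRDA:(\d+),(e?)(\d+),(.+)$/ or return;
        my $comma = rindex($rest, ',');
        my $expr  = substr($rest, 0, $comma);
        my $taken = substr($rest, $comma + 1);
        return ($line, $exc eq 'e', $block, $expr, $taken);
    }

    my @b = parse_brda('BRDA:12,0,(a && b),4');    # expr '(a && b)', taken 4
    my @c = parse_brda('BRDA:12,0,1,-');           # branch 1 short-circuited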
+ ); + # keep invalid line number + #last; + } + + if (!defined($current_mcdc) || + $current_mcdc->line() != $line) { + if ($current_mcdc) { + $fileData->mcdc()->close_mcdcBlock($current_mcdc); + + $fileData->testcase_mcdc($testname) + ->append_mcdc(Storable::dclone($current_mcdc)) + if (defined($testname)); + } + $current_mcdc = + $fileData->mcdc()->new_mcdc($fileData, $line); + } + $current_mcdc->insertExpr($filename, $groupSize, $sense eq 't', + $count, $idx, $expr); + last; + }; + + /^end_of_record/ && do { + # Found end of section marker + if ($filename) { + if (!defined($fileData->version()) && + $lcovutil::compute_file_version && + @lcovutil::extractVersionScript) { + my $version = lcovutil::extractFileVersion($filename); + $fileData->version($version) + if (defined($version) && $version ne ""); + } + if ($lcovutil::func_coverage) { + + if ($funcdata != $functionMap) { + $funcdata->union($functionMap); + } + } + if ($current_mcdc) { + # close the current expression in case the next file + # has an expression on the same line + $fileData->mcdc()->close_mcdcBlock($current_mcdc); + $fileData->testcase_mcdc($testname) + ->append_mcdc(Storable::dclone($current_mcdc)) + if (defined($testname)); + $current_mcdc = undef; + } + + # some paranoic checks + $self->data($filename)->check_data(); + last; + } + }; + /^(FN|BR|L|MC)[HF]/ && do { + last; # ignore count records + }; + /^\s*$/ && do { + last; # ignore empty line + }; + + lcovutil::ignorable_error($lcovutil::ERROR_FORMAT, + "\"$tracefile\":$.: unexpected .info file record '$_'"); + # default + last; + } + } + + # Calculate lines_found and lines_hit for each file + foreach $filename ($self->files()) { + #$data = $result{$filename}; + + ($testdata, $sumcount, undef, undef, $testfncdata, $testbrdata, + $sumbrcount) = $self->data($filename)->get_info(); + + # Filter out empty files + if ($self->data($filename)->sum()->entries() == 0) { + delete($self->[FILES]->{$filename}); + next; + } + my $filedata = $self->data($filename); + # Filter out empty test cases + foreach $testname ($filedata->test()->keylist()) { + if (!$filedata->test()->mapped($testname) || + scalar($filedata->test($testname)->keylist()) == 0) { + $filedata->test()->remove($testname); + $filedata->testfnc()->remove($testname); + $filedata->testbr()->remove($testname); + $filedata->testcase_mcdc()->remove($testname); + } + } + } + + if (scalar($self->files()) == 0) { + lcovutil::ignorable_error($lcovutil::ERROR_EMPTY, + "no valid records found in tracefile $tracefile"); + } + if (defined($changed_testname)) { + lcovutil::ignorable_warning($lcovutil::ERROR_FORMAT, + "invalid characters removed from testname in " . + "tracefile $tracefile: '$changed_testname'->'$testname'\n" + ); + } +} + +# write data to filename (stdout if '-') +# returns nothing +sub write_info_file($$$) +{ + my ($self, $filename, $do_checksum) = @_; + + if ($self->empty()) { + lcovutil::ignorable_error($lcovutil::ERROR_EMPTY, + "coverage DB is empty"); + } + my $file = InOutFile->out($filename); + my $hdl = $file->hdl(); + $self->write_info($hdl, $do_checksum); +} + +# +# write data in .info format +# returns array of (lines found, lines hit, functions found, functions hit, +# branches found, branches_hit) + +sub write_info($$$) +{ + my $self = $_[0]; + local *INFO_HANDLE = $_[1]; + my $verify_checksum = defined($_[2]) ? 
$_[2] : 0; + my $br_found; + my $br_hit; + + my $srcReader = ReadCurrentSource->new() + if ($verify_checksum); + foreach my $comment ($self->comments()) { + print(INFO_HANDLE '#', $comment, "\n"); + } + foreach my $filename (sort($self->files())) { + my $entry = $self->data($filename); + my $source_file = $entry->filename(); + die("expected to have have filtered $source_file out") + if lcovutil::is_external($source_file); + die("expected TraceInfo, got '" . ref($entry) . "'") + unless ('TraceInfo' eq ref($entry)); + + my ($testdata, $sumcount, $funcdata, + $checkdata, $testfncdata, $testbrdata, + $sumbrcount, $sum_mcdc, $testmcdc) = $entry->get_info(); + # munge the source file name, if requested + $source_file = ReadCurrentSource::resolve_path($source_file, 1); + + # Please note: if you add or change something here (lcov info file format) - + # then please make corresponding changes to the '_read_info' method, above + # and update the format description found in .../man/geninfo.1. + foreach my $testname (sort($testdata->keylist())) { + my $testcount = $testdata->value($testname); + my $testfnccount = $testfncdata->value($testname); + my $testbrcount = $testbrdata->value($testname); + my $mcdc = $testmcdc->value($testname); + + print(INFO_HANDLE "TN:$testname\n"); + print(INFO_HANDLE "SF:$source_file\n"); + print(INFO_HANDLE "VER:" . $entry->version() . "\n") + if defined($entry->version()); + if (defined($srcReader)) { + lcovutil::info(1, "reading $source_file for lcov checksum\n"); + $srcReader->open($source_file); + } + + my $functionMap = $testfncdata->{$testname}; + if ($lcovutil::func_coverage && + $functionMap) { + # Write function related data - sort by line number then + # by name (compiler-generated functions may have same line) + # sort enables diff of output data files, for testing + my @functionOrder = + sort({ $functionMap->findKey($a)->line() + cmp $functionMap->findKey($b)->line() or + $a cmp $b } $functionMap->keylist()); + + my $fnIndex = -1; + my $f_found = 0; + my $f_hit = 0; + foreach my $key (@functionOrder) { + my $data = $functionMap->findKey($key); + my $aliases = $data->aliases(); + my $line = $data->line(); + + if ($line <= 0) { + my $alias = (sort keys %$aliases)[0]; + lcovutil::ignorable_error($lcovutil::ERROR_FORMAT, + "\"$source_file\": unexpected line number '$line' for function $alias" + ); + # if message is ignored, leave bogus entry in the data + } + ++$fnIndex; + my $endLine = + defined($data->end_line()) ? + ',' . $data->end_line() : + ''; + # print function leader + print(INFO_HANDLE "FNL:$fnIndex,$line$endLine\n"); + ++$f_found; + my $counted = 0; + foreach my $alias (sort keys %$aliases) { + my $hit = $aliases->{$alias}; + ++$f_hit if $hit > 0 && !$counted; + $counted ||= $hit > 0; + # print the alias + print(INFO_HANDLE "FNA:$fnIndex,$hit,$alias\n"); + } + } + print(INFO_HANDLE "FNF:$f_found\n"); + print(INFO_HANDLE "FNH:$f_hit\n"); + } + # $testbrcount is undef if there are no branches in the scope + if ($lcovutil::br_coverage && + defined($testbrcount)) { + # Write branch related data + my $br_found = 0; + my $br_hit = 0; + + foreach my $line (sort({ $a <=> $b } $testbrcount->keylist())) { + + if ($line <= 0) { + lcovutil::ignorable_error($lcovutil::ERROR_FORMAT, + "\"$source_file\": unexpected line number '$line' in branch data record." 
+ ); + # keep bogus data if error ignored + # last; + } + my $brdata = $testbrcount->value($line); + # want the block_id to be treated as 32-bit unsigned integer + # (need masking to match regression tests) + my $mask = (1 << 32) - 1; + foreach my $block_id (sort(($brdata->blocks()))) { + my $blockData = $brdata->getBlock($block_id); + $block_id &= $mask; + foreach my $br (@$blockData) { + my $taken = $br->data(); + my $branch_id = $br->id(); + my $branch_expr = $br->expr(); + # mostly for Verilog: if there is a branch expression: use it. + printf(INFO_HANDLE "BRDA:%u,%s%u,%s,%s\n", + $line, + $br->is_exception() ? 'e' : '', + $block_id, + defined($branch_expr) ? $branch_expr : + $branch_id, + $taken); + $br_found++; + $br_hit++ + if ($taken ne '-' && $taken > 0); + } + } + } + if ($br_found > 0) { + print(INFO_HANDLE "BRF:$br_found\n"); + print(INFO_HANDLE "BRH:$br_hit\n"); + } + } + if ($mcdc && + $lcovutil::mcdc_coverage) { + + my $mcdc_found = 0; + my $mcdc_hit = 0; + foreach my $line (sort({ $a <=> $b } $mcdc->keylist())) { + if ($line <= 0) { + lcovutil::ignorable_error($lcovutil::ERROR_FORMAT, + "\"$source_file\": unexpected line number '$line' in MC/DC data record." + ); + } + my $m = $mcdc->value($line); + my $groups = $m->groups(); + foreach my $groupSize (sort keys %$groups) { + my $exprs = $groups->{$groupSize}; + my $index = -1; + foreach my $e (@$exprs) { + $mcdc_found += 2; + ++$index; + foreach my $sense ('t', 'f') { + my $count = $e->count($sense eq 't'); + ++$mcdc_hit if 0 != $count; + print(INFO_HANDLE + "MCDC:$line,$groupSize,$sense,$count,$index," + . $e->expression(), + "\n"); + } + } + } + } + if ($mcdc_found != 0) { + print(INFO_HANDLE "MCF:$mcdc_found\n"); + print(INFO_HANDLE "MCH:$mcdc_hit\n"); + } + } + # Write line related data + my $found = 0; + my $hit = 0; + foreach my $line (sort({ $a <=> $b } $testcount->keylist())) { + if ($line <= 0) { + lcovutil::ignorable_error($lcovutil::ERROR_FORMAT, + "\"$source_file\": unexpected line number '$line' in 'line' data record." + ); + } + my $l_hit = $testcount->value($line); + my $chk = ''; + if ($verify_checksum) { + if (exists($checkdata->{$line})) { + $chk = $checkdata->{$line}; + } elsif (defined($srcReader) && + $srcReader->notEmpty()) { + my $content = $srcReader->getLine($line); + $chk = + defined($content) ? + Digest::MD5::md5_base64($content) : + 0; + } + $chk = ',' . $chk if ($chk); + } + print(INFO_HANDLE "DA:$line,$l_hit$chk\n"); + $found++; + $hit++ + if ($l_hit > 0); + } + print(INFO_HANDLE "LF:$found\n"); + print(INFO_HANDLE "LH:$hit\n"); + print(INFO_HANDLE "end_of_record\n"); + } + } +} + +package AggregateTraces; +# parse sna merge TraceFiles - possibly in parallel +# - common utility, used by lcov 'add_trace' and genhtml multi-file read + +# If set, create map of unique function to list of testcase/info +# files which hit that function at least once +our $function_mapping; +# need a static external segment index lest the exe aggregate multiple groups of data +our $segmentIdx = 0; + +sub find_from_glob +{ + my @merge; + die("no files specified") unless (@_); + foreach my $pattern (@_) { + + if (-f $pattern) { + # this is a glob match... 
+ push(@merge, $pattern); + next; + } + $pattern =~ s/([^\\]) /$1\\ /g # explicitly escape spaces + unless $^O =~ /Win/; + + my @files = glob($pattern); # perl returns files in ASCII sorted order + + lcovutil::ignorable_error($lcovutil::ERROR_EMPTY, + "no files matching pattern $pattern") + unless scalar(@files); + for (my $i = 0; $i <= $#files; ++$i) { + my $f = $files[$i]; + if (-d $f) { + my $cmd = + "find '$f' -name '$lcovutil::info_file_pattern' -type f"; + my ($stdout, $stderr, $code) = Capture::Tiny::capture { + system($cmd); + }; + # can fail due to unreadable entry - but might still + # have returned data to process + lcovutil::ignorable_error($lcovutil::ERROR_UTILITY, + "error in \"$cmd\": $stderr") + if $code; + my @found = split(' ', $stdout); + lcovutil::ignorable_error($lcovutil::ERROR_EMPTY, + "no files matching '$lcovutil::info_file_pattern' found in $f" + ) unless (@found); + push(@files, @found); + next; + } + + unless (-r $f || -f $f) { + lcovutil::ignorable_error($lcovutil::ERROR_MISSING, + "'$f' found from pattern '$pattern' is not a readable file" + ); + next; + } + push(@merge, $f); + } + } + lcovutil::ignorable_error($lcovutil::ERROR_EMPTY, + "no matching file found in '['" . join(', ', @_) . "]'") + unless (@merge); + + return @merge; +} + +sub _process_segment($$$) +{ + my ($total_trace, $readSourceFile, $segment) = @_; + + my @interesting; + my $total = scalar(@$segment); + foreach my $tracefile (@$segment) { + my $now = Time::HiRes::gettimeofday(); + --$total; + lcovutil::info("Merging $tracefile..$total remaining" + . + ($lcovutil::debug ? + (' mem:' . lcovutil::current_process_size()) : + '') . + "\n" + ) if (1 != scalar(@$segment)); # ...in segment $segId + my $context = MessageContext->new("merging $tracefile"); + if (!-f $tracefile || + -z $tracefile) { + lcovutil::ignorable_error($lcovutil::ERROR_EMPTY, + "trace file '$tracefile' " + . + (-z $tracefile ? 'is empty' : + 'does not exist')); + next; + } + my $current; + eval { + $current = TraceFile->load($tracefile, $readSourceFile, + $lcovutil::verify_checksum, 1); + lcovutil::debug("after load $tracefile: memory: " . + lcovutil::current_process_size() . "\n") + if $lcovutil::debug; # predicate to avoid function call... + }; + my $then = Time::HiRes::gettimeofday(); + $lcovutil::profileData{parse}{$tracefile} = $then - $now; + if ($@) { + lcovutil::ignorable_error($lcovutil::ERROR_CORRUPT, + "unable to read trace file '$tracefile': $@"); + next; + } + if ($function_mapping) { + foreach my $srcFileName ($current->files()) { + my $traceInfo = $current->data($srcFileName); + my $funcData = $traceInfo->func(); + foreach my $funcKey ($funcData->keylist()) { + my $funcEntry = $funcData->findKey($funcKey); + if (0 != $funcEntry->hit()) { + # function is hit in this file + my $key = $funcEntry->file() . ":$funcKey"; + $function_mapping->{$key} = [$funcEntry->name(), []] + unless exists($function_mapping->{$key}); + die("mismatched function name for " . + $funcEntry->name() . 
+ " at $funcKey in $tracefile") + unless $funcEntry->name() eq + $function_mapping->{$key}->[0]; + push(@{$function_mapping->{$key}->[1]}, $tracefile); + } + } + } + } else { + if ($total_trace->merge_tracefile($current, TraceInfo::UNION)) { + push(@interesting, $tracefile); + } + } + my $end = Time::HiRes::gettimeofday(); + $lcovutil::profileData{append}{$tracefile} = $end - $then; + } + return @interesting; +} + +sub merge +{ + my $readSourceFile; + my $t = ref($_[0]); + if (!defined($_[0]) || '' eq $t) { + # backward compatibility - arg is undefined or is a filename + $readSourceFile = ReadCurrentSource->new(); + shift unless defined($_[0]); + } else { + $readSourceFile = shift; + die("unexpected arg $t") + unless grep(/^$t$/, ('ReadCurrentSource', 'ReadBaselineSource')); + } + my $nTests = scalar(@_); + if (1 < $nTests) { + lcovutil::info("Combining tracefiles.\n"); + } else { + lcovutil::info("Reading tracefile $_[0].\n"); + } + + $lcovutil::profileData{parse} = {} + unless exists($lcovutil::profileData{parse}); + $lcovutil::profileData{append} = {} + unless exists($lcovutil::profileData{append}); + + my @effective; + # source-based filters are somewhat expensive - so we turn them + # off for file read and only re-enable when we write the data back out + my $save_filters = lcovutil::disable_cov_filters(); + + my $total_trace = TraceFile->new(); + if (!(defined($lcovutil::maxParallelism) && defined($lcovutil::maxMemory) + )) { + lcovutil::init_parallel_params(); + } + if (0 != $lcovutil::maxMemory && + 1 != $lcovutil::maxParallelism) { + # estimate the number of processes we think we can run.. + my $currentSize = lcovutil::current_process_size(); + # guess that the data size is no smaller than one of the files we will be reading + # which one is largest? + my $fileSize = 0; + foreach my $n (@_) { + my $s = (stat($n))[7]; + $fileSize = $s if $s > $fileSize; + } + my $size = $currentSize + $fileSize; + my $num = int($lcovutil::maxMemory / $size); + lcovutil::debug( + "Sizes: self:$currentSize file:$fileSize total:$size num:$num paralled:$lcovutil::maxParallelism\n" + ); + if ($num < $lcovutil::maxParallelism) { + $num = $num > 1 ? $num : 1; + lcovutil::info( + "Throttling to '--parallel $num' due to memory constraint\n"); + $lcovutil::maxParallelism = $num; + } + } + # use a particular file sort order - to somewhat minimize order effects + my $filelist = \@_; + my @sorted_filelist; + if ($lcovutil::sort_inputs) { + @sorted_filelist = sort({ $a cmp $b } @_); + $filelist = \@sorted_filelist; + } + + if (1 != $lcovutil::maxParallelism && + (exists($ENV{LCOV_FORCE_PARALLEL}) || + 1 < $nTests) + ) { + # parallel implementation is to segment the file list into N + # segments, then parse-and-merge scalar(@merge)/N files in each slave, + # then merge the slave result. + # The reasoning is that one of our examples appears to take 1.3s to + # load the trace file, and 0.8s to merge it into the master list. + # We thus want to parallelize both the load and the merge, as much as + # possible. + # Note that we try to keep the files in the order they were specified + # in the segments (i.e., so adjacent files go in order, into the same + # segment). This plays more nicely with the "--prune-tests" option + # because we expect that files with similar names (e.g., as returned + # by 'glob' have similar coverage profiles and are thus not likely to + # all be 'effective'. 
If we had put them into different segments, + # then each segment might think that their variant is 'effective' - + # whereas we will notice that only one is effective if they are all + # in the same segment. + + my @segments; + my $testsPerSegment = + ($nTests > $lcovutil::maxParallelism) ? + int(($nTests + $lcovutil::maxParallelism - 1) / + $lcovutil::maxParallelism) : + 1; + my $idx = 0; + foreach my $tracefile (@$filelist) { + my $seg = $idx / $testsPerSegment; + $seg -= 1 if $seg == $lcovutil::maxParallelism; + push(@segments, []) + if ($seg >= scalar(@segments)); + push(@{$segments[$seg]}, $tracefile); + ++$idx; + } + lcovutil::info("Using " . + scalar(@segments) . + ' segment' . (scalar(@segments) > 1 ? 's' : '') . + " of $testsPerSegment test" . + ($testsPerSegment > 1 ? 's' : '') . "\n"); + $lcovutil::profileData{config} = {} + unless exists($lcovutil::profileData{config}); + $lcovutil::profileData{config}{segments} = scalar(@segments); + + # kind of a hack...write to the named directory that the user gave + # us rather than to a funny generated name + my $tempDir = defined($lcovutil::tempdirname) ? $lcovutil::tempdirname : + lcovutil::create_temp_dir(); + my %children; + my @pending; + my $patterns; + my $failedAttempts = 0; + my %childRetryCounts; + do { + while (my $segment = pop(@segments)) { + $lcovutil::deferWarnings = 1; + my $now = Time::HiRes::gettimeofday(); + my $pid = fork(); + if (!defined($pid)) { + ++$failedAttempts; + lcovutil::report_fork_failure('process segment', + $!, $failedAttempts); + push(@segments, $segment); + next; + } + $failedAttempts = 0; + + if (0 == $pid) { + # I'm the child + my $stdout_file = + File::Spec->catfile($tempDir, "lcov_$$.log"); + my $stderr_file = + File::Spec->catfile($tempDir, "lcov_$$.err"); + + my $currentState = lcovutil::initial_state(); + my $status = 0; + my @interesting; + my ($stdout, $stderr, $code) = Capture::Tiny::capture { + eval { + @interesting = + _process_segment($total_trace, + $readSourceFile, $segment); + }; + if ($@) { + print(STDERR $@); + $status = 1; + } + + my $then = Time::HiRes::gettimeofday(); + $lcovutil::profileData{$segmentIdx}{total} = + $then - $now; + }; + # print stdout and stderr ... + foreach + my $d ([$stdout_file, $stdout], [$stderr_file, $stderr]) + { + next + unless ($d->[1]) + ; # only print if there is something to print + my $f = InOutFile->out($d->[0]); + my $h = $f->hdl(); + print($h $d->[1]); + } + my $file = File::Spec->catfile($tempDir, "dumper_$$"); + my $data; + eval { + $data = + Storable::store( + [$total_trace, + \@interesting, + $function_mapping, + lcovutil::compute_update($currentState) + ], + $file); + }; + if ($@ || !defined($data)) { + lcovutil::ignorable_error($lcovutil::ERROR_PARALLEL, + "Child $$ serialize failed" . ($@ ? ": $@" : '')); + } + exit($status); + } else { + $children{$pid} = [$now, $segmentIdx, $segment]; + push(@pending, $segment); + } + $segmentIdx++; + } + # now wait for all the children to finish... + foreach (@pending) { + my $child = wait(); + my $now = Time::HiRes::gettimeofday(); + my $childstatus = $? >> 8; + unless (exists($children{$child})) { + lcovutil::report_unknown_child($child); + next; + } + my ($start, $idx, $segment) = @{$children{$child}}; + lcovutil::info( + 1, + "Merging segment $idx, status $childstatus" + . + ( + $lcovutil::debug ? + (' mem:' . lcovutil::current_process_size()) : + '') . 
+ "\n"); + my $dumpfile = File::Spec->catfile($tempDir, "dumper_$child"); + my $childLog = File::Spec->catfile($tempDir, "lcov_$child.log"); + my $childErr = File::Spec->catfile($tempDir, "lcov_$child.err"); + + foreach my $f ($childLog, $childErr) { + if (!-f $f) { + $f = ''; # there was no output + next; + } + if (open(RESTORE, "<", $f)) { + # slurp into a string and eval.. + my $str = + do { local $/; }; # slurp whole thing + close(RESTORE) or die("unable to close $f: $!\n"); + unlink $f + unless ($str && $lcovutil::preserve_intermediates); + $f = $str; + } else { + $f = "unable to open $f: $!"; + if (0 == $childstatus) { + lcovutil::report_parallel_error('aggregate', + $ERROR_PARALLEL, $child, 0, $f, + keys(%children)); + } + } + } + my $signal = $childstatus & 0xFF; + + print(STDOUT $childLog) + if ((0 != $childstatus && + $signal != POSIX::SIGKILL && + $lcovutil::max_fork_fails != 0) || + $lcovutil::verbose); + print(STDERR $childErr); + + # undump the data + my $data = Storable::retrieve($dumpfile) + if (-f $dumpfile && 0 == $childstatus); + if (defined($data)) { + eval { + my ($current, $changed, $func_map, $update) = @$data; + my $then = Time::HiRes::gettimeofday(); + $lcovutil::profileData{$idx}{undump} = $then - $now; + lcovutil::update_state(@$update); + if ($function_mapping) { + if (!defined($func_map)) { + lcovutil::report_parallel_error( + 'aggregate', + $ERROR_PARALLEL, + $child, + 0, + "segment $idx returned empty function data", + keys(%children)); + next; + } + while (my ($key, $data) = each(%$func_map)) { + $function_mapping->{$key} = [$data->[0], []] + unless exists($function_mapping->{$key}); + die("mismatched function name '" . + $data->[0] . "' at $key") + unless ($data->[0] eq + $function_mapping->{$key}->[0]); + push(@{$function_mapping->{$key}->[1]}, + @{$data->[1]}); + } + } else { + if (!defined($current)) { + lcovutil::report_parallel_error( + 'aggregate', + $ERROR_PARALLEL, + $child, + 0, + "segment $idx returned empty trace data", + keys(%children)); + next; + } + if ($total_trace->merge_tracefile( + $current, TraceInfo::UNION + )) { + # something in this segment improved coverage...so save + # the effective input files from this one + push(@effective, @$changed); + } + } + }; # end eval + if ($@) { + $childstatus = 1 << 8 unless $childstatus; + lcovutil::report_parallel_error( + 'aggregate', + $ERROR_PARALLEL, + $child, + $childstatus, + "unable to deserialize segment $idx $dumpfile:$@", + keys(%children)); + } + } + if (!defined($data) || 0 != $childstatus) { + if (!-f $dumpfile || + POSIX::SIGKILL == $signal) { + + if (exists($childRetryCounts{$idx})) { + $childRetryCounts{$idx} += 1; + } else { + $childRetryCounts{$idx} = 1; + } + lcovutil::report_fork_failure( + "aggregate segment $idx", + (POSIX::SIGKILL == $signal ? 
+ "killed by OS - possibly due to out-of-memory" + : + "serialized data $dumpfile not found"), + $childRetryCounts{$idx}); + push(@segments, $segment); + } else { + + lcovutil::report_parallel_error('aggregate', + $ERROR_CHILD, $child, $childstatus, + "while processing segment $idx", + keys(%children)); + } + } + my $end = Time::HiRes::gettimeofday(); + $lcovutil::profileData{$idx}{merge} = $end - $start; + unlink $dumpfile + if -f $dumpfile; + } + } while (@segments); + } else { + # sequential + @effective = _process_segment($total_trace, $readSourceFile, $filelist); + } + if (defined($lcovutil::tempdirname) && + !$lcovutil::preserve_intermediates) { + # won't remove if directory not empty...probably what I want, for debugging + rmdir($lcovutil::tempdirname); + } + #...and turn any enabled filters back on... + lcovutil::reenable_cov_filters($save_filters); + # filters had been disabled - need to explicitly exclude function bodies + $total_trace->applyFilters($readSourceFile); + + return ($total_trace, \@effective); +} + +# call the common initialization functions + +lcovutil::define_errors(); +lcovutil::init_filters(); + +1; diff --git a/man/gendesc.1 b/man/gendesc.1 index ed8b9cec..984b5b25 100644 --- a/man/gendesc.1 +++ b/man/gendesc.1 @@ -1,4 +1,7 @@ -.TH gendesc 1 "LCOV 1.15" 2019\-02\-28 "User Manuals" +\" Define project URL +.ds lcovurl https://github.com/linux\-test\-project/lcov + +.TH gendesc 1 "LCOV 2.0" 2023\-05\-12 "User Manuals" .SH NAME gendesc \- Generate a test case description file .SH SYNOPSIS @@ -76,3 +79,7 @@ Peter Oberparleiter .BR geninfo (1), .BR genpng (1), .BR gcov (1) +.br + +.I \*[lcovurl] +.br diff --git a/man/genhtml.1 b/man/genhtml.1 index d5a3f9ad..42a8cff6 100644 --- a/man/genhtml.1 +++ b/man/genhtml.1 @@ -1,22 +1,39 @@ -.TH genhtml 1 "LCOV 1.15" 2019\-03\-04 "User Manuals" +\" Define path to scripts +.ds scriptdir scripts + +\" Define project URL +.ds lcovurl https://github.com/linux\-test\-project/lcov + +.TH genhtml 1 "LCOV 2.0" 2023\-05\-17 "User Manuals" .SH NAME genhtml \- Generate HTML view from LCOV coverage data files .SH SYNOPSIS .B genhtml .RB [ \-h | \-\-help ] -.RB [ \-v | \-\-version ] +.RB [ \-\-version ] .RS 8 .br .RB [ \-q | \-\-quiet ] +.RB [ \-v | \-\-verbose ] +.br +.RB [ \-\-debug ] [ \-\-validate ] +.br .RB [ \-s | \-\-show\-details ] +.br .RB [ \-f | \-\-frames ] .br -.RB [ \-b | \-\-baseline\-file ] -.IR baseline\-file +.RB [ \-b | \-\-baseline\-file +.IR baseline\-file\-pattern ] .br .RB [ \-o | \-\-output\-directory .IR output\-directory ] .br +.RB [ \-\-header-title +.IR banner ] +.br +.RB [ \-\-footer +.IR string ] +.br .RB [ \-t | \-\-title .IR title ] .br @@ -31,7 +48,15 @@ genhtml \- Generate HTML view from LCOV coverage data files .IR prefix ] .RB [ \-\-no\-prefix ] .br +.RB [ \-\-build\-directory +.IR directory ] +.br +.RB [ \-\-source\-directory +.IR dirname ] +.br .RB [ \-\-no\-source ] +.RB [ \-\-no\-html ] +.br .RB [ \-\-num\-spaces .IR num ] .RB [ \-\-highlight ] @@ -46,7 +71,7 @@ genhtml \- Generate HTML view from LCOV coverage data files .IR extension ] .br .RB [ \-\-html\-gzip ] -.RB [ \-\-sort ] +.RB [ \-\-sort\-tables ] .RB [ \-\-no\-sort ] .br .RB [ \-\-function\-coverage ] @@ -55,12 +80,27 @@ genhtml \- Generate HTML view from LCOV coverage data files .RB [ \-\-branch\-coverage ] .RB [ \-\-no\-branch\-coverage ] .br -.RB [ \-\-demangle\-cpp ] +.RB [ \-\-mcdc\-coverage ] +.br +.RB [ \-\-demangle\-cpp +.IR [ param ] ] +.br +.RB [ \-\-msg\-log +.IR [ log_file_name ] ] +.br .RB [ 
\-\-ignore\-errors .IR errors ] .br +.RB [\-\-expect\-message\-count +.IR message_type=expr[,message_type=expr..]] +.br +.RB [ \-\-keep\-going ] .RB [ \-\-config\-file .IR config\-file ] +.br +.RB [ \-\-profile +.IR [profile\-file ] +.br .RB [ \-\-rc .IR keyword = value ] .br @@ -68,153 +108,2353 @@ genhtml \- Generate HTML view from LCOV coverage data files .IR num ] .RB [ \-\-missed ] .br -.IR tracefile(s) +.RB [ \-\-merge\-aliases ] +.br +.RB [ \-\-suppress\-aliases ] +.br +.RB [ \-\-forget\-test\-names ] +.br +.RB [ \-\-dark\-mode ] +.br +.RB [ \-\-baseline\-title +.IR title ] +.br +.RB [ \-\-baseline\-date +.IR date ] +.br +.RB [ \-\-current\-date +.IR date ] +.br +.RB [ \-\-diff\-file +.IR diff\-file ] +.br +.RB [ \-\-annotate\-script +.IR script ] +.br +.RB [ \-\-context\-script +.IR script ] +.br +.RB [ \-\-criteria\-script +.IR script ] +.br +.RB [ \-\-version\-script +.IR script ] +.br +.RB [ \-\-resolve\-script +.IR script ] +.br +.RB [ \-\-select\-script +.IR script ] +.br +.RB [ \-\-simplify\-script +.IR script ] +.br +.RB [ \-\-checksum ] +.br +.RB [ \-\-fail\-under\-branches +.IR percentage ] +.br +.RB [ \-\-fail\-under\-lines +.IR percentage ] +.br +.RB [ \-\-new\-file\-as\-baseline ] +.br +.RB [ \-\-elide\-path\-mismatch ] +.br +.RB [ \-\-synthesize\-missing ] +.br +.RB [ \-\-date\-bins +.IR day[,day,...]] +.br +.RB [ \-\-date\-labels +.IR string[,string,...]] +.br +.RB [ \-\-show\-owners +.IR [ all ] ] +.br +.RB [ \-\-show\-noncode ] +.br +.RB [ \-\-show\-zero\-columns ] +.br +.RB [ \-\-show\-navigation ] +.br +.RB [ \-\-show\-proportions ] +.br +.RB [ \-\-simplified-colors ] +.br +.RB [ \-\-hierarchical ] +.RB [ \-\-flat ] +.br +.RB [ \-\-filter +.IR filters ] +.br +.RB [ \-\-include +.IR glob_pattern ] +.br +.RB [ \-\-exclude +.IR glob_pattern ] +.br +.RB [ \-\-erase\-functions +.IR regexp_pattern ] +.br +.RB [ \-\-substitute +.IR regexp_pattern ] +.br +.RB [ \-\-omit\-lines +.IR regexp_pattern ] +.br +.RB [ \-\-parallel | -j +.IR [integer] ] +.br +.RB [ \-\-memory +.IR integer_num_Mb ] +.br +.RB [ \-\-tempdir +.IR dirname ] +.br +.RB [ \-\-preserve ] +.br +.RB [ \-\-save ] +.br +.RB [ \-\-sort\-input ] +.br +.RB [ \-\-serialize +.IR serialize_output_file ] +.br +.IR tracefile_pattern(s) .RE .SH DESCRIPTION -Create an HTML view of coverage data found in -.IR tracefile . -Note that -.I tracefile -may also be a list of filenames. -HTML output files are created in the current working directory unless the -\-\-output\-directory option is used. If -.I tracefile -ends with ".gz", it is assumed to be GZIP\-compressed and the gunzip tool -will be used to decompress it transparently. +.B genhtml +creates an HTML view of coverage data found in tracefiles +.B geninfo +and +.B lcov +tools which are found from glob-match pattern(s) +.I tracefile_pattern. +See man +.B geninfo(1) for a description of the tracefile format. -Note that all source code files have to be present and readable at the -exact file system location they were compiled. + Features include: -Use option -.I \--css\-file -to modify layout and colors of the generated HTML output. Files are -marked in different colors depending on the associated coverage rate. By -default, the coverage limits for low, medium and high coverage are set to -0\-75%, 75\-90% and 90\-100% percent respectively. To change these -values, use configuration file options -.IR genhtml_hi_limit " and " genhtml_med_limit . 
+.IP \(bu 3 +Differential coverage comparison against baseline coverage data +.PP +.IP \(bu 3 +Annotation of reports with date and owner information ("binning") +.PP -Also note that when displaying percentages, 0% and 100% are only printed when -the values are exactly 0% and 100% respectively. Other values which would -conventionally be rounded to 0% or 100% are instead printed as nearest -non-boundary value. This behavior is in accordance with that of the -.BR gcov (1) -tool. +The basic concepts of differential coverage and date/owner binning are described in the paper found at +.I https://arxiv.org/abs/2008.07947 -.SH OPTIONS -.B \-h +.SS Differential coverage + +Differential coverage compares two versions of source code - the baseline and the current versions - and the coverage results for each to segment the code into categories. .br -.B \-\-help + +To create a differential coverage report, +.B genhtml +requires + +.IP 1. 3 +one or more +.I baseline\-files +specified via +.BR \-\-baseline\-file , +and +.PP +.IP 2. 3 +a patch file in unified format specified using +.BR \-\-diff\-file . +.PP +.br + +Both +.I tracefile_pattern +and +.I baseline\-file +are treated as glob patterns which match one or more files. +.br + +The difference in coverage between the set of +.I tracefiles +and +.I baseline\-files +is classified line-by-line into categories based on changes in 2 aspects: + +.IP 1. 3 +.BR "Test coverage results" : +a line of code can be tested (1), untested (0), +or unused (#). An unused line is a source code line that has no associated +coverage data, for example due to a disabled #ifdef statement. +.br +.PP + +.IP 2. 3 +.BR "Source code changes" : +a line can be unchanged, added (+\ =>), or removed (=>\ \-). +Note that the diff-file format used by +.B genhtml +reports changes in lines as removal of old line and addition of new line. +.br +.PP + +Below are the resulting 12 categories, sorted by priority (assuming that untested code is more interesting than tested code, and new code is more interesting than old code): +.br + +.RE +.B UNC .RS -Print a short help text, then exit. +Uncovered New Code (+ => 0): newly added code is not tested. +.br +.RE +.B LBC +.RS +Lost Baseline Coverage (1 => 0): unchanged code is no longer tested. +.br .RE -.B \-v + +.B UIC +.RS +Uncovered Included Code (# => 0): previously unused code is untested. .br -.B \-\-version +.RE + +.B UBC .RS -Print version number, then exit. +Uncovered Baseline Code (0 => 0): unchanged code was untested before, is untested now. +.br +.RE +.B GBC +.RS +Gained Baseline Coverage (0 => 1): unchanged code is tested now. +.br .RE -.B \-q + +.B GIC +.RS +Gained Included Coverage (# => 1): previously unused code is tested now. .br -.B \-\-quiet +.RE + +.B GNC .RS -Do not print progress messages. +Gained New Coverage (+ => 1): newly added code is tested. +.br +.RE -Suppresses all informational progress output. When this switch is enabled, -only error or warning messages are printed. +.B CBC +.RS +Covered Baseline Code (1 => 1): unchanged code was tested before and is still tested. +.br +.RE +.B EUB +.RS +Excluded Uncovered Baseline (0 => #): previously untested code is unused now. +.br .RE -.B \-f + +.B ECB +.RS +Excluded Covered Baseline (1 => #): previously tested code is unused now. .br -.B \-\-frames +.RE + +.B DUB .RS -Use HTML frames for source code view. +Deleted Uncovered Baseline (0 => \-): previously untested code has been deleted. 
+.br -If enabled, a frameset is created for each source code file, providing -an overview of the source code as a "clickable" image. Note that this -option will slow down output creation noticeably because each source -code character has to be inspected once. Note also that the GD.pm Perl -module has to be installed for this option to work (it may be obtained -from http://www.cpan.org). +Note: Because these lines are not represented in the current source version, +they are only represented in the classification summary table. +.RE +.B DCB +.RS +Deleted Covered Baseline (1 => \-): previously tested code has been deleted. +.br + +Note: Because these lines are not represented in the current source version, +they are only represented in the classification summary table. +.br .RE -.B \-s + +The differential coverage report colorizes categorized regions in the source code view using unique colors for each. You can use the +.B \-\-simplified\-colors +option to instead use one color for 'covered' code and another for 'uncovered'. + +.SS Date and owner binning + +.B "Date binning" +annotates coverage reports with age-of-last-change information to distinguish +recently added or modified code which has not been tested from older, presumed +stable code which is also not tested. +.B "Owner binning" +adds annotation identifying the author of changes. .br -.B \-\-show\-details -.RS -Generate detailed directory view. -When this option is enabled, +Both age and ownership reporting can be used to enhance team efforts to maintain +good coverage discipline by spotlighting coverage shortfalls in recently +modified code, even in the absence of baseline coverage data. +.br + +To enable date and owner binning, the +.B \-\-annotate\-script +option must be used to specify a script that provides source code line age and +ownership information. +.br + +For each source line, age is the interval since the most recent modification date +and the owner is the user identity responsible for the most recent change to that line. +.br + +Line coverage overall totals and counts for each of the 12 classification categories are +collected for each of the specified age ranges (see the +.B \-\-date\-bins +option, below). + +.SS Script conventions + +Some .B genhtml -generates two versions of each -file view. One containing the standard information plus a link to a -"detailed" version. The latter additionally contains information about -which test case covered how many lines of each source file. +options expect the name of an external script or tool as argument. These +scripts are then run as part of the associated function. This includes the +following options: +.RS +.B \-\-annotate\-script +.br +.B \-\-context\-script +.br +.B \-\-criteria\-script +.br +.B \-\-resolve\-script +.br +.B \-\-select\-script +.br +.B \-\-simplify\-script +.br +.B \-\-version\-script +.br .RE -.BI "\-b " baseline\-file + +While each script performs a separate function there are some common aspects +in the way these options are handled: + +.IP 1. 3 +If the callback script name ends in +.B \.pm +then the script is assumed to be a Perl module. +.br +A perl module may offer performance advantages over an external script, as it is compiled once and loaded into the interpreter and because it can load and maintain internal state. +.br +The module is expected to export a method 'new', which is +called with the script name and the script parameters (if any) as arguments. 
+It is expected to return an object which implements several standard methods:
+.br
+.I $callback_obj = packagename\-\>new(perl_module_file, args);
+.RS 3
+.IP version\-script 3
+.I $version = $callback_obj->extract_version($source_file_name);
+.br
+.I $match = $callback_obj->check_version($old_version, $new_version, $source_file_name);
+.br
+.RS
+.IP $match 3
+ is expected to be 1 (true) if the version keys refer to the same file and 0 (false) otherwise.
+.PP
+
+.IP $version 3
+ is a string representing a unique identifier of the particular version of the file
+.PP
+.RE
+
+See example implementations
+.I $LCOV_HOME/share/lcov/support-scripts/gitversion.pm
+and
+.I $LCOV_HOME/share/lcov/support-scripts/getp4version.pm.
+.PP
+.IP annotate\-script 3
+.I ($status, $array) = $callback_obj->annotate($source_file_name);
+.br
+.br
+.br
+where
+.RS
+.IP $status 3
+ is 0 if the command succeeded and nonzero otherwise.
+.I $status
+is interpreted in the same way as the return code from 'system(..)'
+.PP
+.IP $array 3
+is a list of line data of the form:
+.br
+.I [$text, $abbrev, $full_name, $when, $changelist].
+.PP
+.br
+
+and
+
+.IP $text 3
+is the source text from the corresponding line (without newline termination)
+.PP
+.IP $abbrev 3
+is the "abbreviated author name" responsible for this line of code. This is the name that will be used in the various HTML tables. For example, for brevity/readability, you may want to strip the domain from developers who are inside your organization. If there is no associated author, then the value should be
+.I \"NONE\".
+.PP
+.IP $full_name 3
+is the "full author name" which is used in annotation tooltips. See the
+.I genhtml_annotate_tooltip
+entry in man
+.B lcovrc(5).
+.I $full_name
+may be
+.I undef
+if the full name and abbreviated names are the same.
+.PP
+.IP $when 3
+is the timestamp associated with the most recent edit of the corresponding
+line and may be
+.I \"NONE\"
+if there is no associated time.
+.PP
+.IP $changelist 3
+is the commit identifier associated with the most recent change to this line, or
+.I \"NONE\"
+if there isn't one.
+.PP
+
+
+See example implementations
+.I $LCOV_HOME/share/lcov/support-scripts/gitblame.pm
+and
+.I $LCOV_HOME/share/lcov/support-scripts/p4annotate.pm.
+Also see the illustrative module sketch following the
+.B \-\-criteria\-script
+description, below.
+.RE
+
+.IP context\-script
+.I $hash = $callback_obj->context();
+.br
+.br
+
+where
+.I $hash
+is a reference to a hash of key/value pairs which are meaningful to you.
+This data is stored in the
+.I profile
+database. See the 'profile' section in man
+.B lcovrc(5)
+for more information.
+
+If your callback is not a perl module - for example, a shell script - then it should return a string such that the first word on each line is the key and the remainder is the associated data. If a key is repeated, then the corresponding data strings are concatenated, separated by newline.
+
+If you want to record only system information, then a shell callback is likely sufficient.
+If you want to record any tool-specific/internal information, then you will need to implement a perl module so that your callback will be able to access the information.
+Note that the constructor of your
+.I context-script
+callback (or of any callback) can perform any additional actions which
+are required - for example, to write additional files, to query or set
+tool-specific information,
+.I etc.
+For example, the example implementation below has an option to append
+comments to the generated .info file.
+
+
+See the example implementation
+.I $LCOV_HOME/share/lcov/support-scripts/context.pm.
+.RE
+
+
+
+.IP criteria\-script
+.I ($status, $array) = $callback_obj->check_criteria($obj_name, $type, $json);
+.br
+.br
+
+where
+.RS
+.IP $obj_name 3
+is the source file or directory name, or \"top\" of the object whose coverage criteria is being checked.
+.PP
+.IP $type 3
+is the object type - either
+.I \"file\", \"directory\", or \"top\".
+.PP
+.IP $json 3
+is the coverage data associated with this object, in JSON format - see below.
+.PP
+.IP $status 3
+is the return status of the operation, interpreted the same way as the
+.I annotate
+callback status, described above.
+.PP
+.IP $array 3
+is a reference to a possibly empty list of strings which will be reported by genhtml. The strings are expected to explain why the coverage criteria failed.
+.PP
+
+See the example implementation
+.I $LCOV_HOME/share/lcov/support-scripts/criteria.pm.
+.RE
+
+.IP resolve\-script
+$newpath = $callback_obj->resolve($source_file_name)
+.br
+.br
+
+where
+.I $newpath
+is the correct path to the indicated source file or
+.I undef
+if the source file is not found by the callback.
+.PP
+
+.IP simplify\-script
+$new_func_name = $callback_obj->simplify($orig_func_name)
+.br
+.br
+
+where
+.I $new_func_name
+is the function name which will appear in the function detail table and
+.I $orig_func_name
+is the (possibly demangled) function name found in the coverage DB.
+
+.br
+Note that the modified name is only used in the "function detail" table
+and does not modify information in the coverage DB.
+
+.PP
+
+.RE
+
+.IP 2. 3
+The option may be specified as a single
+.I split_char
+separated string which is divided into words (see
+.B man lcovrc(5)
+), or as a list of arguments.
+The resulting command line is passed
+to a shell interpreter to be executed.
+The command line includes the script path followed by optional additional parameters
+separated by spaces. Care must be taken to provide proper quoting if the script
+path or any parameter contains spaces or shell special characters.
+.PP
+
+.IP 3. 3
+If an option is specified multiple times, then the parameters are
+.I not
+split, but are simply concatenated to form the command line - see the examples, below.
+.br
+For simplicity and ease of understanding: your command line should
+pass all arguments individually, or all as a comma-separated list - not a mix of the two.
+.PP
+
+.IP 4. 3
+.B genhtml
+passes any additional parameters specified via option arguments
+between the script path and the parameters required by the script's function.
+.br
+.PP
+
+Example:
+.br
+
+.RS
+genhtml --annotate-script /bin/script.sh
+.br
+ --annotate-script arg0 ...
+.br
+
+results in the same callback as
+
+.br
+genhtml --annotate-script "/bin/script.sh arg0" ...
+.br
+
+or
+
+.br
+genhtml --annotate-script /bin/script.sh,arg0 ...
+.br
+
+Note that the first form is preferred.
+
+.RE
+
+The resulting
+.B genhtml
+callback executes the command line:
+.br
+
+.RS
+/bin/script.sh arg0
+.I source_file_name
+.RE
+.br
+
+Similarly
+.br
+.RS
+genhtml --annotate-script
+.I /bin/myModule.pm
+.br
+ --annotate-script arg0 --annotate-script arg1 ...
+.br
+
+or
+.br
+genhtml --annotate-script
+.I /bin/myModule.pm,arg0,arg1
+.br
+
+.br
+.RE
+
+result in
+.B genhtml
+executing
+.br
+
+.RS
+$annotateCallback = myModule->new(arg0, arg1);
+.RE
+
+to initialize the class object -
+.I arg0
+and
+.I arg1
+passed as strings - and then to execute
+
+.RS
+($status, $arrayRef) = $annotateCallback->annotate(
+.I source_file_name
+);
+.RE
+
+to retrieve the annotation information.
+
+In contrast, the command
+.br
+.RS
+genhtml --annotate-script
+.I /bin/myModule.pm
+.br
+ --annotate-script arg0,arg1 ...
+.RE
+would result in
+.B genhtml
+initializing the callback object via
+.br
+
+.RS
+$annotateCallback = myModule->new("arg0,arg1");
+.RE
+where "arg0,arg1" is passed as a single comma-separated string.
+
+Similarly, the command
+.br
+.RS
+genhtml --annotate-script
+.I /bin/myModule.pm,arg0
+.br
+ --annotate-script arg1 ...
+.RE
+would very likely result in an error when genhtml tries to find
+a script called "/bin/myModule.pm,arg0".
+
+
+Note that multiple instances of each script may execute simultaneously if the
+.B \-\-parallel
+option was specified. Therefore each script must either be reentrant or arrange for its own synchronization, if necessary.
+.br
+In particular, if your callback is implemented via a perl module:
+.IP \- 3
+the class object associated with the module will be initialized once (in the parent process)
+.PP
+.IP \- 3
+the callback will occur in the child process (possibly simultaneously with other child processes).
+.PP
+As a result: if your callback needs to pass data back to the parent, you will need to arrange a communication mechanism to do so.
+.br
+
+.SS Additional considerations
+
+If the
+.B \-\-criteria\-script
+option is used, genhtml will use the referenced script to determine whether your coverage criteria have been met - and will return a non\-zero status and print a message if the criteria are not met.
+.br
+
+The
+.B \-\-version\-script
+option is used to verify that the same/compatible source code versions are displayed as were used to capture coverage data, as well as to verify that the same source code was used to capture coverage information which is going to be merged and to verify that the source version used for filtering operations is compatible with the version used to generate the data.
+
+HTML output files are created in the current working directory unless the
+.B \-\-output\-directory
+option is used. If
+.I tracefile
+or
+.I baseline\-file
+ends with ".gz", it is assumed to be GZIP\-compressed and the gunzip tool
+will be used to decompress it transparently.
+
+Note that all source code files have to be present and readable at the
+exact file system location they were compiled, and all path references in the input data ".info" and "diff" files must match exactly (i.e., exact string match).
+.br
+
+Further, the
+.BR \-\-version\-script ", " \-\-annotate\-script ", and " \-\-criteria\-script " scripts "
+use the same path strings.
However, see the +.B \-\-substitute +and +.B \-\-resolve\-script +options for a mechanism to adjust extracted paths so they match your source and/or revision control layout. + +.br +You can use the +.BR check_exisitence_before_callback +configuration option to tell the tool to check that the file exists before +calling the +.BR \-\-version\-script +or +.BR \-\-annotate\-script +callback. See man +.B lcovrc(5) +for details. +.br + +.SS Additional options + +Use option +.B \-\-diff\-file +to supply a unified diff file that represents the changes to the source +code files between the version used to compile and capture the baseline +trace files, and the version used to compile and capture the current +trace files. + +Use option +.B \-\-css\-file +to modify layout and colors of the generated HTML output. Files are +marked in different colors depending on the associated coverage rate. +.br + +By default, the coverage limits for low, medium and high coverage are set to +0\-75%, 75\-90% and 90\-100% percent respectively. To change these +values, use configuration file options. +.br + +.RS +.IR genhtml_hi_limit " and " genhtml_med_limit +.RE +.br + +or type-specific limits: +.br + +.RS +.IR genhtml_line_hi_limit " and " genhtml_line_med_limit +.br +.IR genhtml_branch_hi_limit " and " genhtml_branch_med_limit +.br +.IR genhtml_function_hi_limit " and " genhtml_function_med_limit +.br +.RE + +See man +.B lcovrc(5) +for details. + +Also note that when displaying percentages, 0% and 100% are only printed when +the values are exactly 0% and 100% respectively. Other values which would +conventionally be rounded to 0% or 100% are instead printed as nearest +non\-boundary value. This behavior is in accordance with that of the +.BR gcov (1) +tool. + +By default, +.B genhtml +reports will include both line and function coverage data. +Neither branch or MC/DC data is displayed by default; you can use the +.B \-\-branch\-coverage +and +.B \-\-mcdc\-coverage +options to enable branch or MC/DC coverage, respectively - or you can permanently enable branch coverage by adding the appropriate +settings to your personal, group, or site lcov configuration file. See the +.B branch_coverage +and +.B mcdc_coverage +sections of man +.B lcovrc(5) +for details. + + +.SH OPTIONS + +In general, (almost) all +.B genhtml +options can also be specified in your personal, group, project, or site +configuration file - see man +.B lcovrc(5) +for details. + + +.B \-h +.br +.B \-\-help +.RS +Print a short help text, then exit. + +.RE +.B \-\-version +.RS +Print version number, then exit. + +.RE +.B \-v +.br +.B \-\-verbose +.RS +Increment informational message verbosity. This is mainly used for script and/or flow debugging - e.g., to figure out which data files are found, where. +Also see the \-\-quiet flag. + +.RE +.B \-q +.br +.B \-\-quiet +.RS +Decrement informational message verbosity. + +Decreased verbosity will suppress 'progress' messages for example - while error and warning messages will continue to be printed. + +.RE +.B \-\-debug +.RS +Increment 'debug messages' verbosity. This is useful primarily to developers who want to enhance the lcov tool suite. + +.RE +.B \-\-validate +.RS +Check the generated HTML to verify that there are no dead hyperlinks and no unused files in the output directory. +The checks can also be enabled by setting environment variable +.B LCOV_VALIDATE = 1. +This option is primarily intended for use by developers who modify the HTML report. 
+ +.RE +.B \-\-flat +.br +.B \-\-hierarchical +.RS +Use the specified HTML report hierarchy layout. +.br + +The default HTML report is 3 levels: + +.RS 2 +.IP 1. 3 +.B top\-level: +table of all directories, +.PP + +.IP 2. 3 +.B directory: +table of source files in a directory, and +.PP + +.IP 3. 3 +.B source file detail: +annotated source code. +.PP +.RE + +Option +.B \-\-hierarchical +produces a multilevel report which follows the directory structure of the +source code (similar to the file tool in Microsoft Windows). + +Option +.B \-\-flat +produces a two-level HTML report: + +.RS 2 +.IP 1. 3 +.B top\-level: +table of all project source files, and +.PP + +.IP 2. 3 +.B source file detail: +annotated source code. +.PP +.RE + +The 'flat' view can reduce the number of clicks required to navigate around +the coverage report - but is unwieldy except for rather small projects consisting of only a few source files. It can be useful in 'code review' mode, even for very large projects (see the +.I \-\-select\-script +option). +.br +Most large projects follow a rational directory structure - which favors the 'hierarchical' report format. Teams responsible for a particular module can focus on a specific subdirectory or set of subdirectories. + +Only one of options +.B \-\-flat +or +.B \-\-hierarchical +can be specified at the same time. + +These options can also be persistently set via the lcovrc configuration file +using either: +.br + +.RS +.I genhtml_hierarchical += 1 +.br +.RE + +or +.br + +.RS +.I genhtml_flat_view += 1 +.br +.RE + +See man +.B lcovrc(5) +for details. + +.RE +.B \-f +.br +.B \-\-frames +.RS +Use HTML frames for source code view. + +If enabled, a frameset is created for each source code file, providing +an overview of the source code as a "clickable" image. Note that this +option will slow down output creation noticeably because each source +code character has to be inspected once. Note also that the GD.pm Perl +module has to be installed for this option to work (it may be obtained +from http://www.cpan.org). + +This option can also be controlled from the +.I genhtml_frames +entry of the +.B lcovrc +file. + +Please note that there is a bug in firefox and in chrome, such that +enabling frames will disable hyperlinks from the 'directory' level summary +table entry to the first line in the corresponding file in the particular +category - e.g., to the first 'MIS' line (vanilla coverage report - see the +.i \-\-show\-navigation +option, below), to +the first 'UNC' branch (differential coverage report), etc. +Hyperlinks from the summary table at the top of the 'source detail' page are not affected. + +.RE +.B \-s +.br +.B \-\-show\-details +.RS +Generate detailed directory view. + +When this option is enabled, +.B genhtml +generates two versions of each source file file entry in the corresponding summary table: +.IP +one containing the standard information plus a link to a +"detailed" version, and +.PP +.IP +a second which contains the number of coverpoints in the hit by each +testcase. +.br +Note that missed coverpoints are not shown in the per-testcase table entry data. +.PP + +The corresponding summary table is found on the 'directory' page of the default 3-level genthm report, or on the top-level page of the 'flat' report (see +.I genhtml \-\-flat ... +), or on the parent directory page of the 'hierarchical' report (see +.I genhtml \-\-hierarchical ... +). + +Note that this option may significantly increase memory consumption. 
+ +.RE +.BI "\-b " baseline\-file\-pattern +.br +.BI "\-\-baseline\-file " baseline\-file\-pattern +.RS +Use data in the files found from glob pattern +.I baseline\-file\-pattern +as coverage baseline. + +.B \-\-baseline\-file +may be specified multiple times - for example, if you have multiple trace data files for each of several test suites and you do not want to go through the additional step of merging all of them into a single aggregated data file. + +The coverage data files specified by +.I baseline\-file\-pattern +is read and used as the baseline for classifying the change in coverage represented by the coverage counts in +.IR tracefile\-patterns . +If +.I baseline\-file\-pattern +is a directory, then genhtml will search the directory for all files ending in '.info'. +See the +.I info_file_extension +section in +.B man(5) lcovrc +for how to change this pattern. + +In general, you should specify a diff file in unified diff format via +.B \-\-diff\-file +when you specify a +.IR \-\-baseline\-file\-pattern . +Without a diff file, genhtml will assume that there are no source differences +between 'baseline' and 'current'. +For example: this might be used to find +incremental changes caused by the addition of more testcases, or to compare +coverage results between gcc versions, or between gcc and llvm. + +.RE +.BI "\-\-baseline\-title " title +.RS +Use +.I title +as the descriptive label text for the source of coverage baseline data. + +.RE +.BI "\-\-baseline\-date " date +.RS +Use +.I date +as the collection date in text format for the coverage baseline data. +If this argument is not specified, the default is to use the creation time of the first file matched by +.I baseline\-file\-pattern +as the baseline date. If there are multiple baseline files, then the creation date of the first file is used. + +.RE +.BI "\-\-current\-date " date +.RS +Use +.I date +as the collection date in text format for the coverage baseline data. +If this argument is not specified, the default is to use the creation time of the current +.IR tracefile . + +.RE +.BI "\-\-diff\-file " diff\-file +.RS +Use the +.I diff\-file +as the definition for source file changes between the sample points for +.I baseline\-file\-pattern +and +.IR tracefile(s) . +.br + + +Note: + +.IP - 3 +if filters are applied during the creation of a differential coverage report, +(see the +,I \-\-filter +section, below), then those filters will be applied to the +.I baseline coverage data +(see the +.I \-\-baseline\-file +section, above) as well as to the +.I current coverage data. +It is important that the +.I diff-file +accurately reflect all source code changes so that the baseline coverage data can be correctly filtered. +.PP + +.IP - 3 +Best practice is to use a +.I \-\-version\-script +callback to verify that source versions match before source-based filtering is applied. +.PP + +It is almost always a better idea to filter at capture or aggregate time - not at report generation. + +A suitable +.I \"universal diff\" +input file for the +.I \-\-diff\-file +option +can be generated using either the "p4udiff" or "gitdiff" sample scripts that are provided as part of this package, or by using revision control commands directly. + +The "p4udiff" or "gitdiff" sample scripts are found in: + +.RS +\*[scriptdir]/p4udiff +.br +.RE + +and + +.RS +\*[scriptdir]/gitdiff +.br +.RE + + +These scripts simply post\-process the 'p4' or 'git' output to (optionally) remove files that are not of interest and to explicitly note files which have not changed. 
+ +.B p4udiff +accepts either a changelist ID or the literal string "sandbox"; "sandbox" indicates that there are modified files which have not been checked in. +See " +.I gitdiff \-\- help +" and " +.I p4udiff \-\- help +" for more information. + +It is useful to note unchanged files denoted by lines of the form: +.br + +.RS +diff [optional header strings] +.br +=== file_path +.RE +.br + +in the p4diff/gitdiff output as this knowledge will help to suppress spurious 'path mismatch' warnings. See the +.B \-\-elide\-path\-mismatch +and +.B \-\-build\-directory +entries, below. + +In general, you will specify +.B \-\-baseline\-file +when you specify +.BR \-\-diff\-file . +The +.I baseline_files +are used to compute coverage differences ( +.I e.g. +gains and losses) between the baseline and current, where the +.I diff_file +is used to compute code changes: source text is +identical between 'baseline' and 'current'. +If you specify +.I baseline_files +but no +.I diff_file, +the tool will assume that there are no code changes between baseline and current. +If you specify a +.I diff_file +but no +.I baseline_files, +the tool will assume that there is no baseline coverage data (no baseline code was covered); as result unchanged code ( +.I i.e., +which does not appear in the +.I diff_file +will be categorized as eiher GIC (covered) or UIC (not covered) while new or changed code will be categorized as either GNC or UNC. + +.RE +.BI "\-\-annotate\-script " script +.RS +Use +.I script +to get source code annotation data. + +Use this option to specify an external tool or command line that +.B genhtml +can use to obtain source code annotation data such as age and author of the last +change for each source code line. +.br + +This option also instructs +.B genhtml +to add a summary table to the HTML report header that shows counts in the various coverage categories, associated with each date bin. In addition, each source code line will show age and owner information. +Annotation data is also used to populate a 'tooltip' which appears when the mouse +hovers over the associated source code. See the +.I genhtml_annotate_tooltip +entry in man +.B lcovrc(5) +for details. +.br + +The specified +.I script +is expected to obtain age and ownership information +for each source code line from the revision management system and to output +this information in the format described below. +.br + +If the annotate script fails and annotation errors are ignored via +.BR --ignore-errors , +then +.B genhtml +will try to load the source file normally. If the file is not present or not readable, and the +.B \-\-synthesize\-missing +flag is specified, then +.B genhtml +will synthesize fake data for the file. +.br + +.B genhtml +will emit an error if you have specified an annotation script but no files are +successfully annotated (see below). +This can happen, for example, if your P4USER, P4CLIENT, +or P4PORT environment variables are not set correctly - +.I e.g. if the +Jenkins user who generates coverage reports is not the same and the user +who checked out the code and owns the sandbox. +.br + +Sample annotation scripts for Perforce ("p4annotate") and git ("gitblame") +are provided as part of this package in the following locations: +.br + +.RS +\*[scriptdir]/p4annotate +.br +.RE + +and +.br + +.RS +\*[scriptdir]/gitblame +.br +.RE + +Note that these scripts generate annotations from the file version checked in to the repository - not the locally modified file in the build directory. 
If you need annotations for locally modified files, you can shelve your changes in P4, or check them in to a local branch in git.
+
+.B "Creating your own script"
+.br
+
+When creating your own script, please first see
+.B "Script considerations"
+above for general calling conventions and script requirements.
+.br
+
+.I script
+is called by genhtml with the following command line:
+
+.RS
+.B script
+.I "[additional_parameters] " source_file_name
+.RE
+.br
+
+where
+
+.RS
+.B script
+.br
+.RS
+is the script executable
+.br
+.RE
+
+.B additional_parameters
+.br
+.RS
+includes any optional parameters specified (see
+.B "Script conventions"
+above)
+.br
+.RE
+
+.B source_file_name
+.br
+.RS
+is the source code file name
+.br
+.RE
+.RE
+
+The
+.I script
+executable should output a line to the standard output stream in the following format for each line in file
+.IR source_file_name :
+.br
+
+.RS
+.IR commit_id | author_data | date | source_code
+.br
+.RE
+
+where
+
+.RS
+.B commit_id
+.br
+.RS
+is an ID identifying the last change to the line, or NONE if this file is not
+checked in to your revision control system.
+.br
+.B genhtml
+counts the file as not 'successfully annotated' if
+.B commit_id
+is
+.I NONE
+and as 'successfully annotated' otherwise.
+.br
+.RE
+.br
+
+.B author_data
+.br
+.RS
+identifies the author of the last change.
+.br
+For backward compatibility with existing annotate-script implementations,
+two
+.I author_data
+formats are supported:
+.IP - 3
+.I string
+: the string used as both the 'abbreviated name' (used as 'owner' name in
+HTML output and callbacks) and as 'full name' (used in tooltip callbacks)
+.PP
+.IP - 3
+.I abbrev_string;full_name
+: the
+.I author_data
+string contains both an 'abbreviated name' and a 'full name' - separated by a semicolon character (';').
+.br
+This is useful when generating coverage reports for open-source software
+components where there are many 'External' contributors who you do not want
+to distinguish in 'owner' summary tables but you still want to know who the
+actual author was. (See the
+.B gitblame
+callback script for an example.)
+.PP
+.br
+.RE
+.br
+
+.B date
+.br
+.RS
+is the date of the last change in W3CDTF format (<year>-<month>-<day>T<hour>:<minute>:<second>)
+.br
+.RE
+.br
+
+.B source_code
+.br
+.RS
+is the line's source code
+.br
+.RE
+.br
+.RE
+
+The script should return 0 (zero) if processing was successful and non\-zero if it encountered an error.
+.br
+
+.RE
+.BI "\-\-criteria\-script " script
+.RS
+Use
+.I script
+to test for coverage acceptance criteria.
+.br
+
+Use this option to specify an external tool or command line that
+.B genhtml
+can use to determine if coverage results meet custom acceptance criteria.
+Criteria checking results are shown in the standard output log of
+.BR genhtml .
+If at least one check fails,
+.B genhtml
+will exit with a non-zero exit code after completing its processing.
+.br
+
+A sample coverage criteria script is provided as part of this package in the
+following location:
+
+.RS
+\*[scriptdir]/criteria
+.br
+.RE
+
+The sample script checks that top\-level line coverage meets the criteria "UNC + LBC + UIC == 0" (added code and newly activated code must be tested, and existing tested code must not become untested).
+.br
+
+As another example, it is possible to create scripts that mimic the
+.B "lcov \-\-fail\-under\-lines"
+feature by checking that the ratio of exercised lines to total lines ("(GNC + GIC + CBC) / (GNC + GIC + CBC + UNC + UIC + UBC)") is greater than the threshold - either only at the top level, in every directory, or wherever desired. Similarly, criteria may include branch and function coverage metrics.
+.br
+
+By default, the criteria script is called for all source code hierarchy levels, i.e., top-level, directory, and file-level. The
+.I criteria_callback_levels
+configuration file option can be used to limit the hierarchy levels to any combination of 'top', 'directory', or 'file' levels.
+.br
+
+Example:
+.br
+
+.RS
+genhtml --rc criteria_callback_levels=directory,top ...
+.br
+.RE
+
+You can increase the amount of data passed to the criteria script using
+configuration file option
+.IR criteria_callback_data .
+By default, only total counts are included. Specifying "date" adds per
+date-bin counts, "owner" adds per owner-bin counts.
+.br
+
+Example:
+.br
+
+.RS
+genhtml --rc criteria_callback_data=date,owner ...
+.br
+.RE
+
+See man
+.B lcovrc(5)
+for more details.
+
+.B "Creating your own script"
+.br
+
+When creating your own script, please first see
+.B "Script considerations"
+above for general calling conventions and script requirements.
+.br
+
+.I script
+is run with the following command line for each source code file,
+leaf\-directory, and top-level coverage results:
+
+.RS
+.B script
+.I "[additional_parameters] " "name " " type"
+.I "coverage_data"
+.br
+.RE
+
+where
+
+.RS
+.B script
+.br
+.RS
+is the script executable
+.br
+.RE
+
+.B additional_parameters
+.br
+.RS
+includes any optional parameters specified (see
+.B "Script conventions"
+above)
+.br
+.RE
+
+.B name
+.br
+.RS
+is the name of the object for which coverage criteria should be checked,
+that is either the source code file name, directory name, or "top" if the
+script is called for top-level data
+.br
+.RE
+
+.B type
+.br
+.RS
+is the type of source code object for which coverage criteria should be
+checked, that is one of "file", "directory", or "top"
+.br
+.RE
+
+.B coverage_data
+.br
+.RS
+is either a coverage data hash or a JSON representation of the coverage data hash of the corresponding source code
+object.
+If the callback is a Perl module, then it is passed a hash object - otherwise, it is passed a JSON representation of that data.
+
+.br
+.RE
+.RE
+
+The JSON data format is defined as follows:
+.br
+
+{
+.br
+    "<type>": {
+.br
+        "found": <found>,
+.br
+        "hit": <hit>,
+.br
+        "<category>": <count>,
+.br
+        ...
+.br
+    },
+.br
+    "<bin_type>": {
+.br
+        "<bin_id>" : {
+.br
+            "found": <found>,
+.br
+            "hit": <hit>,
+.br
+            "<category>": <count>,
+.br
+            ...
+.br
+        },
+.br
+        ...
+.br
+    },
+.br
+    ...
+.br
+}
+.br
+
+where
+
+.RS
+.B type
+.br
+.RS
+specifies the type of coverage as one of "line", "function", or "branch"
+.br
+.RE
+
+.B bin_type
+.br
+.RS
+specifies the type of per-bin coverage as one of "line_age", "function_age", or "branch_age" for date-bin data, and "line_owners" or "branch_owners" for owner-bin data
+.br
+.RE
+
+.B bin_id
+.br
+.RS
+specifies the date-bin index for date-bin data, and owner ID for owner-bin data.
+.br
+.RE
+
+.B found
+.br
+.RS
+defines the number of found lines, functions, or branches
+.br
+.RE
+
+.B hit
+.br
+.RS
+defines the number of hit lines, functions, or branches
+.br
+.RE
+
+.B category
+.br
+.RS
+defines the number of lines, functions, or branches that fall in the specified
+category (see
+.B "Differential coverage"
+above)
+.br
+.RE
+
+.RE
+
+Note that data is only reported for non-empty coverage types and bins.
+.br
+
+The script should return 0 (zero) if the criteria are met and non\-zero otherwise.
+.br
+
+If desired, it may print a single line output string which will be appended to the error log if the return status is non\-zero. Additionally, non\-empty lines are appended to the genhtml standard output log.
+.br
+
+.RE
+
+.B \-\-version\-script
+.I script
+.br
+.RS
+Use
+.I script
+to get source code file version data.
+
+Use this option to specify an external tool or command line that
+.B genhtml
+can use to obtain a source code file's version ID when generating HTML or
+applying source filters (see
+.B \-\-filter
+option).
+.br
+
+A version ID can be a file hash or commit ID from revision control. It is used to check the version of the source file which is loaded against the version which was used to generate coverage data (i.e., the file version seen by lcov/geninfo). It is important that source code versions match - otherwise inconsistent or confusing results may be produced.
+.br
+
+Version mismatches typically happen when the tasks of capture, aggregation, and report generation are split between multiple jobs - e.g., when the same source code is used in multiple projects, a unified/global coverage report is required, and the projects accidentally use different revisions.
+.br
+
+If your .info (coverage data) file does not contain version information - for example, because it was generated by a tool which did not support versioning - then you can use the
+.I "compute_file_version = 1"
+config file option to generate the data afterward. A convenient way to do this might be to use
+.B lcov
+.I \-\-add\-tracefile
+to read the original file, insert version information, and write out the result.
+See man
+.B lcovrc(5)
+for more details.
+
+
+Sample scripts for Perforce ("getp4version"), git ("gitversion") and using an md5 hash ("get_signature") are provided as part of this package in the following locations:
+.br
+
+.RS
+.I \*[scriptdir]/getp4version
+.RE
+.br
+
+.RS
+.I \*[scriptdir]/gitversion
+.RE
+.br
+
+and
+.br
+
+.RS
+.I \*[scriptdir]/get_signature
+.RE
+.br
+
+Note that you must use the same script/same mechanism to determine the file version when you extract, merge, and display coverage data - otherwise, you may see spurious mismatch reports.
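+
+For example (a sketch only - the directory and file names are placeholders), the same callback might be passed at both capture time and report time:
+.br
+
+.RS
+lcov \-\-capture \-\-directory build \-\-version\-script \*[scriptdir]/gitversion \-o current.info
+.br
+genhtml \-\-version\-script \*[scriptdir]/gitversion \-o report current.info
+.br
+.RE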
+.br + +.B "Creating your own script" +.br + +When creating your own script, please first see +.B "Script considerations" +above for general calling conventions and script requirements. +.br + +.I "script " +is used both to generate and to compare the version ID to enable retaining history between calls or to do more complex processing to determine equivalence. +It will be called by +.B genhtml +with either of the following command lines: +.br + +1. Determine source file version ID +.br + +.RS +.BI script " source_file_name" +.RE +.br + +It should write the version ID of +.I " source_file_name " +to stdout and return a 0 exit status. +If the file is not versioned, it should write an empty string and return a 0 exit status. +.br + +2. Compare source file version IDs + +.RS +.B script \-\-compare +.I " source_file_name source_file_id" +.br +.I " info_file_id" +.br + +.RE +where +.RS +.br + +.B "source_file_name" +.RS +is the source code file name +.RE +.br + +.B "source_file_id " +.RS +is the version ID returned by calling "script source_file_name" +.RE +.br + +.B "info_file_id " +.RS +is the version ID found in the corresponding .info file +.RE +.RE +.br + +It should return non\-zero if the IDs do not match. +.br + +.RE + +.B \-\-resolve\-script +.I script +.br +.RS +Use +.I script +to find the file path for some source file which which appears in +an input data file if the file is not found after applying +.I \-\-substitute +patterns and searching the +.I \-\-source\-directory +list. This option is equivalent to the +.B resolve_script +config file option. See man +.B lcovrc(5) +for details. +.RE + +.B \-\-select\-script +.I callback +.br +.RS +Use +.I callback +to decide whether a particular source line is interesting and should be +included in the output data/generated report or not. + +This option is equivalent to the +.B select_script +config file option. See man +.B lcovrc(5) +for details. +.RE + +.B \-\-simplify\-script +.I callback +.br +.RS +Use +.I callback +to shorten/simplify long demangled C++ function and template names to make the function detail table more compact and readable - for example, to +remove nested namespace names. + +Note that the simplifications affect only the display and not the actual names +stored in the coverage DB. In particular, the DB name (not the simplified name) +is the one used to match +.I \-\-erase\-function +patterns. + +This option is equivalent to the +.B simplify_script +config file option. See man +.B lcovrc(5) +for details +.RE + +.BI "\-\-checksum " +.RS +Specify whether to compare stored tracefile checksum to checksum computed from the source code. + +Checksum verification is +.B disabled +by default. + +When checksum verification is enabled, a checksum will be computed for each source +code line and compared to the checksum found in the 'current' tracefile. +This will help to prevent attempts to display source code which is not identical +to the code used to generate the coverage data. + +Note that this option is somewhat subsumed by the +.B \-\-version\-script +option - which does something similar, but at the 'whole file' level. + +.RE +.B \-\-fail\-under\-branches +.I percentage +.br +.RS +Use this option to tell genhtml to exit with a status of 1 if the total +branch coverage is less than +.I percentage. +See +.B man lcov(1) +for more details. + +.RE +.B \-\-fail\-under\-lines +.I percentage +.br +.RS +Use this option to tell genhtml to exit with a status of 1 if the total +line coverage is less than +.I percentage. 
+
+See
+.B man lcov(1)
+for more details.
+
+
+.RE
+.B \-\-new\-file\-as\-baseline
+.RS
+By default, when code is identified on source lines in the 'current' data which were not identified as code in the 'baseline' data, but the source text has not changed, their coverpoints are categorized as "included code":
+.I GIC
+or
+.I UIC.
+.br
+
+However, if the configuration of the coverage job has been recently changed to instrument additional files, then all un\-exercised coverpoints in those files will fall into the
+.I UIC
+category - which may cause certain coverage criteria checks to fail.
+.br
+
+When this option is specified, genhtml pretends that the baseline data for the file is the same as the current data - so coverpoints are categorized as
+.I CBC
+or
+.I UBC
+which do not trigger the coverage criteria check.
+
+Please note that coverpoints in the file are re\-categorized only if:
+
+.RS
+.IP \(bu 3
+There is no 'baseline' data for any coverpoint in this file, AND
+.PP
+.IP \(bu 3
+The file pre\-dates the baseline: the oldest line in the file is older than the 'baseline' data file (or the value specified by the
+.B \-\-baseline\-date
+option).
+.PP
+.RE
+
+.RE
+.BI "\-\-elide\-path\-mismatch"
+.RS
+Differential categorization uses file pathnames to match coverage entries from the ".info" file with file difference entries in the unified\-diff\-file. If the entries are not identical, then categorization may be incorrect or strange.
+
+When paths do not match, genhtml will produce "path" error messages to tell you about the mismatches.
+
+If mismatches occur, the best solution is to fix the incorrect entries in the .info and/or unified\-diff\-file files. However, if fixing these entries is not possible, then you can use this option to attempt to automatically work around them.
+.br
+
+When this option is specified, genhtml will pretend that the unified\-diff\-file entry matches the .info file entries if:
+
+.RS
+.IP \(bu 3
+the same path is found in both the 'baseline' and 'current' .info files, and
+.PP
+.IP \(bu 3
+the basename of the path in the .info file and the path in the unified\-diff\-file are the same, and
+.PP
+.IP \(bu 3
+there is only one unmatched unified\-diff\-file entry with that basename.
+.PP
+.RE
+
+See the
+.B \-\-diff\-file
+and
+.B \-\-build\-directory
+entries for a discussion of how to avoid spurious warnings and/or incorrect matches.
+
+.RE
+.BI "\-\-synthesize\-missing"
+.RS
+Generate (fake) file content if source file does not exist.
+This option can be used to work around otherwise fatal annotation errors.
+
+When generating annotated file content,
+.B genhtml
+assumes that the source was written 'now' (so age is zero), the author is
+.I no.body
+and the commit ID is
+.I synthesized.
+These names and ages will appear in your HTML reports.
+
+.br
+This option is equivalent to the config file
+.I genhtml_synthesize_missing
+parameter; see man
+.B lcovrc(5)
+for details.
+
+
+.RE
+.BI "\-\-date\-bins " day[,day,...]
+.RS
+The
+.B \-\-date\-bins
+option is used to specify age boundaries (cutpoints) for date\-binning classification. Each
+.I age
+element is expected to be an integer number of days prior to today (or prior to your SOURCE_DATE_EPOCH environment variable, if set). If
+.I \-\-date\-bins
+is not specified, the default is to use 4 age ranges: less than 7 days, 7 to 30 days, 30 to 180 days, and more than 180 days.
+This option is equivalent to the
+.I genhtml_date_bins
+config file option. See man
+.B lcovrc(5).
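+
+For example (a sketch which simply reproduces the default cutpoints explicitly - the output and data file names are placeholders):
+.br
+
+.RS
+genhtml \-\-annotate\-script \*[scriptdir]/gitblame \-\-date\-bins 7,30,180 \-o report current.info
+.br
+.RE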
+ +This argument has no effect if there is no +.I source\-annotation\-script . + +.RE +.BI "\-\-date\-labels " string[,string,...] +.RS +The +.B \-\-date\-labels +option is used to specify labels used for the 'date\-bin' table entries in the HTML report. +.br +The number of labels should be one greater than the number of cutpoints. +.br +If not specified, the default is to use label strings which specify the +.I [from ..to) +range of ages held by the corresponding bin. + +One possible use of this option is to use release names in the tables - +.I i.e., +to indicate the release in which each particular line first appeared. + +This option is equivalent to the +.I genhtml_date_labels +config file option. See man +.B lcovrc(5). + +This argument has no effect if there is no +.I source\-annotation\-script . + +.RE +.BI "\-\-show\-owners " [all] +.RS +If the +.B \-\-show\-owners +option is used, each coverage report header report contain a summary table, showing counts in the various coverage categories for everyone who appears in the revision control annotation as the most recent editor of the corresponding line. If the optional argument 'all' is not specified, the table will show only users who are responsible for un\-exercised code lines. If the optional argument is specified, then users responsible for any code lines will appear. In both cases, users who are responsible for non\-code lines (e.g, comments) are not shown. +This option does nothing if +.B \-\-annotate\-script +is not used; it needs revision control information provided by calling the script. + +Please note: if the +.I all +option is not specified, the summary table will contain "Total" rows for all date/owner bins which are not empty - but there will be no secondary "File/Directory" entries for elements which have no "missed" coverpoints. + +.br +This option is equivalent config file +.I genhtml_show_owner_table +parameter; see man +.B lcovrc(5) +for details. + +The lcovrc controls +.I owner_table_entries +and +.I truncate_owner_table +can be used to improve readability by limiting the number of authors who are displayed in the table +when the author number is large. +For example, if your configuration is: +.RS +.PP +.I owner_table_entries = 5 +.IP +.PP +.I truncate_owner_table = top,directory +.PP +.RE +then the owner table displayed at the top- and directory-levels will be truncated while the table shown at the 'file' level will display the full list. + +See man +.B lcovrc(5) +for details. + + +.RE +.BI "\-\-show\-noncode " +.RS +By default, the source code detail view does not show owner or date annotations in the far-left column for non\-code lines (e.g., comments). If the +.B \-\-show\-noncode +option is used, then the source code view will show annotations for both code and non\-code lines. +This argument has no effect if there is no +.I source\-annotation\-script . + +.br +This option is equivalent config file +.I genhtml_show_noncode_owners +parameter; see man +.B lcovrc(5) +for details. + + +.RE +.BI "\-\-show\-zero\-columns " +.RS +By default, columns whose entries are all zero are removed (not shown) in the summary table at the top of each HTML page. +If the +.B \-\-show\-zero\-columns +option is used, then those columns will be shown. + +When columns are retained, then all the tables have the same width/contain the same number of columns - which may be a benefit in some situations. + +When columns are removed, then the tables are more compact and easier to read. 
+This is especially true in relatively mature development environments, when there are very few un-exercised coverpoints in the project. + +.RE +.BI "\-\-show\-navigation " +.RS +By default, the summary table in the source code detail view does not contain hyperlinks from the number to the first line in the corresponding category ('Hit' or 'Missed') and from the current location to the next location in the current category, in non-differential coverage reports. (This is the lcov 'legacy' view non-differential reports.) + +If the +.B \-\-show\-navigation +option is used, then the source code summary table will be generated with navigation links. +Hyperlinks are always generated for differential coverage reports. + +This feature enables developers to find and understand coverage issues more quickly than they might otherwise, if they had to rely on scrolling. + +See the +.I \-\-frames +description above for a description of a browser bug which disables +these hyperlinks in certain conditions. + +Navigation hyperlinks are always enabled in differential coveage report. + +.RE +.BI "\-\-show\-proportions " +.RS +In the 'function coverage detail' table, also show the percentage of lines and branches within the function which are exercised. + +This feature enables developers to focus attention on functions which have the largest effect on overall code coverage. + +This feature is disabled by default. +Note that this option requires that you use a compiler version which is new enough to support function begin/end line reports or that you configure the tool to derive the required data - see the +.BI derive_function_end_line +discussion in man +.B lcovrc(5). + + +.RE +.BI "\-\-simplified\-colors " +.RS +By default, each differential category is colorized uniquely in the source code detail view. With this option, only two colors are used: one for covered code and another for uncovered code. Note that ECB and EUB code is neither covered nor uncovered - and so may be difficult to distinguish in the source code view, as they will be presented in normal background color. + +.RE +.BI "\-\-exclude " +.I pattern +.RS +pattern is a glob\-match pattern of filenames to exclude from the report. +Files which do NOT match will be included. +See the lcov man page for details. + +.RE +.BI "\-\-include " +.I pattern +.RS +pattern is a glob\-match pattern of filenames to include in processing. +Files which do not match will be excluded from the report. +See the lcov man page for details. +.RE + +.B \-\-erase\-functions +.I regexp +.br +.RS +Exclude coverage data from lines which fall within a function whose name matches the supplied regexp. Note that this is a mangled or demangled name, depending on whether the +.B \-\-demangle\-cpp +option is used or not. + +Note that this option requires that you use a compiler version which is new enough to support function begin/end line reports or that you configure the tool to derive the required data - see the +.BI derive_function_end_line +discussion in man +.B lcovrc(5). +.RE + +.B \-\-substitute +.I regexp_pattern +.br +.RS +Apply Perl regexp +.IR regexp_pattern +to source file names found during processing. This is useful when some file paths in the baseline or current .info file do not match your source layout and so the source code is not found. +See the lcov man page for more details. + +Note that the substitution patterns are applied to the +.IR \-\-diff\-file +entries as well as the baseline and current .info files. 
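+
+For example (a sketch - the path fragments are placeholders for your own build and checkout locations):
+.br
+
+.RS
+genhtml \-\-substitute 's#^/tmp/build/#src/#' \-\-diff\-file changes.diff \-o report current.info
+.br
+.RE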
+.RE + +.B \-\-omit\-lines +.I regexp_pattern +.br +.RS +Exclude coverage data from lines whose content matches +.IR regexp . + +Use this switch if you want to exclude line and branch coverage data for some particular constructs in your code (e.g., some complicated macro). +See the lcov man page for details. + +.RE +.BI "\-\-parallel " +.I [ integer ] +.br +.BI "\-j " +.I [ integer ] +.RS +Specify parallelism to use during processing (maximum number of forked child processes). If the optional integer parallelism parameter is zero or is missing, then use to use up the number of cores on the machine. Default is to use a single process (no parallelism). +.br +Also see the +.I memory, memory_percentage, max_fork_fails +and +.I fork_fail_timeout +entries in man +.B lcovrc(5). + + +.RE +.BI "\-\-memory " +.I integer +.RS +Specify the maximum amount of memory to use during parallel processing, in Mb. Effectively, the process will not fork() if this limit would be exceeded. Default is 0 (zero) - which means that there is no limit. + +This option may be useful if the compute farm environment imposes strict limits on resource utilization such that the job will be killed if it tries to use too many parallel children - but the user does not know a priori what the permissible maximum is. This option enables the tool to use maximum parallelism - up to the limit imposed by the memory restriction. + +The configuration file +.I memory_percentage +option provided another way to set the maximum memory consumption. +See man +.B lcovrc (5) +for details. + + +.RE +.BI \-\-filter " filters" +.RS +Specify a list of coverpoint filters to apply to input data. + +Note that certain filters apply only to C/C++ source files. +.B genhtml +associates the file extension ('.c', '.vhd', +.I etc. +) with its source language. See the +.I c_file_extentions +and +.I rtl_file_extensions +sections of man +.B lcovrc(5) +for a description of the default associations and how they can be changed. + +Note that filters are applied to both 'branch' and 'MC/DC' coverpoints, +where appropriate: if a particular filter would remove some branch, +then it will also remove corresponding MC/DC coverpoints - for example, +.I "\-\-filter branch" +will remove MC/DC coverpoints if there is no conditional expression on the +corresponding line, and +.I "\-\-filter branch_region" +will remove both branch and MC/DC coverpoints in the marked region. + +Most filters need the source code; filters are not applied if the source file is not available. Similarly, for each source file, if the version recorded in the coverage data (the '.info' file) +does not match the version found on the filesystem, then a +.I version +error is reported. If the +.I version +error is ignored, then filtering is not applied to the mismatched file. +See the +.I \-\-version\-script +for more information. + +.I filters +can be a comma\-separated list of the following keywords: + +.IP branch: 3 +ignore branch counts for C/C++ source code lines which do not appear to contain conditionals. These may be generated automatically by the compiler (e.g., from C++ exception handling) - and are not interesting to users. +This option has no effect unless +.B \-\-branch\-coverage +is used. + +See also man +.B lcovrc(5) +- which describes several variables which affect branch filtering: +.I filter_lookahead +and +.I filter_bitwise_conditional. + +The most common use for branch filtering is to remove compiler-generated branches related to C++ exception handlers. 
See the no_exception_branch' option in man +.B lcovrc(5) +for a way to remove all identified exception branches. +.PP + +.IP brace: 3 +ignore line coverage counts on the closing brace of C/C++ code block, if the line contains only a closing brace and the preceding line has the same count or if the close brace has a zero count and either the preceding line has a non\-zero count, or the close brace is not the body of a conditional. + +These lines seem to appear and disappear in gcov output - and cause differential coverage to report bogus LBC and/or GIC and/or UIC counts. Bogus LBC or UIC counts are a problem because an automated regression which uses pass criteria "LBC + UIC + UNC == 0" will fail. +.PP + +.IP blank: 3 +ignore lines which contain only whitespace (or whitespace + comments) whose 'hit' count is zero. These appear to be a 'gcov' artifact related to compiler-generated code - such as exception handlers and destructor calls at the end of scope - and can confuse differential coverage criteria. +.br +If lcovrc option +.I filter_blank_aggressive = 1 +is enabled, then blank lines will be ignored whether their 'hit' count is zero or not. Aggressive filtering may be useful in LLVM-generated coverage data, which tends to include large numbers of such lines. +.PP + +.IP directive: 3 +ignore lines which look like C compiler directives: #ifdef, #include, #define, +.I etc. +These lines are sometimes included by +.I llvm\-cov +when LLVM profile data is translated to LCOV format. +.PP + +.IP exception: 3 +Exclude branches related to C++ exception handling from branch coverage. +Whether C++ exception branches are identified and removed is dependent on your compiler/toolchain correctly marking them in the generated coverage data. +See the +.I no_exception_branch +section of +.B man lcovrc(5). + +.IP initializer: 3 +Exclude lines which appear to be part of a C++ std::initializer_list. +.PP + +.IP line: 3 +alias for "\-\-filter brace,blank". +.PP + +.IP mcdc: 3 +Remove MC/DC coverpoint which contains single expression, if 'branch' coverpoint +is present on the same line. +Singe-element MC/DC coverpoints are identical to the corresponding branch - except +in the case of compile-time expression evaluation, for example, in a template +function. + +.IP orphan: 3 +Remove branches which appear by themselves - +.I i.e., +the branch has only one destination and so cannot be a conditional. +.br +These occur most frequently as a result of exception branch filtering. + +.IP range: 3 +Ignore line and branch coverpoints on lines which are out-of range/whose line number is beyond the end of the source file. These appear to be gcov artifacts caused by a macro instantiation on the last line of the file. +.PP + +.IP region: 3 +apply LCOV_EXCL_START/LCOV_EXCL_STOP/LCOV_EXCL_LINE and LCOV_UNREACHABLE_START/LCOV_UNREACHABLE_STOP/LCOV_UNREACHABLE_LINE directives found in source text to the coverpoints found in the current and baseline .info files. +This option may be useful in cases that the source code was not found during 'lcov \-\-capture ...' but is accessible now. +.PP + +.IP branch_region: 3 +apply LCOV_EXCL_BR_START/LCOV_EXCL_BR_STOP/LCOV_EXCL_BR_LINE directives found in source text to the coverpoints found in the current and baseline .info files. +This is similar to the 'region option, above - but applies to branch coverpoints only. +.PP + +.IP function: 3 +combine data for every "unique" function which is defined at the same file/line. 
+.I geninfo/gcov +seem to have a bug such that they create multiple entries for the same function. +This feature also merges all instances of the same template function/template method. +.PP + +.IP trivial: 3 +remove trivial functions and associated coverpoints. 'Trivial' functions are +whose body is empty/do not contain any statements. Commonly, these include compiler-generated methods (e.g., default constructors and assignment operators) as well as static initialization wrappers, etc. + +Note that the +.I trivial +filter requires function end line information - and so requires that you use a compiler version which is new enough to support begin/end line reports +( +.I e.g., +gcc/9 or newer) or that you enable lcov/genhtml/geninfo to derive the information: + +In man +.B lcovrc(5), +see the +.I derive_function_end_line +setting as well as the +.I trivial_function_threshold +setting. The former is used to turn end line calculation on or off, +and the latter to change the lookahead used to determine whether the +function body is empty. +Also see the +.I lcov_filter_parallel +and +.I lcov_filter_chunk_size +settings, which may improve CPU performance if the number of files to process is very large. +.PP + +.RE +.BI "\-o " output\-directory +.br +.BI "\-\-output\-directory " output\-directory +.RS +Create files in +.I output\-directory. + +Use this option to tell +.B genhtml +to write the resulting files to a directory other than +the current one. If +.I output\-directory +does not exist, it will be created. + +It is advisable to use this option since depending on the +project size, a lot of files and subdirectories may be created. + +.RE +.BI "\-t " title +.br +.BI "\-\-title " title +.RS +Display +.I title +in header table of all pages. + +.I title +is written to the "Test:"-field in the header table at the top of each +generated HTML page to identify the context in which a particular output +was created. By default, this is the name of the 'current; tracefile. + +A common use is to specify a test run name, or a version control system +identifier (perforce changelist or git SHA, for example) that indicates +the code level that was tested. .RE -.BI "\-t " title -.br -.BI "\-\-title " title +.BI "\-\-header\-title " BANNER .RS -Display -.I title +Display +.I BANNER in header of all pages. -.I title -is written to the header portion of each generated HTML page to -identify the context in which a particular output -was created. By default this is the name of the tracefile. +.I BANNER +is written to the header portion of each generated HTML page. +By default, this +simply identifies this as an LCOV (differential) coverage report. + +A common use is to specify the name of the project or project branch and the +Jenkins build ID. + +.RE +.BI "\-\-footer " FOOTER +.RS +Display +.I FOOTER +in footer of all pages. + +.I FOOTER +is written to the footer portion of each generated HTML page. +The default simply identifies the LCOV tool version used to generate the report. .RE .BI "\-d " description\-file .br .BI "\-\-description\-file " description\-file .RS -Read test case descriptions from +Read test case descriptions from .IR description\-file . All test case descriptions found in @@ -236,6 +2476,8 @@ TD: Valid test case names can consist of letters, numbers and the underscore character ('_'). .RE +.br + .B \-k .br .B \-\-keep\-descriptions @@ -265,12 +2507,60 @@ This option can also be configured permanently using the configuration file option .IR genhtml_css_file . 
+.RE +.BI "\-\-build\-directory " dirname +.RS +To support 'linked build directory' structures, add 'dirname' to the list of +places to search for soft links to source files - +.I e.g., +to handle the case that the links point to source files which are held in your +revision control system, and appear in the +.I \-\-diff\-file +data. In this use case, paths in the coverage data very likely refer to the +structure seen by the compiler during the build - so resolving them back to +the corresponding revsion-controlled source structure is likely to be successful. +.br + +Look in +.I dirname +for file paths which appear in +.IR tracefile +\- possibly after substitutions have been applied \- +which are soft links. +Both the original file path and the path to the linked file will resolve to the same +.I \-\-diff\-file entry. + +This option can be specified multiple times, to add more directories to the search path. + + +.RE +.BI "\-\-source\-directory " dirname +.RS +Add 'dirname' to the list of places to look for source files. +.br + +For relative source file paths +.I e.g. +paths found in +.IR tracefile, +or in +.IR diff-file +\- possibly after substitutions have been applied - +.B genhtml +will first look for the path from 'cwd' (where genhtml was +invoked) and +then from each alternate directory name in the order specified. +The first location matching location is used. + +This option can be specified multiple times, to add more directories to the source search path. + + .RE .BI "\-p " prefix .br .BI "\-\-prefix " prefix .RS -Remove +Remove .I prefix from all directory names. @@ -306,6 +2596,18 @@ This option can also be configured permanently using the configuration file option .IR genhtml_no_source . +.RE +.B \-\-no\-html +.RS +Do not create HTML report. + +Use this switch if you want some artifact of coverage report generation - +.I e.g., +the coverage criteria check or the serialized coverage DB, +.B etc. +- but do not need the coverage report HTML itself. + + .RE .BI "\-\-num\-spaces " spaces .RS @@ -324,7 +2626,9 @@ option .RS Highlight lines with converted\-only coverage data. -Use this option in conjunction with the \-\-diff option of +Use this option in conjunction with the +.B \-\-diff +option of .B lcov to highlight those lines which were only covered in data sets which were converted from previous source code versions. @@ -348,7 +2652,7 @@ option .RE .BI "\-\-html\-prolog " prolog\-file .RS -Read customized HTML prolog from +Read customized HTML prolog from .IR prolog\-file . Use this option to replace the default HTML prolog (the initial part of the @@ -363,7 +2667,7 @@ The title of the page. .B "@basedir@" .br -A relative path leading to the base directory (e.g. for locating css\-files). +A relative path leading to the base directory (e.g., for locating css\-files). This option can also be configured permanently using the configuration file option @@ -372,7 +2676,7 @@ option .RE .BI "\-\-html\-epilog " epilog\-file .RS -Read customized HTML epilog from +Read customized HTML epilog from .IR epilog\-file . Use this option to replace the default HTML epilog (the final part of the HTML @@ -383,7 +2687,7 @@ Within the epilog text, the following words will be replaced when a page is gene .B "@basedir@" .br -A relative path leading to the base directory (e.g. for locating css\-files). +A relative path leading to the base directory (e.g., for locating css\-files). 
This option can also be configured permanently using the configuration file option @@ -395,7 +2699,7 @@ option Use customized filename extension for generated HTML pages. This option is useful in situations where different filename extensions -are required to render the resulting pages correctly (e.g. php). Note that +are required to render the resulting pages correctly (e.g., php). Note that a '.' will be inserted between the filename and the extension specified by this option. @@ -418,13 +2722,25 @@ option .IR genhtml_html_gzip . .RE -.B \-\-sort +.B \-\-sort-tables .br .B \-\-no\-sort .RS Specify whether to include sorted views of file and directory overviews. -Use \-\-sort to include sorted views or \-\-no\-sort to not include them. +This option replaces the deprecated +.I "\-\-sort" +option. +.I "\-\-sort" +is still supported and is treated as an alias of +.I "\-\-sort\-tables" +but will be removed in a subsequent LCOV release. + +Use +.B \-\-sort-tables +to include sorted views or +.B \-\-no\-sort +to not include them. Sorted views are .B enabled by default. @@ -446,17 +2762,23 @@ Specify whether to display function coverage summaries in HTML output. Use \-\-function\-coverage to enable function coverage summaries or \-\-no\-function\-coverage to disable it. Function coverage summaries are .B enabled -by default +by default. + +This option can also be configured permanently using the configuration file +option +.IR genhtml_function_coverage . When function coverage summaries are enabled, each overview page will contain the number of functions found and hit per file or directory, together with the resulting coverage rate. In addition, each source code view will contain a link to a page which lists all functions found in that file plus the respective call count for those functions. +The function coverage page groups the data for every alias of each function, sorted by name or execution count. The representative name of the group of functions is the shorted (i.e., containing the fewest characters). -This option can also be configured permanently using the configuration file -option -.IR genhtml_function_coverage . +If using differential coverage and a sufficiently recent compiler version which report both begin and end line of functions ( +.I e.g., +gcc/9 and newer), functions are considered 'new' if any of their source lines have changed. +With older compiler versions, functions are considered 'new' if the function signature has changed or if the entire function is new. .RE .B \-\-branch\-coverage @@ -465,10 +2787,13 @@ option .RS Specify whether to display branch coverage data in HTML output. -Use \-\-branch\-coverage to enable branch coverage display or -\-\-no\-branch\-coverage to disable it. Branch coverage data display is -.B enabled -by default +Use +.B \-\-branch\-coverage +to enable branch coverage display or +.B \-\-no\-branch\-coverage +to disable it. Branch coverage data display is +.B disabled +by default. When branch coverage display is enabled, each overview page will contain the number of branches found and hit per file or directory, together with @@ -489,18 +2814,140 @@ branches around or eliminate some of them to generate better code. This option can also be configured permanently using the configuration file option -.IR genhtml_branch_coverage . +.IR branch_coverage . .RE -.B \-\-demangle\-cpp + +.B \-\-mcdc\-coverage +.RS +Specify whether to display Modifie Condition / Decision Coverage (MC/DC) +data in HTML output. + +MC/DC data display is +.B disabled +by default. 
+ +MC/DC coverage is supported for GCC versions 14.2 and higher, or +LLVM 18.1 and higher. +.br +See +.I llvm2lcov \-\-help +for details on MC/DC data capture in LLVM. + +When MC/DC display is enabled, each overview page will contain +the number of MC/DC expressions found and hit per file or directory - two senses per expression - together with +the resulting coverage rate. In addition, each source code view will contain +an extra column which lists all expressions and condition senses of a line with indications of +whether the condition was sensitized or not. Conditions are shown in the following format: + +.RS 3 +.IP T: 3 +True sense of subexpression was sensitized: if this subexpression's value had been false, then the condition result would have been different. +.IP t: 3 +True sense of subexpression was +.B not +sensitized: the condition result would not change if the subexpression value was different. +.IP F: +False sense of subexpression was sensitized: if this subexpression's value had been true, then the condition result would have been different. +.IP f: +False sense of subexpression was +.B not +sensitized: the condition result would not change if the subexpression value was different. +.RE + +Note that branch and MC/DC coverage are identical if the condition is +a simple expression - +.I e.g., +.RS 3 + if (enable) ... +.RE + +Note that, where appropriate, filters are applied to both 'branch' and 'MC/DC' coverpoints: if a particular filter would remove some branch, +then it will also remove corresponding MC/DC coverpoints. See the +.I \-\-filter +section, above. + +This option can also be configured permanently using the configuration file +option +.IR mcdc_coverage . +See man +.B lcovrc(5) . + +Note that MC/DC coverpoints are defined differently by GCC and LLVM. +.IP GCC: 3 +evaluates the sensitivity of the condition to the 'true' and 'false' sense of each constituent (leaf) expression independently. +.br +That is: it evaluates the question: does the result of the condition change if +.I this +constituent expression changed from true to false (termed the 'true' sense, above) or from false to true (termed the 'false' sens, above). +.br +For example, the expression +.I A || B +is sensitive to +.I A==true +when +.I B==false, +but is is not sensitive to +.I A==true +when +.I B==true. + +.IP LLVM: 3 +records the subexpression as covered if and only if there is a pair of evaluations +of the condition such that the condition was +sensitized for both 'true' and 'false' values of the subexpression. +This is defined as an +.I independence pair +in the LLVM documentation. +.PP + +That is: the testcase must sensitize both values in order to be marked covered by +LLVM, whereas GCC will independently mark each. +Consequently: in LLVM-generated +.B lcov +reports, either both 'true' and 'false' sensitizations will be covered, or neither will be. +.br +See the examples in tesctcase +.I .../tests/lcov/mcdc. + + + +.RE +.BI "\-\-demangle\-cpp " [param] .RS Specify whether to demangle C++ function names. Use this option if you want to convert C++ internal function names to human readable format for display on the HTML function overview page. -This option requires that the c++filt tool is installed (see + +If called with no parameters, genhtml will use +.I c++filt +for demangling. This requires that the c++filt tool is installed (see .BR c++filt (1)). +If +.I param +is specified, it is treated as th tool to call to demangle source code. 
+The +.I \-\-demangle\-cpp +option can be used multiple times to specify the demangling tool and a set of +command line options that are passed to the tool - similar to how the gcc +.I -Xlinker +parameter works. In that case, you callback will be executed as: +.I | demangle_param0 demangle_param1 ... +Note that the demangle tool is called as a pipe and is expected to read from stdin and write to stdout. + +.RE +.B \-\-msg\-log +.I [ log_file_name ] +.br +.RS +Specify location to store error and warning messages (in addition to writing to STDERR). +If +.I log_file_name +is not specified, then default location is used. +.RE + .RE .B \-\-ignore\-errors .I errors @@ -508,14 +2955,346 @@ This option requires that the c++filt tool is installed (see .RS Specify a list of errors after which to continue processing. -Use this option to specify a list of one or more classes of errors after which -geninfo should continue processing instead of aborting. +Use this option to specify a list of error classes after which +.B genhtml +should continue processing with a warning message instead of aborting. +To suppress the warning message, specify the error class twice. +.br .I errors can be a comma\-separated list of the following keywords: -.B source: -the source code file for a data set could not be found. +.IP annotate: 3 +.B \-\-annotate\-script +returned non\-zero exit status - likely a file path or related error. HTML source code display will not be correct and ownership/date information may be missing. +.PP + +.IP branch: 3 +Branch ID (2nd field in the .info file 'BRDA' entry) does not follow expected integer sequence. +.PP + +.IP callback: 3 +Annotate, version, or criteria script error. +.PP + +.IP category: 3 +Line number categorizations are incorrect in the .info file, so branch coverage line number turns out to not be an executable source line. +.PP + +.IP child: 3 +child process returned non-zero exit code during +.I \-\-parallel +execution. This typically indicates that the child encountered an error: see the log file immediately above this message. +In contrast: the +.B parallel +error indicates an unexpected/unhandled exception in the child process - not a 'typical' lcov error. +.PP + +.IP count: 3 +An excessive number of messages of some class has been reported - subsequent messages of that type will be suppressed. +The limit can be controlled by the 'max_message_count' variable. See man +.B lcovrc(5). +.PP + +.IP corrupt: 3 +Corrupt/unreadable coverage data file found. +.PP + +.IP deprecated: 3 +You are using a deprecated option. +This option will be removed in an upcoming release - so you should change your +scripts now. +.PP + +.IP empty: 3 +The patch file specified by the +.B \-\-diff\-file +argument does not contain any differences. This may be OK if there were no source code changes between 'baseline' and 'current' (e.g., the only change was to modify a Makefile) - or may indicate an unsupported file format. +.PP + +.IP excessive: 3 +your coverage data contains a suspiciously large 'hit' count which is unlikely +to be correct - possibly indicating a bug in your toolchain. +See the +.I excessive_count_threshold +section in man +.B lcorc(5) +for details. +.PP + +.IP fork: 3 +Unable to create child process during +.I \-\-parallel +execution. +.br +If the message is ignored ( +.I \-\-ignore\-errors fork +), then genhtml +will wait a brief period and then retry the failed execution. 
+.br +If you see continued errors, either turn off or reduce parallelism, set a memory limit, or find a larger server to run the task. + +.PP + +.IP format: 3 +Unexpected syntax or value found in .info file - for example, negative number or +zero line number encountered. + +.PP + +.IP inconsistent: 3 +This error indicates that your coverage data is internally inconsistent: it makes two or more mutually exclusive claims. For example: +.RS +.IP \- 3 +Files have been moved or repository history presented by +.B \-\-diff\-file +data is not consistent with coverage data; for example, an 'inserted' line has baseline coverage data. These issues are likely to be caused by inconsistent handling in the 'diff' data compared to the 'baseline' and 'current' coverage data (e.g., using different source versions to collect the data but incorrectly annotating those differences), or by inconsistent treatment in the 'annotate' script. +Consider using a +.B \-\-version\-script +to guard against version mismatches. +.PP +.IP \- 3 +Two or more +.B gcov +data files or +.B lcov +".info" files report different end lines for the same function. This is likely due either to a gcc/gcov bug or to a source version mismatch. +.br +In this context, if the +.I "inconsistent" +error is ignored, then the tool will record the largest number as the function end line. +.PP +.IP \- 3 +Two or more +.B gcov +data files or +.B lcov +".info" files report different start lines for the same function. This is likely due either to a gcc/gcov bug or to a source version mismatch. +.br +In this context, if the +.I "inconsistent" +error is ignored, then the tool will retain only the first function definition that it saw. +.PP +.IP \- 3 +Mismatched function declaration/alias records encountered: +.RS 3 +.IP "(backward compatible LCOV format)" 3 +function execution count record ( +.I FNDA +) without matching function declaration record ( +.I FN +). +.PP +.IP "(enhanced LCOV format)" 3 +function alias record ( +.I FNA +) without matching function declaration record ( +.I FLN +). +.PP +.RE +.PP +.IP \- 3 +branch expression (3rd field in the .info file 'BRDA' entry) of merge data does not match + +.br +If the error is ignored, the offending record is skipped. +.RE +.PP + +.IP internal: 3 +internal tool issue detected. Please report this bug along with a testcase. +.PP + +.IP mismatch: 3 +Incorrect or inconsistent information found in coverage data and/or source code - for example, +the source code contains overlapping exclusion directives. + +.PP + +.IP missing: 3 +remove all coverpoints associated with source files which are not found or are not readable. +This is equivalent to adding a +.I \-\-exclude +pattern for each file which is not found. +.br +If a +.I \-\-resolve\-script +callback is specified, then the file is considered missing if it is not +locally visible and the callback +returns "" (empty string) or 'undef' - otherwise not missing. +.br + +.IP negative: 3 +negative 'hit' count found. + +Note that negative counts may be caused by a known GCC bug - see + + https://gcc.gnu.org/bugzilla/show_bug.cgi?id=68080 + +and try compiling with "-fprofile-update=atomic". You will need to recompile, re-run your tests, and re-capture coverage data. +.PP + + +.IP package: 3 +A required perl package is not installed on your system. In some cases, it is possible to ignore this message and continue - however, certain features will be disabled in that case. 
+.PP + +.IP parallel: 3 +various types of errors related to parallelism - +.I i.e., +a child process died due to an error. The corresponding error message appears in the log file immediately before the +.I parallel +error. +If you see an error related to parallel execution that seems invalid, it may be a good idea to remove the \-\-parallel flag and try again. If removing the flag leads to a different result, please report the issue (along with a testcase) so that the tool can be fixed. +.PP + +.IP path: 3 +File name found in +.B \-\-diff\-file +file but does not appear in either baseline or current trace data. These may be mapping issues - different pathname in the tracefile vs. the diff file. +.PP + +.IP range: 3 +Coverage data refers to a line number which is larger than the number of +lines in the source file. This can be caused by a version mismatch or +by an issue in the +.I gcov +data. +.PP + +.IP source: 3 +The source code file for a data set could not be found. +.PP + +.IP unmapped: 3 +Coverage data for a particular line cannot be found, possibly because the source code was not found, or because the line number mapping in the \.info file is wrong. + +This can happen if the source file used in HTML generation is not the same as the file used to generate the coverage data - for example, lines have been added or removed. +.PP + +.IP unreachable: 3 +a coverpoint (line, branch, function, or MC/DC) within an "unreachable" region is executed (hit); either the code, directive placement, or both are wrong. +If the error is ignored, the offending coverpoint is retained (not excluded) or not, depending on the value of the +.I retain_unreachable_coverpoints_if_executed +configuration parameter. +See man +.B lcovrc(5) +and the +.I "Exclusion markers" +section of man +.B geninfo(1) +for more information. +.PP + +.IP unsupported: 3 +The requested feature is not supported for this tool configuration. For example, function begin/end line range exclusions use some GCOV features that are not available in older GCC releases. +.PP + +.IP unused: 3 +The include/exclude/erase/substitute/omit pattern did not match any file pathnames. +.PP + +.IP usage: 3 +unsupported usage detected - e.g. an unsupported option combination. +.PP + +.IP utility: 3 +a tool called during processing returned an error code (e.g., 'find' encountered an unreadable directory). +.PP + +.IP version: 3 +\-\-version\-script comparison returned non\-zero mismatch indication. It likely that the version of the file which was used in coverage data extraction is different than the source version which was found. File annotations may be incorrect. +.PP + +Note that certain error messages are caused by issues that you probably cannot +fix by yourself - for example, bugs in your tool chain which result in +.I inconsistent +coverage DB data (see above). +In those cases, after reviewing the messages you may want to exclude the offending code or the entire offending +file, or you may simply ignore the messages - either by converting to warning or suppressing entirely. +Another alternative is to tell +.B genhtml +about the number of messages you expect - so that it can warn you if something changes +such that the count differs, such that you know to review the messages again. +See the +.I "\-\-expect\-message\-count" +flag, below. + +Also see 'man +.B lcovrc(5) +' for a discussion of the 'max_message_count' parameter which can be used to control the number of warnings which are emitted before all subsequent messages are suppressed. 
This can be used to reduce log file volume. +.br + +.RE +.BI "\-\-expect\-message\-count message_type:expr[,message_type:expr]" +.RS +Give +.B genhtml +a constraint on the number of messages of one or more types which are expected to +be produced during execution. Note that the total includes _all_ messages +of the given type - including those which have been suppressed. +If the constraint is not true, an +error of type +.I "count" +(see above) is generated. +.I message_type +is one of the message mnemonics described above, and +.I expr +may be either +.IP \- 3 +an integer - interpreted to mean that there should be exactly that number +of messages of the corresponding type, or +.IP \- 3 +a Perl expression containing the substring +.B %C +; %C is replaced with the total number of messages of the corresponding type and +then evaluated. The constraint is met if the result is non-zero and is not met +otherwise. + +For example: +.IP +\-\-expect\-message\-count inconsistent:5 +.br +says that we expect exactly 5 messages of type 'inconsistent'. +.PP +.IP +\-\-expect\-message\-count inconsistent:%C==5 +.br +also says that we expect exactly 5 messages of this type, but specified +using expression syntax. +.PP +.IP +\-\-expect\-message\-count 'inconsistent : %C > 6 && %C <= 10' +.br +says that we expect the number of messages to be in the range (6:10]. +(Note that quoting may be necessary, to protect whitespace from interpretation by +your shell, if you want to improve expression readability by adding spaces to your expression.) +.PP + +Multiple constraints can be specified using a comma-separated list or +by using the option multiple times. + +This flag is equivalent to the +.I "expect_message_count" +configuration option. See man +.B lcovrc(5) +for more details on the expression syntax and how expressions are interpreted. +The number of messages of the particular type is substituted into the +expression before it is evaluated. + +.RE + +.BI "\-\-keep\-going " +.RS +Do not stop if error occurs: attempt to generate a result, however flawed. + +This command line option corresponds to the +.I stop_on_error +lcovrc option. See man +.B lcovrc(5) +for more details. + .RE .B \-\-config\-file @@ -523,6 +3302,12 @@ the source code file for a data set could not be found. .br .RS Specify a configuration file to use. +See man +.B lcovrc(5) +for details of the file format and options. Also see the +.I config_file +entry in the same man page for details on how to include one config file into +another. When this option is specified, neither the system\-wide configuration file /etc/lcovrc, nor the per\-user configuration file ~/.lcovrc is read. @@ -531,8 +3316,25 @@ This option may be useful when there is a need to run several instances of .B genhtml with different configuration file options in parallel. + +Note that this option must be specified in full - abbreviations are not supported. + .RE +.B \-\-profile +.I [ profile\-data\-file ] +.br +.RS +Tell the tool to keep track of performance and other configuration data. +If the optional +.I profile\-data\-file +is not specified, then the profile data is written to a file named +.I "genhtml.json" +in the output directory. + +.RE + + .B \-\-rc .IR keyword = value .br @@ -544,7 +3346,7 @@ Use this option to specify a statement which overrides the corresponding configuration statement in the lcovrc configuration file. You can specify this option more than once to override multiple configuration statements. 
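+
+For example (a sketch - the output and data file names are placeholders):
+.br
+
+.RS
+genhtml \-\-rc branch_coverage=1 \-\-rc genhtml_precision=2 \-o report current.info
+.br
+.RE
+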
-See +See man .BR lcovrc (5) for a list of available keywords and their meaning. .RE @@ -553,7 +3355,7 @@ for a list of available keywords and their meaning. .RS Show coverage rates with .I num -number of digits after the decimal-point. +number of digits after the decimal point. Default value is 1. @@ -562,14 +3364,72 @@ option .IR genhtml_precision . .RE +.B \-\-merge\-aliases +.RS +Functions whose file/line is the same are considered to be aliases; +.B genthml +uses the shortest name in the list of aliases (fewest characters) as the leader. +.br +This option counts each alias group as a single object - so the 'function' +count will be the number of distinct function groups rather than the total number +of aliases of all functions - and displays them as groups in the 'function detail +table. +.br +Note that this option has an effect only if +.B "\-\-filter function" +has been applied to the coverage DB. +.br + +This parameter an be configured via the configuration file +.IR merge_function_aliases +option. See +.B man(5) lcovrc. + + +.B \-\-suppress\-aliases +.RS +Suppress list of aliases in function detail table. +.br + +Functions whose file/line is the same are considered to be aliases; +.B genthml +uses the shortest name in the list of aliases (fewest characters) as the leader. +.br + +The number of aliases can be large, for example due to instantiated templates - which can make function coverage results difficult to read. This option removes the list of aliases, making it easier to focus on the overall function coverage number, which is likely more interesting. + +Note that this option has an effect only if +.B "\-\-filter function" +has been applied to the coverage DB. + +This parameter an be configured via the configuration file +.IR merge_function_aliases +option. See +.B man(5) lcovrc. + + +.B \-\-forget\-test\-names +.br +.RS +If non\-zero, ignore testcase names in .info file - +.I i.e., +treat all coverage data as if it came from the same testcase. +This may improve performance and reduce memory consumption if user does +not need per-testcase coverage summary in coverage reports. + +This option can also be configured permanently using the configuration file +option +.IR forget_testcase_names . +.RE + .B \-\-missed .RS -Show counts of missed lines, functions, or branches +Show counts of missed lines, functions, branches, and MC/DC expressions. Use this option to change overview pages to show the count of lines, functions, -or branches that were not hit. These counts are represented by negative numbers. +branches, or MC/DC expressions that were not hit. These counts are represented by negative numbers. -When specified together with \-\-sort, file and directory views will be sorted +When specified together with \-\-sort\-tables, file and directory views will be sorted by missed counts. This option can also be configured permanently using the configuration file @@ -577,6 +3437,65 @@ option .IR genhtml_missed . .RE +.B \-\-dark\-mode +.RS +Use a light\-display\-on\-dark\-background color scheme rather than the default dark\-display\-on\-light\-background. + +The idea is to reduce eye strain due to viewing dark text on a bright screen - particularly at night. +.RE + +.B \-\-tempdir +.I dirname +.br +.RS +Write temporary and intermediate data to indicated directory. Default is "/tmp". +.RE + +.BI "\-\-preserve " +.RS +Preserve intermediate data files generated by various steps in the tool - e.g., for debugging. By default, these files are deleted. 
+ +.RE + +.BI "\-\-save " +.RS +Copy +.I unified\-diff\-file, baseline_trace_files, +and +.I tracefile(s) +to +output\-directory. + +Keeping copies of the input data files may help to debug any issues or to regenerate report files later. + +.RE + +.B \-\-sort\-input +.br +.RS +Specify whether to sort file names before capture and/or aggregation. +Sorting reduces certain types of processing order-dependent output differences. +See the +.BI sort_input +section in +man +.B lcovrc(5). + +.RE + +.BI "\-\-serialize " file_name +.RS +Save coverage database to +.I file_name. + +The file is in Perl "Storable" format. + +Note that this option may significantly increase +.I genhtml +memory requirements, as a great deal of data must be retained. + +.RE + .SH FILES .I /etc/lcovrc @@ -589,8 +3508,157 @@ The system\-wide configuration file. The per\-user configuration file. .RE -.SH AUTHOR +Sample +.I \-\-diff\-file +data creation scripts: +.RS + +.I \*[scriptdir]/p4udiff +.RS +Sample script for use with +.B --diff-file +that creates a unified diff file via +.B Perforce. +.br +.RE + +.I \*[scriptdir]/gitdiff +.RS +Sample script for use with +.B --diff-file +that creates a unified diff file via +.B git. +.br +.RE +.RE + +Sample +.I \-\-annotate\-script +callback Perl modules: + +.RS +.I \*[scriptdir]/p4annotate.pm +.RS +Sample script written as Perl module for use with +.B --annotate-script +that provides annotation data via +.B Perforce. +.br +.RE + +.I \*[scriptdir]/gitblame.pm +.RS +Sample script written as Perl module for use with +.B --annotate-script +that provides annotation data via git. +.br +.RE +.RE + +Sample +.I \-\-criteria\-script +callback Perl modules: +.RS + +.I \*[scriptdir]/criteria.pm +.RS +Sample script written as Perl module for use with +.B --criteria-script +that implements a check for "UNC + LBC + UIC == 0". +.br +.RE + +.I \*[scriptdir]/threshold.pm +.RS +Sample script written as Perl module to check for minimum acceptable +line and/or branch and/or and/or MC/DC function coverage. +For example, the +.RS +.I "genhtml --fail_under_lines 75 ..." +.RE +feature can instead be realized by +.RS +.I "genhtml --criteria-script \*[scriptdir]/threshold.pm,--line,75 ..." +.RE +.br +.RE +.RE + +Sample +.I \-\-simplify\-script +callback Perl module: +.RS + +.I \*[scriptdir]/simplify.pm +.RS +Sample script written as Perl module for use with +.B \-\-simplify\-script +that implements regular expression substitutions for function name simplification. +.br +.RE + +.RE +.RE + +Sample +.I \-\-version\-script +callback Perl modules and scripts: +.RS + +.I \*[scriptdir]/getp4version +.RS +Sample script for use with +.B \-\-version\-script +that obtains version IDs via +.B Perforce. +.br +.RE + +.I \*[scriptdir]/P4version.pm +.RS +A perl module with similar functionality to +.B getp4version +but higher performance. +.br +.RE + +.I \*[scriptdir]/get_signature +.RS +Sample script for use with +.B --version-script +that uses md5hash as version IDs. +.br +.RE + +.I \*[scriptdir]/gitversion.pm +.RS +A perl module with for use with +.B \-\-version\-script +which retrieves version IDs from +.B git. +.br +.RE + +.I \*[scriptdir]/batchGitVersion.pm +.RS +A perl module with similar functionality to +.B gitversion.pm +but higher performance. 
+.br +.RE +.RE + + +.SH AUTHORS Peter Oberparleiter +.br + +Henry Cox +.RS +Differential coverage and date/owner binning, filtering, error management, +parallel execution sections, +.RE +.br .SH SEE ALSO .BR lcov (1), @@ -599,3 +3667,7 @@ Peter Oberparleiter .BR genpng (1), .BR gendesc (1), .BR gcov (1) +.br + +.I \*[lcovurl] +.br diff --git a/man/geninfo.1 b/man/geninfo.1 index b1deed9f..c88d8b24 100644 --- a/man/geninfo.1 +++ b/man/geninfo.1 @@ -1,14 +1,27 @@ -.TH geninfo 1 "LCOV 1.15" 2020\-08\-07 "User Manuals" +\" Define path to scripts +.ds scriptdir bin + +\" Define project URL +.ds lcovurl https://github.com/linux\-test\-project/lcov + +.TH geninfo 1 "LCOV 2.0" 2023\-05\-17 "User Manuals" .SH NAME -geninfo \- Generate tracefiles from .da files +geninfo \- Generate tracefiles from GCOV coverage data files .SH SYNOPSIS .B geninfo .RB [ \-h | \-\-help ] -.RB [ \-v | \-\-version ] +.RB [ \-\-version ] .RB [ \-q | \-\-quiet ] +.RB [ \-v | \-\-verbose ] +.RB [ \-\-debug ] .br .RS 8 +.RB [ \-\-comment +.IR comment-string ] +.br .RB [ \-i | \-\-initial ] +.RB [ \-\-all ] +.br .RB [ \-t | \-\-test\-name .IR test\-name ] .br @@ -19,6 +32,13 @@ geninfo \- Generate tracefiles from .da files .RB [ \-b | \-\-base\-directory .IR directory ] .br +.RB [ \-\-build\-directory +.IR directory ] +.br +.RB [ \-\-branch\-coverage ] +.br +.RB [ \-\-mcdc\-coverage ] +.br .RB [ \-\-checksum ] .RB [ \-\-no\-checksum ] .br @@ -27,18 +47,47 @@ geninfo \- Generate tracefiles from .da files .br .RB [ \-\-gcov\-tool .IR tool ] +.br +.RB [ \-\-parallel | -j +.IR [integer] ] +.br +.br [ \-\-large\-file +.IR regexp ] +.br +.RB [ \-\-memory +.IR integer_num_Mb ] +.br +.RB [ \-\-msg\-log +.IR [ log_file_name ] ] +.br .RB [ \-\-ignore\-errors -.IR errors ] +.IR errors ] +.br +.RB [\-\-expect\-message\-count +.IR message_type=expr[,message_type=expr..]] +.br +.RB [ \-\-keep\-going ] +.br +.RB [ \-\-preserve ] +.br +.RB [ \-\-filter +.IR type ] +.br +.RB [ \-\-demangle\-cpp [param]] .br .RB [ \-\-no\-recursion ] -.I directory .RB [ \-\-external ] .RB [ \-\-no\-external ] .br +.RB [ \-\-sort\-input ] +.br .RB [ \-\-config\-file .IR config\-file ] .RB [ \-\-no\-markers ] .br +.RB [ \-\-profile +.IR [ profile\-file ] ] +.br .RB [ \-\-derive\-func\-data ] .RB [ \-\-compat .IR mode =on|off|auto] @@ -47,104 +96,280 @@ geninfo \- Generate tracefiles from .da files .IR keyword = value ] .br .RB [ \-\-include -.IR pattern ] +.IR glob_pattern ] +.br .RB [ \-\-exclude -.IR pattern ] +.IR glob_pattern ] +.br +.RB [ \-\-erase\-functions +.IR regexp_pattern ] +.br +.RB [ \-\-substitute +.IR regexp_pattern ] +.br +.RB [ \-\-omit\-lines +.IR regexp_pattern ] +.br +.RB [ \-\-fail\-under\-branches +.IR percentage ] +.br +.RB [ \-\-fail\-under\-lines +.IR percentage ] +.br +.RB [ \-\-forget\-test\-names ] +.br +.RB [ \-\-context\-script +.IR script_file ] +.br +.RB [ \-\-criteria\-script +.IR script_file ] +.br +.RB [ \-\-resolve\-script +.IR script_file ] +.br +.RB [ \-\-version\-script +.IR script_file ] +.br +.RB [ \-\-tempdir +.IR dirname ] +.br +.IR directory .RE + .SH DESCRIPTION -.B geninfo -converts all GCOV coverage data files found in -.I directory -into tracefiles, which the + +Use +.B geninfo +to create LCOV tracefiles from GCC and LLVM/Clang coverage data files (see +.B --gcov-tool +for considerations when working with LLVM). You can use .B genhtml -tool can convert to HTML output. +to create an HTML report from a tracefile. 
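+
+As a brief illustration, a typical flow captures data from a build tree and
+then renders a report; the directory and file names used here are placeholders:
+.br
+
+.RS
+.I "geninfo ./build \-o coverage.info"
+.br
+.I "genhtml \-o html_report coverage.info"
+.RE
+.br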
+.br
+
+Note that
+.B geninfo
+is called by
+.BR "lcov --capture" ,
+so there is typically no need to call it directly.
+.br
 
-Unless the \-\-output\-filename option is specified,
+Unless the
+.B --output-filename
+option is specified,
 .B geninfo
-writes its
-output to one file per .da file, the name of which is generated by simply
-appending ".info" to the respective .da file name.
+writes its output to one file with a .info filename extension per input file.
+.br
 
-Note that the current user needs write access to both
+Note also that the current user needs write access to both
 .I directory
-as well as to the original source code location. This is necessary because
-some temporary files have to be created there during the conversion process.
+as well as to the original source code location. This is necessary because some temporary files have to be created there during the conversion process.
+.br
+
+By default,
+.B geninfo
+collects line and function coverage data.
+Neither branch nor MC/DC data is collected by default; you can use the
+.B \-\-branch\-coverage
+and
+.B \-\-mcdc\-coverage
+command line options to enable them, or you can permanently enable them by adding
+.B branch_coverage = 1
+and/or
+.B mcdc_coverage = 1
+to your personal, group, or site lcov configuration file. See man
+.B lcovrc(5)
+for details.
+
+
+.SS "File types"
+
+A
+.B tracefile
+is a coverage data file in the format used by all LCOV tools such as
+.BR geninfo ", " lcov ", and " genhtml .
+By convention, tracefiles have a .info filename extension. See "Tracefile format" below for a description of the file format.
+.br
+
+A
+.B .gcda file
+is a compiler-specific file containing run-time coverage data. It is created and updated when a program compiled with GCC/LLVM's
+.B --coverage
+option is run to completion.
+.B geninfo
+reads .gcda files in its default mode of operation. Note: earlier compiler versions used the .da filename extension for this file type.
+.br
+
+A
+.B .gcno file
+is a compiler-specific file containing static, compile-time coverage data. It is created when source code is compiled with GCC/LLVM's
+.B --coverage
+option.
+.B geninfo
+reads .gcno files when option
+.B --initial
+is specified. Note: earlier compiler versions used .bb and .bbg filename extensions for this file type.
+.br
 
-Note also that
+A
+.B .gcov file
+is a textual or JSON representation of the data found in .gcda and .gcno files. It is created by the
+.BR gcov
+tool that is part of GCC (see
+.B --gcov-tool
+for LLVM considerations).
+There are multiple gcov file format versions, including textual, intermediate, and JSON format.
 .B geninfo
-is called from within
-.BR lcov ,
-so that there is usually no need to call it directly.
+internally uses
+.B gcov
+to extract coverage data from .gcda and .gcno files using the best supported gcov file format.
+.br
+
+See the
+.B gcov
+man page for more information on .gcda, .gcno and .gcov output formats.
+.br
 
-.B Exclusion markers
+.SS "Exclusion markers"
 
 To exclude specific lines of code from a tracefile, you can add exclusion
-markers to the source code. Additionally you can exclude specific branches from
-branch coverage without excluding the involved lines from line and function
-coverage. Exclusion markers are keywords which can for example be added in the
+markers to the source code. Similarly, you can mark specific regions of code
+as "unreachable".
An "unreachable" error message is generated if any +coverpoints in unreachable regions are executed ( +.I i.e., +have non-zero hit counts. +See the +.I retain_unreachable_coverpoints_if_executed +section in man +.B lcovrc(1) +for a description of the actions taken in this case. + +Additionally you can exclude specific branches or MC/DC expressions from +without excluding the involved lines from line and function +coverage. + + Exclusion markers are keywords which can for example be added in the form of a comment. -See +See man .BR lcovrc (5) -how to override some of them. +how to override the exclusion keywords ( +.I e.g., +to reuse markers inserted for other tools or to generate reports with different +sets of excluded regions). The following markers are recognized by geninfo: -LCOV_EXCL_LINE +.B LCOV_EXCL_LINE +.br .RS Lines containing this marker will be excluded. .br .RE -LCOV_EXCL_START + +.B LCOV_EXCL_START +.br .RS Marks the beginning of an excluded section. The current line is part of this section. .br .RE -LCOV_EXCL_STOP + +.B LCOV_EXCL_STOP +.br .RS Marks the end of an excluded section. The current line not part of this section. +.br +.RE + +.B LCOV_UNREACHABLE_LINE +.br +.RS +If the marked line is 'hit', then generate an error: we believe the marked code +is unreachable and so there is a bug in the code, the plaement of the directive, or both. +Lines containing this marker will be excluded from reporting. +.br +Apart from error reporting, this directive is equivalent to +.I LCOV_EXCL_LINE. +.RE + +.B LCOV_UNREACHABLE_START +.br +.RS +Marks the beginning of an unreachable section of code. The current line in part of this region. +.br +As described in the +.I LCOV_UNREACHABLE_LINE +section, above: an error is generated if any code in the region is hit, but the code is excluded from reporting. +.RE + +.B LCOV_UNREACHABLE_STOP +.br +.RS +Marks the end of the region of unreachable code. The current line not part of this +section. .RE + +.B LCOV_EXCL_BR_LINE .br -LCOV_EXCL_BR_LINE .RS Lines containing this marker will be excluded from branch coverage. .br .RE -LCOV_EXCL_BR_START + +.B LCOV_EXCL_BR_START +.br .RS Marks the beginning of a section which is excluded from branch coverage. The current line is part of this section. .br .RE -LCOV_EXCL_BR_STOP + +.B LCOV_EXCL_BR_STOP +.br .RS Marks the end of a section which is excluded from branch coverage. The current line not part of this section. +.br .RE + +.B LCOV_EXCL_EXCEPTION_BR_LINE .br -LCOV_EXCL_EXCEPTION_BR_LINE .RS Lines containing this marker will be excluded from exception branch coverage: Exception branches will be ignored, but non-exception branches will not be affected. .br .RE -LCOV_EXCL_EXCEPTION_BR_START + +.B LCOV_EXCL_EXCEPTION_BR_START +.br .RS -Marks the beginning of a section which is excluded from exception branch +Marks the beginning of a section which is excluded from exception branch coverage. The current line is part of this section. .br .RE -LCOV_EXCL_EXCEPTION_BR_STOP + +.B LCOV_EXCL_EXCEPTION_BR_STOP +.br .RS -Marks the end of a section which is excluded from exception branch coverage. -The current line not part of this section. -.RE +Marks the end of a section which is excluded from exception branch coverage. +The current line not part of this section .br +.RE .SH OPTIONS +In general, (almost) all +.B geninfo +options can also be specified in your personal, group, project, or site +configuration file - see the +.I \-\-config\-file +section, below, and man +.B lcovrc(5) +for details. 
+
+
 .B \-b
 .I directory
 .br
@@ -172,10 +397,103 @@ directory in which the source code file is located.
 Note that this option will not work in environments where multiple base
 directories are used. In that case use configuration file setting
 .B geninfo_auto_base=1
-(see
+(see man
 .BR lcovrc (5)).
 .RE
 
+.B \-\-build\-directory
+.I build_dir
+.br
+.RS
+Search for .gcno data files from
+.I build_dir
+rather than finding them only adjacent to the corresponding .o and/or .gcda file.
+
+By default, geninfo expects to find the .gcno and .gcda files (compile-
+and run-time data, respectively) in the same directory.
+.br
+
+When this option is used:
+.br
+
+.RS
+geninfo path1 \-\-build\-directory path2 ...
+.RE
+.br
+
+then geninfo will look for the .gcno file
+.br
+
+.RS
+path2/relative/path/to/da_base.gcno
+.RE
+.br
+
+when it finds the .gcda file
+.br
+
+.RS
+path1/relative/path/to/da_base.gcda.
+.RE
+
+Use this option when you have used the
+.I GCOV_PREFIX
+environment variable to direct the gcc or llvm runtime environment to write
+coverage data files to somewhere other than the directory where the code
+was originally compiled.
+See
+.BR gcc (1)
+and/or search for
+.I GCOV_PREFIX
+and
+.I GCOV_PREFIX_STRIP.
+
+This option can be used several times to specify multiple alternate directories to look for .gcno files. This may be useful if your application uses code which is compiled in many separate locations - for example, common libraries that are shared between teams.
+
+.RE
+.BI "\-\-source\-directory " dirname
+.RS
+Add 'dirname' to the list of places to look for source files.
+.br
+
+For relative source file paths found in the gcov data \- possibly after substitutions have been applied,
+.B geninfo
+will first look for the path from 'cwd' (where geninfo was
+invoked) and
+then from each alternate directory name in the order specified.
+The first matching location is used.
+
+This option can be specified multiple times, to add more directories to the source search path.
+.RE
+
+.B \-\-branch\-coverage
+.br
+.RS
+Collect and retain branch coverage data.
+
+This is equivalent to using the option "\-\-rc branch_coverage=1"; the option was added to better match the genhtml interface.
+
+.RE
+
+.B \-\-mcdc\-coverage
+.br
+.RS
+Collect and retain MC/DC data.
+
+This is equivalent to using the option "\-\-rc mcdc_coverage=1".
+MC/DC coverage capture is supported for GCC versions 14.2 and higher,
+or LLVM versions 18.1 and higher.
+.br
+See
+.I llvm2lcov \-\-help
+for details on MC/DC data capture in LLVM.
+
+.br
+See the MC/DC section of man
+.B genhtml(1)
+for more details.
+.RE
+
 .B \-\-checksum
 .br
 .B \-\-no\-checksum
@@ -183,8 +501,11 @@ directories are used. In that case use configuration file setting
 .RS
 Specify whether to generate checksum data when writing tracefiles.
 
-Use \-\-checksum to enable checksum generation or \-\-no\-checksum to
-disable it. Checksum generation is
+Use
+.B \-\-checksum
+to enable checksum generation or
+.B \-\-no\-checksum
+to disable it. Checksum generation is
 .B disabled
 by default.
 
@@ -195,6 +516,11 @@ code versions.
 If you don't work with different source code versions, disable this option
 to speed up coverage data processing and to reduce the size of tracefiles.
 
+
+Note that this option is somewhat subsumed by the
+.B \-\-version\-script
+option - which does something similar, but at the 'whole file' level.
+
 .RE
 
 .B \-\-compat
@@ -203,7 +529,9 @@ to speed up coverage data processing and to reduce the size of tracefiles.
 .RS
 Set compatibility mode.
-Use \-\-compat to specify that geninfo should enable one or more compatibility +Use +.B \-\-compat +to specify that geninfo should enable one or more compatibility modes when capturing coverage data. You can provide a comma-separated list of mode=value pairs to specify the values for multiple modes. @@ -235,7 +563,7 @@ are: .RS Enable this mode if you are capturing coverage data for a project that was built using the libtool mechanism. See also -\-\-compat\-libtool. +.BR \-\-compat\-libtool . The default value for this setting is 'on'. @@ -246,7 +574,7 @@ Enable this mode if you are capturing coverage data for a project that was built using a version of GCC 3.3 that contains a modification (hammer patch) of later GCC versions. You can identify a modified GCC 3.3 by checking the build directory of your project for files ending in the -extension '.bbg'. Unmodified versions of GCC 3.3 name these files '.bb'. +extension .bbg. Unmodified versions of GCC 3.3 name these files .bb. The default value for this setting is 'auto'. @@ -271,13 +599,16 @@ The default value for this setting is 'auto' .RS Specify whether to enable libtool compatibility mode. -Use \-\-compat\-libtool to enable libtool compatibility mode or \-\-no\-compat\-libtool +Use +.B \-\-compat\-libtool +to enable libtool compatibility mode or +.B \-\-no\-compat\-libtool to disable it. The libtool compatibility mode is .B enabled by default. When libtool compatibility mode is enabled, geninfo will assume that the source -code relating to a .da file located in a directory named ".libs" can be +code relating to a .gcda file located in a directory named ".libs" can be found in its parent directory. If you have directories named ".libs" in your build environment but don't use @@ -289,6 +620,13 @@ libtool, disable this option to prevent problems when capturing coverage data. .br .RS Specify a configuration file to use. +See man +.B lcovrc(5) +for details of the file format and options. +Also see the +.I config_file +entry in the same man page for details on how to include one config file into +another. When this option is specified, neither the system\-wide configuration file /etc/lcovrc, nor the per\-user configuration file ~/.lcovrc is read. @@ -297,8 +635,27 @@ This option may be useful when there is a need to run several instances of .B geninfo with different configuration file options in parallel. + +Note that this option must be specified in full - abbreviations are not supported. + +.RE + +.B \-\-profile +.I [ profile\-data\-file ] +.br +.RS +Tell the tool to keep track of performance and other configuration data. +If the optional +.I profile\-data\-file +is not specified, then the profile data is written to a file named with the same +basename as the +.I \-\-output\-filename, with suffix +.I ".json" +appended. + .RE + .B \-\-derive\-func\-data .br .RS @@ -310,32 +667,6 @@ instead derive function coverage data from line coverage data and information about which lines belong to a function. .RE -.B \-\-exclude -.I pattern -.br -.RS -Exclude source files matching -.IR pattern . - -Use this switch if you want to exclude coverage data for a particular set -of source files matching any of the given patterns. Multiple patterns can be -specified by using multiple -.B --exclude -command line switches. The -.I patterns -will be interpreted as shell wildcard patterns (note that they may need to be -escaped accordingly to prevent the shell from expanding them first). 
- -Note: The pattern must be specified to match the -.B absolute -path of each source file. - -Can be combined with the -.B --include -command line switch. If a given file matches both the include pattern and the -exclude pattern, the exclude pattern will take precedence. -.RE - .B \-\-external .br .B \-\-no\-external @@ -344,9 +675,35 @@ exclude pattern, the exclude pattern will take precedence. Specify whether to capture coverage data for external source files. External source files are files which are not located in one of the directories -specified by \-\-directory or \-\-base\-directory. Use \-\-external to include -external source files while capturing coverage data or \-\-no\-external to -ignore this data. +specified by +.I \-\-directory +or +.I \-\-base\-directory. +Use +.I \-\-external +to include +coverpoints in external source files while capturing coverage data or +.I \-\-no\-external +to exclude them. +If your +.I \-\-directory +or +.I \-\-base\-directory +path contains a soft link, then actual target directory is not considered to be +"internal" unless the +.I \-\-follow +option is used. + +The +.I \-\-no\-external +option is somewhat of a blunt instrument; the +.I \-\-exclude +and +.I \-\-include +options provide finer grained control over which coverage data is and is not +included if your project structure is complex and/or +.I \-\-no\-external +does not do what you want. Data for external source files is .B included @@ -357,7 +714,30 @@ by default. .br .B \-\-follow .RS -Follow links when searching .da files. +Follow links when searching .gcda files, as well as to decide whether a +particular (symbolically linked) source directory is "internal" to the project or not - see the +.I \-\-no\-external +option, above, for more information. +The +.I \-\-follow command line option is equivalent to the +.I geninfo_follow_symlinks +config file option. See man +.B lcovrc(5) for more information. +.RE + +.RE + +.B \-\-sort\-input +.br +.RS +Specify whether to sort file names before capture and/or aggregation. +Sorting reduces certain types of processing order-dependent output differences. +See the +.BI sort_input +section in +man +.B lcovrc(5). + .RE .B \-\-gcov\-tool @@ -365,6 +745,40 @@ Follow links when searching .da files. .br .RS Specify the location of the gcov tool. + +If the +.B \-\-gcov\-tool +option is used multiple times, then the arguments are concatenated when the callback +is executed - similar to how the gcc +.B \-Xlinker +parameter works. This provides a possibly easier way to pass arguments to +your tool, without requiring a wrapper script. +In that case, your callback will be executed as: +.I tool\-0 'tool\-1; ... 'filename'. +Note that the second and subsequent arguments are quoted when passed to +the shell, in order to handle parameters which contain spaces. + +The +.B \-\-gcov\-tool +argument may be a +.I split_char +separated string - see +.B man(4) lcovrc. + +A common use for this option is to enable LLVM: +.br + +.RS +.BR "geninfo \-\-gcov-tool " "llvm-cov " "\-\-gcov-tool " "gcov ..." +.br +.BR "geninfo \-\-gcov-tool " "llvm-cov,gcov ..." +.RE +.br + +Note: 'llvm-cov gcov da_file_name' will generate output in gcov-compatible format as required by lcov. + +If not specified, 'gcov' is used by default. + .RE .B \-h @@ -384,87 +798,639 @@ Include source files matching Use this switch if you want to include coverage data for only a particular set of source files matching any of the given patterns. 
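+
+For example (the pattern and file names are illustrative; note the quoting to
+keep the shell from expanding the glob):
+.br
+.RS
+.I "geninfo ./build \-\-include '*/src/*' \-o coverage.info"
+.RE
+.br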
Multiple patterns can be specified by using multiple -.B --include +.B \-\-include command line switches. The .I patterns will be interpreted as shell wildcard patterns (note that they may need to be escaped accordingly to prevent the shell from expanding them first). +See the lcov man page for details + +.RE + +.B \-\-exclude +.I pattern +.br +.RS +Exclude source files matching +.IR pattern . + +Use this switch if you want to exclude coverage data from a particular set +of source files matching any of the given patterns. Multiple patterns can be +specified by using multiple +.B \-\-exclude +command line switches. The +.I patterns +will be interpreted as shell wildcard patterns (note that they may need to be +escaped accordingly to prevent the shell from expanding them first). Note: The pattern must be specified to match the .B absolute path of each source file. + +Can be combined with the +.B \-\-include +command line switch. If a given file matches both the include pattern and the +exclude pattern, the exclude pattern will take precedence. + +See the lcov man page for details. + .RE -.B \-\-ignore\-errors -.I errors +.B \-\-erase\-functions +.I regexp .br .RS -Specify a list of errors after which to continue processing. +Exclude coverage data from lines which fall within a function whose name matches the supplied regexp. Note that this is a mangled or demangled name, depending on whether the \-\-demangle\-cpp option is used or not. -Use this option to specify a list of one or more classes of errors after which -geninfo should continue processing instead of aborting. +Note that this option requires that you use a gcc version which is new enough to support function begin/end line reports or that you configure the tool to derive the required dta - see the +.BI derive_function_end_line +discussion in the +.B lcovrc +man page. -.I errors -can be a comma\-separated list of the following keywords: -.B gcov: -the gcov tool returned with a non\-zero return code. +.RE + +.B \-\-substitute +.I regexp_pattern +.br +.RS +Apply Perl regexp +.IR regexp_pattern +to source file names found during processing. This is useful when the path name reported by gcov does not match your source layout and the file is not found. See the lcov man page for more details. -.B source: -the source code file for a data set could not be found. .RE -.B \-i +.B \-\-omit\-lines +.I regexp .br -.B \-\-initial .RS -Capture initial zero coverage data. +Exclude coverage data from lines whose content matches +.IR regexp . -Run geninfo with this option on the directories containing .bb, .bbg or .gcno -files before running any test case. The result is a "baseline" coverage data -file that contains zero coverage for every instrumented line and function. -Combine this data file (using lcov \-a) with coverage data files captured -after a test run to ensure that the percentage of total lines covered is -correct even when not all object code files were loaded during the test. +Use this switch if you want to exclude line, branch, and MC/DC coverage data for some particular constructs in your code (e.g., some complicated macro). +See the lcov man page for details. -Note: currently, the \-\-initial option does not generate branch coverage -information. .RE -.B \-\-no\-markers +.B \-\-forget\-test\-names .br .RS -Use this option if you want to get coverage data without regard to exclusion -markers in the source code file. 
+If non\-zero, ignore testcase names in tracefile - +.I i.e., +treat all coverage data as if it came from the same testcase. +This may improve performance and reduce memory consumption if user does +not need per-testcase coverage summary in coverage reports. + +This option can also be configured permanently using the configuration file +option +.IR forget_testcase_names . .RE -.B \-\-no\-recursion +.B \-\-msg\-log +.I [ log_file_name ] .br .RS -Use this option if you want to get coverage data for the specified directory -only without processing subdirectories. +Specify location to store error and warning messages (in addition to writing to STDERR). +If +.I log_file_name +is not specified, then default location is used. .RE -.BI "\-o " output\-filename +.B \-\-ignore\-errors +.I errors .br -.BI "\-\-output\-filename " output\-filename +.RS +Specify a list of errors after which to continue processing. + +Use this option to specify a list of one or more classes of errors after which +.B geninfo +should continue processing instead of aborting. +Note that the tool will generate a warning (rather than a fatal error) unless you ignore the error two (or more) times: +.br +.RS +geninfo ... \-\-ignore\-errors unused,unused +.RE + +.I errors +can be a comma\-separated list of the following keywords: + +.IP branch: 3 +branch ID (2nd field in the .info file 'BRDA' entry) does not follow expected integer sequence. +.PP + +.IP callback: 3 +Version script error. +.PP + +.IP child: 3 +child process returned non-zero exit code during +.I \-\-parallel +execution. This typically indicates that the child encountered an error: see the log file immediately above this message. +In contrast: the +.B parallel +error indicates an unexpected/unhandled exception in the child process - not a 'typical' lcov error. +.PP + +.IP corrupt: 3 +corrupt/unreadable file found. +.PP + +.IP count: 3 +An excessive number of messages of some class have been reported - subsequent messages of that type will be suppressed. +The limit can be controlled by the 'max_message_count' variable. See the lcovrc man page. +.PP + +.IP deprecated: 3 +You are using a deprecated option. +This option will be removed in an upcoming release - so you should change your +scripts now. +.PP + +.IP empty: 3 +the .info data file is empty (e.g., because all the code was 'removed' or excluded. +.PP + +.IP excessive: 3 +your coverage data contains a suspiciously large 'hit' count which is unlikely +to be correct - possibly indicating a bug in your toolchain. + +See the +.I excessive_count_threshold +section in man +.B lcovrc(5) +for details. +.PP + +.IP fork: 3 +Unable to create child process during +.I \-\-parallel +execution. +.br +If the message is ignored ( +.I \-\-ignore\-errors fork +), then genhtml +will wait a brief period and then retry the failed execution. +.br +If you see continued errors, either turn off or reduce parallelism, set a memory limit, or find a larger server to run the task. +.PP + +.IP format: 3 +Unexpected syntax or value found in .info file - for example, negative number or +zero line number encountered. +.PP + +.IP gcov: 3 +the gcov tool returned with a non\-zero return code. +.PP + +.IP graph: 3 +the graph file could not be found or is corrupted. +.PP + +.IP inconsistent: 3 +your coverage data is internally inconsistent: it makes two or more mutually +exclusive claims. For example, some expression is marked as both an exception branch and not an exception branch. (See man +.B genhtml(1) +for more details. 
+ +.IP internal: 3 +internal tool issue detected. Please report this bug along with a testcase. +.PP + +.IP mismatch: 3 +Incorrect information found in coverage data and/or source code - for example, +the source code contains overlapping exclusion directives. +.PP + +.IP missing: 3 +File does not exist or is not readable. +.PP + +.IP negative: 3 +negative 'hit' count found. + +Note that negative counts may be caused by a known GCC bug - see + + https://gcc.gnu.org/bugzilla/show_bug.cgi?id=68080 + +and try compiling with "-fprofile-update=atomic". You will need to recompile, re-run your tests, and re-capture coverage data. +.PP + +.IP package: 3 +a required perl package is not installed on your system. In some cases, it is possible to ignore this message and continue - however, certain features will be disabled in that case. +.PP + +.IP parallel: 3 +various types of errors related to parallelism - +.I i.e., +a child process died due to an error. The corresponding error message appears in the log file immediately before the +.I parallel +error. + +If you see an error related to parallel execution that seems invalid, it may be a good idea to remove the \-\-parallel flag and try again. If removing the flag leads to a different result, please report the issue (along with a testcase) so that the tool can be fixed. +.PP + +.IP parent: 3 +the parent process exited while child was active during +.I \-\-parallel +execution. This happens when the parent has encountered a fatal error - +.I e.g. +an error in some other child which was not ignored. This child cannot continue working without its parent - and so will exit. + +.PP + +.IP path: 3 +some file paths were not resolved - e.g., .gcno file corresponding to +some .gcda was not found see +.I \-\-build\-directory +option for additional information. + +.PP + +.IP range: 3 +Coverage data refers to a line number which is larger than the number of +lines in the source file. This can be caused by a version mismatch or +by an issue in the +.I gcov +data. +.PP + +.IP source: 3 +the source code file for a data set could not be found. +.PP + +.IP unreachable: 3 +a coverpoint (line, branch, function, or MC/DC) within an "unreachable" region is executed (hit); either the code, directive placement, or both are wrong. +If the error is ignored, the offending coverpoint is retained (not excluded) or not, depending on the value of the +.I retain_unreachable_coverpoints_if_executed +configuration parameter. +See man +.B lcovrc(5) +and the +.I "Exclusion markers" +section, above. +.PP + +.IP unsupported: 3 +the requested feature is not supported for this tool configuration. For example, function begin/end line range exclusions use some GCOV features that are not available in older GCC releases. +.PP + +.IP unused: 3 +the include/exclude/erase/omit/substitute pattern did not match any file pathnames. +.PP + +.IP usage: 3 +unsupported usage detected - e.g. an unsupported option combination. +.PP + +.IP utility: 3 +a tool called during processing returned an error code (e.g., 'find' encountered an unreadable directory). +.PP + +.IP version: 3 +revision control IDs of the file which we are trying to merge are not the same - line numbering and other information may be incorrect. +.PP + +Also see the +.I \-\-ignore\-errors +section in man +.B genhtml(1). +The description there may be more complete and/or more fully explained. 
+ +See man +.B lcovrc(5) + for a discussion of the 'max_message_count' parameter which can be used to control the number of warnings which are emitted before all subsequent messages are suppressed. This can be used to reduce log file volume. + +.RE + +.BI "\-\-expect\-message\-count message_type:expr[,message_type:expr]" +.RS +Give +.B geninfo +a constraint on the number of messages of one or more types which are expected to +be produced during execution. If the constraint is not true, then generate an +error of type +.I "count" +(see above). + +See man +.B genhtml(1) +for more details about the flag, as well as the +.I "expect_message_count" +section in man +.B lcovrc(5) +for a description of the equivalent configuration file option. +.RE + +.BI "\-\-keep\-going " +.RS +Do not stop if error occurs: attempt to generate a result, however flawed. + +This command line option corresponds to the +.I stop_on_error [0|1] +lcovrc option. See man +.B lcovrc(5) for more details. + +.RE + +.BI "\-\-fail-under-lines " +.I percentage +.br +.RS +Use this option to tell geninfo to exit with a status of 1 if the total +line coverage is less than +.I percentage. +See +.B man lcov(1) +for more details. +.RE + +.BI "\-\-preserve " +.RS +Preserve intermediate data files (e.g., for debugging). + +By default, intermediate files are deleted. + +.RE +.BI "\-\-filter " +.I filters +.RS +Specify a list of coverpoint filters to apply to input data. +See the genhtml man page for details. + +.RE +.BI "\-\-demangle\-cpp " [param] +.RS +Demangle C++ method and function names in captured output. +See the genhtml man page for details. + +.RE +.B \-i +.br +.B \-\-initial +.RS +Capture initial zero coverage data. + +Run geninfo with this option on the directories containing .bb, .bbg or .gcno +files before running any test case. The result is a "baseline" coverage data +file that contains zero coverage for every instrumented line and function. +Combine this data file (using lcov \-a) with coverage data files captured +after a test run to ensure that the percentage of total lines covered is +correct even when not all object code files were loaded during the test. +Also see the +.I \-\-all +flag, below. + +Note: the +.B \-\-initial +option is not supported for gcc versions less than 6, and does not generate branch coverage information for gcc versions less than 8. +.RE + +.B \-\-all +.RS +Capture coverage data from both compile time (.gcno) data files which do not have corresponding runtime (.gcda) data files, as well as from those that +.I do +have corresponding runtime data. +There will be no runtime data unless some executable which links the corresponding object file has run to completion. + +Note that the execution count of coverpoints found only in files which do not have any runtime data will be zero. + +This flag is ignored if the +.I \-\-initial +flag is set. + +Using the +.B \-\-all +flag is equivalent to executing both +.I geninfo --initial ... +and +.I geninfo ... +and merging the result. + +Also see the +.I geninfo_capture_all +entry in +.B man(5) lcovrc. + +.RE + + +.B \-\-no\-markers +.br +.RS +Unless the +.I \-\-no\-markers +option is used, +.BR geninfo +will apply both +.I region +and +.I branch_region +filters to the captured coverae data. +Use this option if you want to get coverage data without regard to exclusion +markers in the source code file. + +If any +.I \-\-filter +options are applied, then the default region filters are not used. 
+ +.I \-\-no\-markers should not be specified along with +.I \-\-filter. +.RE + +.B \-\-no\-recursion +.br +.RS +Use this option if you want to get coverage data for the specified directory +only without processing subdirectories. +.RE + +.BI "\-o " output\-filename +.br +.BI "\-\-output\-filename " output\-filename .RS Write all data to .IR output\-filename . If you want to have all data written to a single file (for easier handling), use this option to specify the respective filename. By default, -one tracefile will be created for each processed .da file. +one tracefile will be created for each processed .gcda file. +.RE + +.RE +.B \-\-context\-script +.I script +.br +.RS + +Use +.I script +to collect additional tool execution context information - to aid in +infrastructure debugging and/or tracking. + +See the genhtml man page for more details on the context script. + +.br + +.RE +.B \-\-criteria\-script +.I script +.br +.RS + +Use +.I script +to test for coverage acceptance criteria. + +See the genhtml man page for more details on the criteria script. +Note that geninfo does not keep track of date and owner information (see the +.I \-\-annotate\-script +entry in the genhtml man page) - so this information is not passed to the geninfo callback. + +.br + +.RE +.B \-\-resolve\-script +.I script +.br +.RS +Use +.I script +to find the file path for some source or GCNO file which appears in +an input data file if the file is not found after applying +.I \-\-substitute +patterns and searching the +.I \-\-source\-directory +or +.I \-\-build\-directory +list. + +This option is equivalent to the +.B resolve_script +config file option. +.br +In addition, the +.I geninfo_follow_path_links +config file option can be used to resolve source paths to their actual +target. + +See man +.B lcovrc(5) +for details. +.RE + .RE +.B \-\-version\-script +.I script +.br +.RS +Use +.I script +to get a source file's version ID from revision control when +extracting data. The ID is used for error checking when merging .info files. +.br + +See the genhtml man page for more details on the version script. + + +.B \-v +.br +.B \-\-verbose +.RS +Increment informational message verbosity. This is mainly used for script and/or flow debugging - e.g., to figure out which data file are found, where. +Also see the +.B \-\-quiet +flag. +Messages are sent to stdout unless there is no output file (i.e., if the coverage data is written to stdout rather than to a file) and to stderr otherwise. + +.RE .B \-q .br .B \-\-quiet .RS -Do not print progress messages. +Decrement informational message verbosity. + +Decreased verbosity will suppress 'progress' messages for example - while error and warning messages will continue to be printed. + +.RE +.B \-\-debug +.RS +Increment 'debug messages' verbosity. This is useful primarily to developers who want to enhance the lcov tool suite. + +.RE +.B \-\-comment comment_string +.RS +Append +.I comment_string +to list of comments emitted into output result file. +This option may be specified multiple times. +Comments are printed at the top of the file, in the order they were specified. + +Comments can be useful to document the conditions under which the trace file was +generated: host, date, environment, +.I etc. + + +.RE + +.BI "\-\-parallel " +.I [ integer ] +.br +.BI "\-j " +.I [ integer ] +.RS +Specify parallelism to use during processing (maximum number of forked child processes). 
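+.br
+For example (the parallelism level and file names shown are illustrative):
+.br
+.RS
+.I "geninfo \-j 4 ./build \-o coverage.info"
+.RE
+.br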
+If the optional integer parallelism parameter is zero or is missing, then up to the number of cores on the machine will be used. Default is to use a single process (no parallelism).
+
+The
+.I \-\-large\-file
+option described below may be necessary to enable parallelism to succeed
+in the presence of data files which consume excessive memory in
+.B gcov.
+
+
+Also see the
+.I memory, memory_percentage, max_fork_fails, fork_fail_timeout, geninfo_chunk_size
+and
+.I geninfo_interval_update
+entries in man
+.B lcovrc(5)
+for a description of some options which may aid in parameter tuning and performance optimization.
+
+.RE
+.BI "\-\-large\-file "
+.I regexp
+.RS
+
+GCDA files whose name matches a
+.I \-\-large\-file
+regexp are processed serially - not in parallel with other files - so that
+their
+.B gcov
+process can use all available system memory.
+.br
+Use this option if you see errors related to memory allocation from gcov.
+.br
+This feature is exactly as if you had moved the matching GCDA files to another location and processed them serially, then processed the remaining GCDA files in parallel and merged the results.
+
+This option may be used multiple times to specify more than one regexp.
+
+.RE
+.BI "\-\-memory "
+.I integer
+.RS
+Specify the maximum amount of memory to use during parallel processing, in Mb. Effectively, the process will not fork() if this limit would be exceeded. Default is 0 (zero) - which means that there is no limit.
+
+This option may be useful if the compute farm environment imposes strict limits on resource utilization such that the job will be killed if it tries to use too many parallel children - but the user does not know a priori what the permissible maximum is. This option enables the tool to use maximum parallelism - up to the limit imposed by the memory restriction.
+
+The configuration file
+.I memory_percentage
+option provides another way to set the maximum memory consumption.
+See man
+.B lcovrc(5)
+for details.
 
 .RE
 
 .B \-\-rc
@@ -478,7 +1444,7 @@ Use this option to specify a statement which overrides the corresponding
 configuration statement in the lcovrc configuration file. You can specify this
 option more than once to override multiple configuration statements.
 
-See
+See man
 .BR lcovrc (5)
 for a list of available keywords and their meaning.
 .RE
@@ -487,7 +1453,7 @@ for a list of available keywords and their meaning.
 .br
 .BI "\-\-test\-name " testname
 .RS
-Use test case name
+Use test case name 
 .I testname
 for resulting data. Valid test case names can consist of letters, decimal
 digits and the underscore character ('_').
@@ -497,73 +1463,292 @@ simply concatenating the respective tracefiles) in which case a test name
 can be used to differentiate between data from each test case.
 .RE
 
-.B \-v
-.br
 .B \-\-version
 .RS
 Print version number, then exit.
 .RE
-
-.SH FILES
-
-.I /etc/lcovrc
+.B \-\-tempdir
+.I dirname
+.br
 .RS
-The system\-wide configuration file.
+Write temporary and intermediate data to indicated directory. Default is "/tmp".
 .RE
 
-.I ~/.lcovrc
-.RS
-The per\-user configuration file.
-.RE
+
+.SH "TRACEFILE FORMAT"
 
 Following is a quick description of the tracefile format as used by
 .BR genhtml ", " geninfo " and " lcov .
 
 A tracefile is made up of several human\-readable lines of text,
-divided into sections. If available, a tracefile begins with the
+divided into sections.
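+
+As a brief orientation, a minimal (illustrative) section for one source file
+might look like this; each record type is described below:
+.br
+
+.RS
+TN:example_test
+.br
+SF:/home/user/project/src/example.c
+.br
+DA:10,1
+.br
+LH:1
+.br
+LF:1
+.br
+end_of_record
+.RE
+.br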
+If the
+.BI "\-\-comment\ comment_string"
+option is supplied, then
+
+.RS
+ #comment_string
+.RE
+will appear at the top of the tracefile. There is no space before or after the
+.I #
+character.
+
+
+If available, a tracefile begins with the
 .I testname
 which is stored in the following format:
 
- TN:<test name>
+.RS
+TN:<test name>
+.RE
 
-For each source file referenced in the .da file, there is a section containing
+For each source file referenced in the .gcda file, there is a section containing
 filename and coverage data:
 
- SF:<absolute path to the source file>
+.RS
+SF:<absolute path to the source file>
+.RE
+
+An optional source code version ID follows:
+.br
+
+.RS
+VER:<version ID>
+.RE
+
+If present, the version ID is compared before file entries are merged (see
+.B "lcov \-\-add\-tracefile"
+), and before the 'source detail' view is generated by genhtml.
+See the
+.BI "\-\-version\-script " callback_script
+documentation and the sample usage in the lcov regression test examples.
+
+Function coverage data follows.
+Note that the format of the function coverage data has changed from LCOV 2.2 onward.
+The tool continues to be able to read the old format but now writes only the
+new format.
+This change was made so that
+.B function
+filter outcome is persistent in the generated tracefile.
+
+Functions and their aliases are recorded contiguously:
+
+First, the leader:
+
+.RS
+FNL:<index>,<line number of function start>[,<line number of function end>]
+.RE
+
+Then the aliases of the function; there will be at least one alias. All aliases of a particular function share the same index.
+
+.RS
+FNA:<index>,<execution count>,<alias name>
+.RE
+
+
+The now-obsolete function data format is:
+
+.RS
 
-Following is a list of line numbers for each function name found in the
-source file:
+.RS
+FN:<line number of function start>,[<line number of function end>,]<function name>
+.RE
 
- FN:<line number of function start>,<function name>
+The 'end' line number is optional, and is generated only if the compiler/toolchain
+version is recent enough to generate the data (e.g., gcc 9 or newer).
+This data is used to support the
+.B \-\-erase\-functions
+and
+.B \-\-show\-proportions
+options. If the function end line data is not available, then these features will not work.
 
 Next, there is a list of execution counts for each instrumented function:
 
- FNDA:<execution count>,<function name>
+.RS
+FNDA:<execution count>,<function name>
+.RE
+.RE
 
 This list is followed by two lines containing the number of functions
 found and hit:
 
- FNF:<number of functions found>
- FNH:<number of functions hit>
+.RS
+FNF:<number of functions found>
+.br
+FNH:<number of functions hit>
+.RE
+
+Note that, as of LCOV 2.2, these numbers count function groups - not the individual aliases.
 
-Branch coverage information is stored which one line per branch:
+Branch coverage information is stored one line per branch:
 
- BRDA:<line number>,<block number>,<branch number>,<taken>
+.RS
+BRDA:<line_number>,[<exception>]<block>,<branch_expression>,<taken>
+.RE
 
-Block number and branch number are gcc internal IDs for the branch. Taken is
-either '-' if the basic block containing the branch was never executed or
+.I <line_number>
+is the line number where the branch is found - and is expected to be a non-zero integer.
+.br
+.I <block>
+and
+.I <branch_expression>
+serve to uniquely define a particular edge in the expression tree of a particular conditional found on the associated line.
+.br
+Within a particular line,
+.I <block>
+is an integer numbered from zero with no gaps. For some languages and some coding styles, there will only be one block (index value zero) on any particular line.
+.br
+.I <branch_expression>
+is a string which serves to uniquely identify a particular edge. For some languages and tools - e.g., C/C++ code compiled with gcc or llvm -
+.I <branch_expression>
+is an ordered integer index related to expression tree traversal order of the associated conditional. For others, it may be a meaningful string - see below.
+.I <branch_expression>
+appears in the 'tooltip' popup of the associated branch in the
+.B genhtml
+output - so human-readable values are helpful to users who are trying to understand coverage results - for example, in order to develop additional regression tests, to improve coverage.
+.br
+.I <taken>
+is either '-' if the corresponding expression was never evaluated (e.g., the basic block containing the branch was never executed) or
 a number indicating how often that branch was taken.
+.br
+.I <exception>
+is 'e' (single character) if this is a branch related to exception handling - and is not present if the branch is not related to exceptions.
+Exception branch identification requires compiler support; note that gcc versions older than 9 do not differentiate exception branches. Geninfo will be able to identify exception branches only if your toolchain version is new enough to support the feature.
+
+The following are example branch records whose
+.I <branch_expression>
+expression values are human-readable strings.
+
+.RS
+ BRDA:10,0,enable,1
+.br
+ BRDA:10,0,!enable,0
+.RE
+In this case, the corresponding code from line 10 is very likely similar to:
+.br
+.RS
+ if (enable) {
+.br
+ ...
+.br
+ }
+.br
+.RE
+such that the associated testcase entered the block ('enable' evaluated to 'true').
+
+Arbitrarily complicated branch expressions are supported - including branch expressions which contain commas (
+.I e.g.,
+ in an expression containing a function call).
+
+Note that particular tools may or may not suppress expressions which are statically true or statically false -
+.I e.g.,
+expressions using template parameters.
+This makes it potentially complicated to compare coverage data generated by two different tools.
+
 Branch coverage summaries are stored in two lines:
 
- BRF:<number of branches found>
- BRH:<number of branches hit>
+.RS
+BRF:<number of branches found>
+.br
+BRH:<number of branches hit>
+.RE
+
+MC/DC information is stored one line per expression:
+
+.RS
+MCDC:<line number>,<group size>,<sense>,<taken>,<index>,<expression>
+.RE
+where:
+
+.I <line number>
+is the line number where the condition is found - and is expected to be a non-zero integer.
+.br
+.I <group size>
+and
+.I <index>
+serve to uniquely define a particular element in the expression tree of a particular conditional found on the associated line.
+.br
+Within a particular line and group,
+.I <index>
+is an integer numbered from zero to
+.I <group size> - 1
+with no gaps. For some languages and some coding styles, there will only be one group on any particular line.
+
+.I <sense>
+is either
+.I "f"
+or
+.I "t",
+indicating whether the condition is sensitive to the indicated change - that is, does the condition outcome change if the corresponding condition changes from 'false' to 'true' or from 'true' to 'false', respectively.
+
+
+.I <taken>
+is a count - 0 (zero) if the expression was not sensitized, non-zero if it was sensitized.
+Note that some tools may treat
+.I <taken>
+as the number of times that the expression was sensitized while others may treat it
+as a boolean - 1:sensitized or 0: not sensitized.
+
+.I <expression>
+is an arbitrary string, intended to be a meaningful string which will help the user to understand the condition context - see below.
+.I <expression>
+appears in the 'tooltip' popup of the associated MC/DC condition in the
+.B genhtml
+output - so human-readable values are helpful to users who are trying to understand coverage results - for example, in order to develop additional regression tests, to improve coverage.
+.br
+For a given <line number> and <index>, the <expression> should be identical for both
+"t" and "f" senses.
+
+
+The following are example MC/DC records whose
+.I <expression>
+values are human-readable strings.
+
+.RS
+ MCDC:10,2,f,0,0,enable
+.br
+ MCDC:10,2,t,1,0,enable
+.br
+ ...
+.RE
+In this case, the corresponding code from line 10 is very likely similar to:
+.br
+.RS
+ if (enable ...) {
+.br
+ ...
+.br
+ }
+.br
+.RE
+such that the associated testcase was sensitive to a change of 'enable' from true to false (but not the converse).
+
+Arbitrarily complicated expressions are supported - including expressions which contain commas (
+.I e.g.,
+ in an expression containing a function call).
+
+Note that particular tools may or may not suppress expressions which are statically true or statically false -
+.I e.g.,
+expressions using template parameters.
+This makes it potentially complicated to compare coverage data generated by two different tools.
+
+
+MC/DC coverage summaries are stored in two lines:
+
+.RS
+MRF:<number of MC/DC expressions found>
+.br
+MRH:<number of MC/DC expressions hit>
+.RE
+
 Then there is a list of execution counts for each instrumented line
 (i.e. a line which resulted in executable code):
 
- DA:<line number>,<execution count>[,<checksum>]
+.RS
+DA:<line number>,<execution count>[,<checksum>]
+.RE
 
 Note that there may be an optional checksum present for each instrumented
 line. The current
@@ -573,29 +1758,69 @@ implementation uses an MD5 hash as checksumming algorithm.
 At the end of a section, there is a summary about how many lines
 were found and how many were actually instrumented:
 
- LH:<number of lines with a non-zero execution count>
- LF:<number of instrumented lines>
+.RS
+LH:<number of lines with a non-zero execution count>
+.br
+LF:<number of instrumented lines>
+.RE
 
 Each sections ends with:
 
- end_of_record
+.RS
+end_of_record
+.RE
 
 In addition to the main source code file there are sections for all
 #included files which also contain executable code.
 
 Note that the absolute path of a source file is generated by interpreting
-the contents of the respective .bb file (see
+the contents of the respective .gcno file (see
 .BR "gcov " (1)
 for more information on this file type). Relative filenames are prefixed
-with the directory in which the .bb file is found.
+with the directory in which the .gcno file is found.
 
-Note also that symbolic links to the .bb file will be resolved so that the
+Note also that symbolic links to the .gcno file will be resolved so that the
 actual file path is used instead of the path to a link. This approach is
 necessary for the mechanism to work with the /proc/gcov files.
+
+.SH FILES
+
+.I /etc/lcovrc
+.RS
+The system\-wide configuration file.
+.RE
+
+.I ~/.lcovrc
+.RS
+The per\-user configuration file.
+.RE
+
+.I \*[scriptdir]/getp4version
+.RS
+Sample script for use with
+.B --version-script
+that obtains version IDs via Perforce.
+.br
+.RE
+
+.I \*[scriptdir]/get_signature
+.RS
+Sample script for use with
+.B --version-script
+that uses md5hash as version IDs.
+.br
+.RE
+
+
 .SH AUTHOR
 Peter Oberparleiter
 
+Henry Cox
+.RS
+Filtering, error management, parallel execution sections.
+.RE
+
 .SH SEE ALSO
 .BR lcov (1),
 .BR lcovrc (5),
@@ -603,3 +1828,7 @@ Peter Oberparleiter
 .BR genpng (1),
 .BR gendesc (1),
 .BR gcov (1)
+.br
+
+.I \*[lcovurl]
+.br
diff --git a/man/genpng.1 b/man/genpng.1
index 51f2958b..b5f2ab92 100644
--- a/man/genpng.1
+++ b/man/genpng.1
@@ -1,4 +1,7 @@
-.TH genpng 1 "LCOV 1.15" 2019\-02\-28 "User Manuals"
+\" Define project URL
+.ds lcovurl https://github.com/linux\-test\-project/lcov
+
+.TH genpng 1 "LCOV 2.0" 2023\-05\-12 "User Manuals"
 .SH NAME
 genpng \- Generate an overview image from a source file
 .SH SYNOPSIS
@@ -11,6 +14,7 @@ genpng \- Generate an overview image from a source file
 .IR tabsize ]
 .RB [ \-w | \-\-width
 .IR width ]
+.RB [ \-d | \-\-dark\-mode ]
 .br
 .RB [ \-o | \-\-output\-filename
 .IR output\-filename ]
@@ -52,7 +56,7 @@ Print version number, then exit.
 .br
 .BI "\-\-tab\-size " tab\-size
 .RS
-Use
+Use 
 .I tab\-size
 spaces in place of tab.
@@ -66,7 +70,7 @@ by the number of spaces defined by .br .BI "\-\-width " width .RS -Set width of output image to +Set width of output image to .I width pixel. @@ -79,6 +83,12 @@ Note that source code lines which are longer than will be truncated. .RE +.B \-d +.br +.B \-\-dark\-mode +.RS +Use a light-display-on-dark-background color scheme rather than the default dark-display-on-light-background. +.RE .BI "\-o " filename .br @@ -87,7 +97,7 @@ will be truncated. Write image to .IR filename . -Specify a name for the resulting image file (default is +Specify a name for the resulting image file (default is .IR source\-file .png). .RE .SH AUTHOR @@ -99,3 +109,7 @@ Peter Oberparleiter .BR geninfo (1), .BR gendesc (1), .BR gcov (1) +.br + +.I \*[lcovurl] +.br diff --git a/man/lcov.1 b/man/lcov.1 index 0a5ea1e2..6838d0a5 100644 --- a/man/lcov.1 +++ b/man/lcov.1 @@ -1,10 +1,24 @@ -.TH lcov 1 "LCOV 1.15" 2020\-07\-31 "User Manuals" +\" Define project URL +.ds lcovurl https://github.com/linux\-test\-project/lcov + +.TH lcov 1 "LCOV 2.0" 2023\-05\-17 "User Manuals" .SH NAME lcov \- a graphical GCOV front\-end .SH SYNOPSIS + +Capture coverage data tracefile (from compiler-generated data). +.br +The lcov tracefile +.I (".info" file) +format is described in man +.B geninfo(1). + +.br + +.RS 3 .B lcov .BR \-c | \-\-capture -.RS 5 +.RS 4 .br .RB [ \-d | \-\-directory .IR directory ] @@ -18,167 +32,318 @@ lcov \- a graphical GCOV front\-end .br .RB [ \-b | \-\-base\-directory .IR directory ] +.br +.RB [ \-\-build\-directory +.IR directory ] +.br +.RB [ \-\-source\-directory +.IR directory ] +.br .RB [ \-i | \-\-initial ] +.br +.RB [ \-\-all ] ] +.br .RB [ \-\-gcov\-tool .IR tool ] .br +.RB [ \-\-branch\-coverage ] +.br +.RB [ \-\-mcdc\-coverage ] +.br +.RB [ \-\-demangle\-cpp +.IR [ param ] ] +.br .RB [ \-\-checksum ] .RB [ \-\-no\-checksum ] .RB [ \-\-no\-recursion ] .RB [ \-f | \-\-follow ] .br +.RB [ \-\-sort\-input ] +.br .RB [ \-\-compat\-libtool ] .RB [ \-\-no\-compat\-libtool ] +.br +.RB [ \-\-msg\-log +.IR [ log_file_name ] ] +.br .RB [ \-\-ignore\-errors .IR errors ] .br +.RB [\-\-expect\-message\-count +.IR message_type=expr[,message_type=expr..]] +.br +.RB [ \-\-preserve ] .RB [ \-\-to\-package .IR package ] .RB [ \-\-from\-package .IR package ] -.RB [ \-q | \-\-quiet ] -.br .RB [ \-\-no\-markers ] .RB [ \-\-external ] .RB [ \-\-no\-external ] .br -.RB [ \-\-config\-file -.IR config\-file ] -.RB [ \-\-rc -.IR keyword = value ] -.br .RB [ \-\-compat .IR mode =on|off|auto] .br -.RB [ \-\-include -.IR pattern ] -.RB [ \-\-exclude -.IR pattern ] +.RB [ \-\-context\-script +.IR script_file ] +.br +.RB [ \-\-criteria\-script +.IR script_file ] +.br +.RB [ \-\-resolve-\-script +.IR script_file ] +.br +.RB [ \-\-version\-script +.IR script_file ] .br +.RB [ \-\-comment +.IR comment_string ] +.br +.RB [ \-\-large\-file +.IR regexp ] +.br +.RE .RE +Generate tracefile (from compiler-generated data) with all counter values set to zero: +.br + +.RS 3 .B lcov .BR \-z | \-\-zerocounters -.RS 5 +.RS 4 .br .RB [ \-d | \-\-directory .IR directory ] .RB [ \-\-no\-recursion ] .RB [ \-f | \-\-follow ] .br -.RB [ \-q | \-\-quiet ] -.br .RE +.RE + +Show coverage counts recorded in previously generated tracefile: +.br +.RS 3 .B lcov .BR \-l | \-\-list .I tracefile -.RS 5 +.RS 4 .br -.RB [ \-q | \-\-quiet ] .RB [ \-\-list\-full\-path ] .RB [ \-\-no\-list\-full\-path ] .br -.RB [ \-\-config\-file -.IR config\-file ] -.RB [ \-\-rc -.IR keyword = value ] -.br .RE +.RE + +Aggregate multiple coverage tracefiles 
into one: +.br +.RS 3 .B lcov .BR \-a | \-\-add\-tracefile -.I tracefile -.RS 5 +.I tracefile_pattern +.RS 4 .br .RB [ \-o | \-\-output\-file .IR tracefile ] +.br +.RB [ \-\-prune\-tests ] +.br +.RB [ \-\-forget\-test\-names ] +.br +.RB [ \-\-map\-functions ] +.br +.RB [ \-\-branch\-coverage ] +.br +.RB [ \-\-mcdc\-coverage ] +.br .RB [ \-\-checksum ] .RB [ \-\-no\-checksum ] .br -.RB [ \-q | \-\-quiet ] -.RB [ \-\-config\-file -.IR config\-file ] -.RB [ \-\-rc -.IR keyword = value ] +.RB [ \-\-sort\-input ] .br .RE +Depending on your use model, it may not be necessary to create aggregate coverage data files. +For example, if your regression tests are split into multiple suites, you may want to keep separate suite data and to compare both per-suite and aggregate results over time. +.B genhtml +allows you specify tracefiles via one or more glob patterns - which enables you +generate aggregate reports without explicitly generating aggregated trace files. +See the +.B genhtml +man page. +.RE + + +Generate new tracefile from existing tracefile, keeping only data from files matching pattern: +.br + +.RS 3 .B lcov .BR \-e | \-\-extract .I tracefile pattern -.RS 5 +.RS 4 .br .RB [ \-o | \-\-output\-file .IR tracefile ] .RB [ \-\-checksum ] .RB [ \-\-no\-checksum ] .br -.RB [ \-q | \-\-quiet ] -.RB [ \-\-config\-file -.IR config\-file ] -.RB [ \-\-rc -.IR keyword = value ] -.br +.RE .RE +Generate new tracefile from existing tracefile, removing data from files matching pattern: +.br + +.RS 3 .B lcov .BR \-r | \-\-remove .I tracefile pattern -.RS 5 +.RS 4 .br .RB [ \-o | \-\-output\-file .IR tracefile ] .RB [ \-\-checksum ] .RB [ \-\-no\-checksum ] .br -.RB [ \-q | \-\-quiet ] -.RB [ \-\-config\-file -.IR config\-file ] -.RB [ \-\-rc -.IR keyword = value ] -.br +.RE .RE +Generate new tracefile from existing tracefiles by performing set operations on coverage data: +.br + +.RS 3 .B lcov -.BR \-\-diff -.IR "tracefile diff" -.RS 5 +.BR \-\-intersect +.I rh_glob_pattern +.RS 4 .br .RB [ \-o | \-\-output\-file .IR tracefile ] -.RB [ \-\-checksum ] -.RB [ \-\-no\-checksum ] .br -.RB [ \-\-convert\-filenames ] -.RB [ \-\-strip -.IR depth ] -.RB [ \-\-path -.IR path ] -.RB [ \-q | \-\-quiet ] +lh_glob_pattern + + +The output will reflect +.RS 2 +.I (union of files matching lh_glob_patterns) +.I intersect +.I (union of files matching rh_glob_patterns) +.RE +such that coverpoints found in both sets are merged (summed) whereas coverpoints found in only one set are dropped. +Note that branch blocks are defined to be the same if and only if their block ID and the associated branch expressions list are identical. +Functions are defined to be the same if their name and location are identical. +.RE +.RE + +.RS 3 +.B lcov +.BR \-\-subtract +.I rh_glob_pattern +.RS 4 .br -.RB [ \-\-config\-file -.IR config\-file ] -.RB [ \-\-rc -.IR keyword = value ] +.RB [ \-o | \-\-output\-file +.IR tracefile ] .br +lh_glob_pattern + +The output will reflect +.RS 2 +.I (union of files matching lh_glob_patterns) +.I subtract +.I (union of files matching rh_glob_patterns) +.RE +such that coverpoints found only in the set on the left will be retained and all others are dropped. 
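+
+For example, a hypothetical invocation (the tracefile names and glob patterns here are illustrative only):
+.RS 2
+lcov \-\-subtract 'baseline/*.info' \-o new_only.info 'full/*.info'
+.RE
+would retain only those coverpoints which appear in the files matching 'full/*.info' but not in the files matching 'baseline/*.info'.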
.RE +.RE + + + +Summarize tracefile content: +.br + +.RS 3 .B lcov .BR \-\-summary .I tracefile -.RS 5 -.br -.RB [ \-q | \-\-quiet ] -.br .RE +Print version or help message and exit: +.br + +.RS 3 .B lcov .RB [ \-h | \-\-help ] -.RB [ \-v | \-\-version ] +.RB [ \-\-version ] +.RE + +Common lcov options - supported by all the above use cases: +.br + +.RS 3 +.B lcov +.RB [ \-\-keep\-going ] +.br .RS 5 +.RB [ \-\-filter +.IR type ] +.br +.br +.RB [ \-q | \-\-quiet ] +.br +.RB [ \-v | \-\-verbose ] +.br +.RB [ \-\-comment +.IR comment_string ] +.br +.RB [ \-\-debug ] +.br +.RB [ \-\-parallel | -j +.IR [integer] ] +.br +.RB [ \-\-memory +.IR integer_num_Mb ] +.br +.RB [ \-\-tempdir +.IR dirname ] +.br +.RB [ \-\-branch\-coverage ] +.br +.RB [ \-\-mcdc\-coverage ] +.br +.RB [ \-\-config\-file +.IR config\-file ] +.RB [ \-\-rc +.IR keyword = value ] +.br +.RB [ \-\-profile +.IR [ profile\-file ] ] +.br +.RB [ \-\-include +.IR glob_pattern ] +.br +.RB [ \-\-exclude +.IR glob_pattern ] +.br +.RB [ \-\-erase\-functions +.IR regexp_pattern ] +.br +.RB [ \-\-substitute +.IR regexp_pattern ] +.br +.RB [ \-\-omit\-lines +.IR regexp_pattern ] +.br +.RB [ \-\-fail\-under\-branches +.IR percentage ] +.br +.RB [ \-\-fail\-under\-lines +.IR percentage ] .br .RE +.RE .SH DESCRIPTION .B lcov @@ -198,15 +363,14 @@ you have to complete the following preparation steps: For Linux kernel coverage: .RS Follow the setup instructions for the gcov\-kernel infrastructure: -.I http://ltp.sourceforge.net/coverage/gcov.php +.I https://docs.kernel.org/dev-tools/gcov.html .br - - .RE + For user space application coverage: -.RS +.RS 3 Compile the application with GCC using the options -"\-fprofile\-arcs" and "\-ftest\-coverage". +"\-fprofile\-arcs" and "\-ftest\-coverage" or "\-\-coverage". .RE Please note that this man page refers to the output format of @@ -221,18 +385,36 @@ non-boundary value. This behavior is in accordance with that of the .BR gcov (1) tool. +By default, +.B lcov +and related tools generate and collect line and function coverage data. +Branch data is not collected or displayed by default; all tools support the +.B\ \--branch\-coverage +and +.B \-\-mdcd\-coverage +options to enable branch and MC/DC coverage, respectively - or you can permanently enable branch coverage by adding the appropriate +settings to your personal, group, or site lcov configuration file. See man +.B lcovrc(5) +for details. + .SH OPTIONS +In general, (almost) all +.B lcov +options can also be specified in a configuration file - see man +.B lcovrc(5) +for details. + .B \-a -.I tracefile +.I tracefile_pattern .br .B \-\-add\-tracefile -.I tracefile +.I tracefile_pattern .br .RS -Add contents of -.IR tracefile . +Add contents of all files matching glob pattern +.IR tracefile_pattern. Specify several tracefiles using the \-a switch to combine the coverage data contained in these files by adding up execution counts for matching test and @@ -241,7 +423,7 @@ filename combinations. The result of the add operation will be written to stdout or the tracefile specified with \-o. -Only one of \-z, \-c, \-a, \-e, \-r, \-l, \-\-diff or \-\-summary may be +Only one of \-z, \-c, \-a, \-e, \-r, \-l or \-\-summary may be specified at a time. .RE @@ -273,16 +455,53 @@ directory in which the source code file is located. Note that this option will not work in environments where multiple base directories are used. In that case use configuration file setting .B geninfo_auto_base=1 -(see -.BR lcovrc (5)). +(see man +.BR lcovrc (5) +). 
+
+.RE
+
+.B \-\-build\-directory
+.I build_directory
+.br
+.RS
+Search for .gcno data files in build_directory rather than
+adjacent to the corresponding .gcda file.
+
+See man
+.BR geninfo (1)
+for details.
+
+
+.RE
+.BI "\-\-source\-directory " dirname
+.RS
+Add 'dirname' to the list of places to look for source files.
+.br
+
+For relative source file paths -
+.I e.g.,
+paths found in the
+.IR tracefile,
+or found in gcov output during
+.I \-\-capture
+\- possibly after substitutions have been applied -
+.B lcov
+will first look for the path from 'cwd' (where lcov was
+invoked) and
+then from each alternate directory name in the order specified.
+The first matching location is used.
+This option can be specified multiple times, to add more directories to the source search path.
+
+
+.RE
 .B \-c
 .br
 .B \-\-capture
 .br
 .RS
-Capture coverage data.
+Capture runtime coverage data.
 
 By default captures the current kernel execution counts and writes the
 resulting coverage data to the standard output. Use the \-\-directory
@@ -291,16 +510,58 @@ option to capture counts for a user space program.
 
 The result of the capture operation will be written to stdout or the tracefile
 specified with \-o.
 
+When combined with the
+.BR \-\-all
+flag, both runtime and compile-time coverage will be extracted in one step.
+See the description of the
+.BR \-\-initial
+flag, below.
+
+See man
+.BR geninfo (1)
+for more details about the capture process and available options and parameters.
+
+
 Only one of \-z, \-c, \-a, \-e, \-r, \-l, \-\-diff or \-\-summary may be
 specified at a time.
 .RE
 
+.B \-\-branch\-coverage
+.RS
+.br
+Collect and/or retain branch coverage data.
+
+This is equivalent to using the option "\-\-rc branch_coverage=1"; the option was added to better match the genhtml interface.
+
+.RE
+
+.B \-\-mcdc\-coverage
+.RS
+.br
+Collect and/or retain MC/DC data.
+
+This is equivalent to using the option "\-\-rc mcdc_coverage=1".
+MC/DC coverage is supported for GCC versions 14.2 and higher, or
+LLVM 18.1 and higher.
+.br
+See
+.I llvm2lcov \-\-help
+for details on MC/DC data capture in LLVM.
+
+.br
+See the MC/DC section of man
+.B genhtml(1)
+for more details.
+
+.RE
+
 .B \-\-checksum
 .br
 .B \-\-no\-checksum
 .br
 .RS
-Specify whether to generate checksum data when writing tracefiles.
+Specify whether to generate checksum data when writing tracefiles and/or to
+verify matching checksums when combining trace files.
 
 Use \-\-checksum to enable checksum generation or
 \-\-no\-checksum to disable it. Checksum generation is
@@ -314,6 +575,10 @@ code versions.
 
 If you don't work with different source code versions, disable this option
 to speed up coverage data processing and to reduce the size of tracefiles.
+
+Note that this option is somewhat subsumed by the
+.B \-\-version\-script
+option - which does something similar, but at the 'whole file' level.
 .RE
 
 .B \-\-compat
@@ -408,6 +673,12 @@ libtool, disable this option to prevent problems when capturing coverage data.
 .br
 .RS
 Specify a configuration file to use.
+See man
+.B lcovrc(5)
+for details of the file format and options. Also see the
+.I config_file
+entry in the same man page for details on how to include one config file into
+another.
 
 When this option is specified, neither the system\-wide configuration file
 /etc/lcovrc, nor the per\-user configuration file ~/.lcovrc is read.
@@ -416,48 +687,25 @@ This option may be useful when there is a need to run several instances of
 .B lcov
 with different configuration file options in parallel.
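+
+For example, a hypothetical pair of invocations (the configuration file and output names are illustrative only):
+.RS
+lcov \-\-config\-file team_a.lcovrc \-\-capture \-d . \-o team_a.info
+.br
+lcov \-\-config\-file team_b.lcovrc \-\-capture \-d . \-o team_b.info
+.RE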
-.RE -.B \-\-convert\-filenames -.br -.RS -Convert filenames when applying diff. +Note that this option must be specified in full - abbreviations are not supported. -Use this option together with \-\-diff to rename the file names of processed -data sets according to the data provided by the diff. .RE -.B \-\-diff -.I tracefile -.I difffile +.B \-\-profile +.I [ profile\-data\-file ] .br .RS -Convert coverage data in -.I tracefile -using source code diff file -.IR difffile . +Tell the tool to keep track of performance and other configuration data. +If the optional +.I profile\-data\-file +is not specified, then the profile data is written to a file named with the same +basename as the +.I \-\-output\-filename, with suffix +.I ".json" +appended. -Use this option if you want to merge coverage data from different source code -levels of a program, e.g. when you have data taken from an older version -and want to combine it with data from a more current version. -.B lcov -will try to map source code lines between those versions and adjust the coverage -data respectively. -.I difffile -needs to be in unified format, i.e. it has to be created using the "\-u" option -of the -.B diff -tool. - -Note that lines which are not present in the old version will not be counted -as instrumented, therefore tracefiles resulting from this operation should -not be interpreted individually but together with other tracefiles taken -from the newer version. Also keep in mind that converted coverage data should -only be used for overview purposes as the process itself introduces a loss -of accuracy. - -The result of the diff operation will be written to stdout or the tracefile -specified with \-o. +.RE Only one of \-z, \-c, \-a, \-e, \-r, \-l, \-\-diff or \-\-summary may be specified at a time. @@ -500,6 +748,9 @@ escaped accordingly to prevent the shell from expanding them first). Note: The pattern must be specified to match the .B absolute path of each source file. +If you specify a pattern which does not seem to be correctly applied - files that you expected to be excluded still appear in the output - you can look for warning messages in the log file. +.B lcov +will emit a warning for every pattern which is not applied at least once. Can be combined with the .B --include @@ -507,6 +758,186 @@ command line switch. If a given file matches both the include pattern and the exclude pattern, the exclude pattern will take precedence. .RE +.B \-\-erase\-functions +.I regexp +.br +.RS +Exclude coverage data from lines which fall within a function whose name matches the supplied regexp. Note that this is a mangled or demangled name, depending on whether the \-\-demangle\-cpp option is used or not. + +Note that this option requires that you use a gcc version which is new enough to support function begin/end line reports or that you configure the tool to derive the required dta - see the +.BI derive_function_end_line +discussion in man +.B lcovrc(5). + +.RE +.B \-\-substitute +.I regexp_pattern +.br +.RS +Apply Perl regexp +.IR regexp_pattern +to source file names found during processing. This is useful, for example, when the path name reported by gcov does not match your source layout and the file is not found, or in more complicated environments where the build directory structure does not match the source code layout or the layout in the projects's revision control system. + +Use this option in situations where geninfo cannot find the correct +path to source code files of a project. 
By providing a +.I regexp_pattern +in Perl regular expression format (see man +.BR perlre (1) +), you can instruct geninfo to +remove or change parts of the incorrect source path. +Also see the +.B \-\-resolve\-script +option. + +One or more +.I \-\-substitution +patterns and/or a +.I \-\-resolve-script +may be specified. When multiple patterns are specified, they are applied in the order specified, substitution patterns first followed by the resolve callback. +The file search order is: +.RS +.IP 1. 3 +Look for file name (unmodified). +.br +If the file exits: return it. +.PP +.IP 2. 3 +Apply all substitution patterns in order - the result of the first pattern is used as the input of the second pattern, and so forth. +.br +If a file corresponding to the resulting name exists: return it. +.PP +.IP 3. 3 +Apply the 'resolve' callback to the final result of pattern substitutions. +.br +If a file corresponding to the resulting name exists: return it. +.PP +.IP 4. 3 +Otherwise: return original (unmodified) file name. +.br +Depending on context, the unresolved file name may or may not result in an error. +.RE + +Substitutions are used in multiple contexts by lcov/genhtml/geninfo: +.RS +.IP \- 3 +during +.I \-\-capture, +applied to source file names found in gcov-generated coverage data files (see man +.B gcov(1) +). +.PP + +.IP \- 3 +during +.I \-\-capture, +applied to alternate +.I \-\-build\-dir +paths, when looking for the +.I .gcno +(compile time) data file corresponding to some +.I .gcda +(runtime) data file. +.PP + +.IP \- 3 +applied to file names found in lcov data files (".info" files) - +.I e.g., +during lcov data aggregation or HTML and text report generation. +.br +For example, substituted names are used to find source files for +text-based filtering (see the +.I \-\-filter +section, below) and are passed to +.I \-\-version\-script, \-\-annotate\-script, +and +.I \-criteria\-script +callbacks. +.PP + +.IP \- 3 +applied to file names found in the +.I \-\-diff\-file +passed to genhtml. +.PP +.RE + + +.B Example: +.br + +1. When geninfo reports that it cannot find source file +.br + + /path/to/src/.libs/file.c +.br + +while the file is actually located in +.br + + /path/to/src/file.c +.br + +use the following parameter: +.br + + \-\-substitute 's#/.libs##g' + +This will remove all "/.libs" strings from the path. + +2. When geninfo reports that it cannot find source file +.br + + /tmp/build/file.c +.br + +while the file is actually located in +.br + + /usr/src/file.c +.br + +use the following parameter: +.br + + \-\-substitute 's#/tmp/build#/usr/src#g' +.br + +This will change all "/tmp/build" strings in the path to "/usr/src". +.PP + +.RE + +.B \-\-omit\-lines +.I regexp +.br +.RS +Exclude coverage data from lines whose content matches +.IR regexp . + +Use this switch if you want to exclude line and branch coverage data for some particular constructs in your code (e.g., some complicated macro). Multiple patterns can be +specified by using multiple +.B --omit\-lines +command line switches. The +.I regexp +will be interpreted as perl regular expressions (note that they may need to be +escaped accordingly to prevent the shell from expanding them first). +If you want the pattern to explicitly match from the start or end of the line, your regexp should start and/or end with "^" and/or "$". + +Note that the +.B lcovrc +config file setting +.B lcov_excl_line = regexp +is similar to +.B \-\-omit\-lines. 
+.B \-\-omit\-lines +is useful if there are multiple teams each of which want to exclude certain patterns. +.B \-\-omit\-lines +is additive and can be specified across multiple config files whereas each call to +.B lcov_excl_line +overrides the previous value - and thus teams must coordinate. + +.RE + .B \-\-external .br .B \-\-no\-external @@ -515,13 +946,214 @@ exclude pattern, the exclude pattern will take precedence. Specify whether to capture coverage data for external source files. External source files are files which are not located in one of the directories -specified by \-\-directory or \-\-base\-directory. Use \-\-external to include -external source files while capturing coverage data or \-\-no\-external to -ignore this data. +specified by +.I \-\-directory +or +.I \-\-base\-directory. +Use +.I \-\-external +to include +coverpoints in external source files while capturing coverage data or +.I \-\-no\-external +to exclude them. +If your +.I \-\-directory +or +.I \-\-base\-directory +path contains a soft link, then actual target directory is not considered to be +"internal" unless the +.I \-\-follow +option is used. + +The +.I \-\-no\-external +option is somewhat of a blunt instrument; the +.I \-\-exclude +and +.I \-\-include +options provide finer grained control over which coverage data is and is not +included if your project structure is complex and/or +.I \-\-no\-external +does not do what you want. Data for external source files is .B included by default. + +.RE + +.B \-\-forget\-test\-names +.br +.RS +If non\-zero, ignore testcase names in .info file - +.I i.e., +treat all coverage data as if it came from the same testcase. +This may improve performance and reduce memory consumption if user does +not need per-testcase coverage summary in coverage reports. + +This option can also be configured permanently using the configuration file +option +.IR forget_testcase_names . + +.RE + +.B \-\-prune\-tests +.br +.RS +Determine list of unique tracefiles. + +Use this option to determine a list of unique tracefiles from the list +specified by +.BR \-\-add\-tracefile . +A tracefile is considered to be unique if it is the only tracefile that: + +.RS + +.IP 1. 3 +contains data for a specific source file +.br +.PP +.IP 2. 3 +contains data for a specific test case name +.br +.PP +.IP 3. 3 +contains non-zero coverage data for a specific line, function or branch +.br +.PP + +.RE + +Note that the list of retained files may depend on the order they are processed. For example, if +.I A +and +.I B +contain identical coverage data, then the first one we see will be retained and the second will be pruned. +The file processing order is nondeterministic when the +.BR \-\-parallel +option is used - implying that the pruned result may differ from one execution to the next in this case. + +.BR \-\-prune\-tests must be specified together with +.BR \-\-add\-tracefile . +When specified, +.B lcov +will emit the list of unique files rather than combined tracefile data. +.br + +.RE + +.B \-\-map\-functions +.br +.RS +List tracefiles with non-zero coverage for each function. +.br + +Use this option to determine the list of tracefiles that contain non-zero +coverage data for each function from the list of tracefiles specified by +.BR \-\-add\-tracefile . + +This option must be specified together with +.BR \-\-add\-tracefile . +When specified, +.B lcov +will emit the list of functions and associated tracefiles rather than combined tracefile data. 
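+
+For example, a hypothetical invocation (the tracefile glob pattern is illustrative only):
+.RS
+lcov \-\-add\-tracefile 'results/*.info' \-\-map\-functions
+.RE
+lists, for each function, the tracefiles matching 'results/*.info' which contain non-zero coverage data for that function.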
+.br + +.RE +.B \-\-context\-script +.I script +.br +.RS + +Use +.I script +to collect additional tool execution context information - to aid in +infrastructure debugging and/or tracking. + +See the genhtml man page for more details on the context script. + +.br + +.RE +.B \-\-criteria\-script +.I script +.br +.RS + +Use +.I script +to test for coverage acceptance criteria. + +See the genhtml man page for more details on the criteria script. +Note that lcov does not keep track of date and owner information (see the +.I \-\-annotate\-script +entry in the genhtml man page) - so this information is not passed to the lcov callback. + +.br + +.RE +.B \-\-resolve\-script +.I script +.br +.RS +Use +.I script +to find the file path for some source file which appears in +an input data file if the file is not found after applying +.I \-\-substitute +patterns and searching the +.I \-\-source\-directory +list. This option is equivalent to the +.B resolve_script +config file option. See man +.B lcovrc(5) +for details. +.RE + +.RE +.B \-\-version\-script +.I script +.br +.RS + +Use +.I script +to get a source file's version ID from revision control when +extracting data and to compare version IDs for the purpose of error checking when merging .info files. +.br + +See the genhtml man page for more details on the version script. + +.br + +.RE +.B \-\-comment +.I comment_string +.br +.RS + +Append +.I comment_string +to list of comments emitted into output result file. +This option may be specified multiple times. +Comments are printed at the top of the file, in the order they were specified. + +Comments may be useful to document the conditions under which the trace file was +generated: host, date, environment, +.I etc. + +Note that this option has no effect for lcov overations which do not write an +output result file: +.I \-\-list +.I \-\-summary, +.I \-\-prune\-tests, +and +.I \-\-map\-functions. + +See the +.B geninfo +man page for a description of the comment format in the result file. + .RE .B \-e @@ -563,6 +1195,16 @@ specified at a time. Follow links when searching for .da files. .RE +.BI "\-\-large\-file " +.I regexp +.RS +See the +.I \-\-large\-file +section of man +.B geninfo(1) +for details. +.RE + .B \-\-from\-package .I package .br @@ -576,11 +1218,26 @@ want to perform the .info file creation on the build machine. See \-\-to\-package for more information. .RE +.B \-\-sort\-input +.br +.RS +Specify whether to sort file names before capture and/or aggregation. +Sorting reduces certain types of processing order-dependent output differences. +See the +.BI sort_input +section in +man +.B lcovrc(5). + +.RE + .B \-\-gcov\-tool .I tool .br .RS Specify the location of the gcov tool. + +See the geninfo man page for more details. .RE .B \-h @@ -610,6 +1267,19 @@ escaped accordingly to prevent the shell from expanding them first). Note: The pattern must be specified to match the .B absolute path of each source file. +.br + +If you specify a pattern which does not seem to be correctly applied - files that you expected to be included in the output do not appear - lcov will generate an error message of type 'unused'. See the \-\-ignore\-errors option for how to make lcov ignore the error or turn it into a warning. +.RE + +.B \-\-msg\-log +.I [ log_file_name ] +.br +.RS +Specify location to store error and warning messages (in addition to writing to STDERR). +If +.I log_file_name +is not specified, then default location is used. 
.RE .B \-\-ignore\-errors @@ -620,25 +1290,262 @@ Specify a list of errors after which to continue processing. Use this option to specify a list of one or more classes of errors after which lcov should continue processing instead of aborting. +Note that the tool will generate a warning (rather than a fatal error) unless you ignore the error two (or more) times: +.br +.RS +lcov ... --ignore-errors source,source ... +.RE .I errors can be a comma\-separated list of the following keywords: -.B gcov: +.IP branch: 3 +branch ID (2nd field in the .info file 'BRDA' entry) does not follow expected integer sequence. +.PP + +.IP callback: 3 +Version script error. +.PP + +.IP child: 3 +child process returned non-zero exit code during +.I \-\-parallel +execution. This typically indicates that the child encountered an error: see the log file immediately above this message. +In contrast: the +.B parallel +error indicates an unexpected/unhandled exception in the child process - not a 'typical' lcov error. +.PP + +.IP corrupt: 3 +corrupt/unreadable file found. +.PP + +.IP count: 3 +An excessive number of messages of some class have been reported - subsequent messages of that type will be suppressed. +The limit can be controlled by the 'max_message_count' variable. See man +.B lcovrc(5). +.PP + +.IP deprecated: 3 +You are using a deprecated option. +This option will be removed in an upcoming release - so you should change your +scripts now. +.PP + +.IP empty: 3 +the .info data file is empty (e.g., because all the code was 'removed' or excluded. +.PP + +.IP excessive: 3 +your coverage data contains a suspiciously large 'hit' count which is unlikely +to be correct - possibly indicating a bug in your toolchain. +See the +.I excessive_count_threshold +section in man +.B lcovrc(5) +for details. +.PP + +.IP fork: 3 +Unable to create child process during +.I \-\-parallel +execution. +.br +If the message is ignored ( +.I \-\-ignore\-errors fork +), then genhtml +will wait a brief period and then retry the failed execution. +.br +If you see continued errors, either turn off or reduce parallelism, set a memory limit, or find a larger server to run the task. +.PP + +.IP format: 3 +Unexpected syntax or value found in .info file - for example, negative number or +zero line number encountered. +.PP + +.IP gcov: 3 the gcov tool returned with a non\-zero return code. +.PP + +.IP graph: 3 +the graph file could not be found or is corrupted. +.PP + +.IP inconsistent: 3 +your coverage data is internally inconsistent: it makes two or more mutually +exclusive claims. For example, some expression is marked as both an exception branch and not an exception branch. (See man +.B genhtml(1) +for more details. +.PP + +.IP internal: 3 +internal tool issue detected. Please report this bug along with a testcase. +.PP + +.IP mismatch: 3 +Inconsistent entries found in trace file: + + +.RS 3 +.IP \(bu 3 +branch expression (3rd field in the .info file 'BRDA' entry) of merge data does not match, or +.PP +.IP \(bu 3 +function execution count (FNDA:...) but no function declaration (FN:...). +.PP +.RE +.PP + +.IP missing: 3 +File does not exist or is not readable. +.PP + +.IP negative: 3 +negative 'hit' count found. + +Note that negative counts may be caused by a known GCC bug - see + + https://gcc.gnu.org/bugzilla/show_bug.cgi?id=68080 + +and try compiling with "-fprofile-update=atomic". You will need to recompile, re-run your tests, and re-capture coverage data. +.PP + +.IP package: 3 +a required perl package is not installed on your system. 
In some cases, it is possible to ignore this message and continue - however, certain features will be disabled in that case. +.PP + +.IP parallel: 3 +various types of errors related to parallelism - +.I i.e., +a child process died due to an error. The corresponding error message appears in the log file immediately before the +.I parallel +error. + +If you see an error related to parallel execution that seems invalid, it may be a good idea to remove the \-\-parallel flag and try again. If removing the flag leads to a different result, please report the issue (along with a testcase) so that the tool can be fixed. +.PP -.B source: +.IP parent: 3 +the parent process exited while child was active during +.I \-\-parallel +execution. This happens when the parent has encountered a fatal error - +.I e.g. +an error in some other child which was not ignored. This child cannot continue working without its parent - and so will exit. +.PP + +.IP range: 3 +Coverage data refers to a line number which is larger than the number of +lines in the source file. This can be caused by a version mismatch or +by an issue in the +.I gcov +data. +.PP + +.IP source: 3 the source code file for a data set could not be found. +.PP + +.IP unreachable: 3 +a coverpoint (line, branch, function, or MC/DC) within an "unreachable" region is executed (hit); either the code, directive placement, or both are wrong. +If the error is ignored, the offending coverpoint is retained (not excluded) or not, depending on the value of the +.I retain_unreachable_coverpoints_if_executed +configuration parameter. +See man +.B lcovrc(5) +and the +.I "Exclusion markers" +section of man +.B geninfo(1) +for more information. +.PP + +.IP unsupported: 3 +the requested feature is not supported for this tool configuration. For example, function begin/end line range exclusions use some GCOV features that are not available in older GCC releases. +.PP + +.IP unused: 3 +the include/exclude/erase/omit/substitute pattern did not match any file pathnames. +.PP + +.IP usage: 3 +unsupported usage detected - e.g. an unsupported option combination. + +.PP + +.IP utility: 3 +a tool called during processing returned an error code (e.g., 'find' encountered an unreadable directory). +.PP + +.IP version: 3 +revision control IDs of the file which we are trying to merge are not the same - line numbering and other information may be incorrect. +.PP + +Also see man +.B lcovrc(5) +for a discussion of the 'max_message_count' parameter which can be used to control the number of warnings which are emitted before all subsequent messages are suppressed. This can be used to reduce log file volume. + +.RE + +.BI "\-\-expect\-message\-count message_type:expr[,message_type:expr]" +.RS +Give +.B lcov +a constraint on the number of messages of one or more types which are expected to +be produced during execution. If the constraint is not true, then generate an +error of type +.I "count" +(see above). + +See man +.B genhtml(1) +for more details about the flag, as well as the +.I "expect_message_count" +section in man +.B lcovrc(5) +for a description of the equivalent configuration file option. +.RE + +.BI "\-\-keep\-going " +.RS +Do not stop if error occurs: attempt to generate a result, however flawed. + +This command line option corresponds to the +.I stop_on_error [0|1] +lcovrc option. See man +.B lcovrc(5) +for more details. + +.RE +.BI "\-\-preserve " +.RS +Preserve intermediate data files generated by various steps in the tool - e.g., for debugging. 
By default, these files are deleted.
+
+.RE
+.BI "\-\-filter "
+.I filters
+.RS
+Specify a list of coverpoint filters to apply to input data.
+See the genhtml man page for details.
+
+
+.RE
+.BI "\-\-demangle\-cpp " [param]
+.RS
+Demangle C++ function names. See the genhtml man page for details.
+
 
-.B graph:
-the graph file could not be found or is corrupted.
 .RE
 
 .B \-i
 .br
 .B \-\-initial
 .RS
-Capture initial zero coverage data.
+Capture initial zero coverage data - i.e., from the compile-time '.gcno' data
+files.
+Also see the
+.B \-\-all
+flag, which tells the tool to capture both compile-time ('.gcno') and runtime
+('.gcda') data at the same time.
 
 Run lcov with \-c and this option on the directories containing .bb, .bbg
 or .gcno files before running any test case. The result is a "baseline"
@@ -671,8 +1578,32 @@ Recommended procedure when capturing data for a test case:
 .RS
 # lcov \-a app_base.info \-a app_test.info \-o app_total.info
 .br
+.RE
+The above 4 steps are equivalent to
+.br
+.RS
+# lcov \-\-capture \-\-all \-o app_total.info \-d appdir
 .RE
+
+The combined compile- and runtime data will produce a different result than
+capturing runtime data alone if your project contains some compilation units
+which are not used in any of your testcase executables or shared libraries -
+that is, there are some '.gcno' (compile time) data files that do not
+have matching '.gcda' (runtime) data files.
+In that case, the runtime-only report will not contain any coverpoints from
+the unused files, whereas those coverpoints will appear (with all zero 'hit'
+counts) in the combined report.
+
+The
+.BR \-\-initial
+flag is ignored except in
+.BR \-\-capture
+mode. The
+.BR \-\-all
+flag is ignored if the
+.BR \-\-initial
+flag is specified.
 .RE
 
 .B \-k
@@ -753,26 +1684,61 @@ By convention, lcov\-generated coverage data files are called "tracefiles" and
 should have the filename extension ".info".
 .RE
 
-.B \-\-path
-.I path
+.B \-v
 .br
+.B \-\-verbose
 .RS
-Strip path from filenames when applying diff.
+Increment informational message verbosity. This is mainly used for script and/or flow debugging - e.g., to figure out which data files are found, and where.
+Also see the \-\-quiet flag.
 
-Use this option together with \-\-diff to tell lcov to disregard the specified
-initial path component when matching between tracefile and diff filenames.
-.RE
+Messages are sent to stdout unless no output file was specified (i.e., the coverage data itself is written to stdout), in which case messages are sent to stderr.
 
+.RE
 .B \-q
 .br
 .B \-\-quiet
+.RS
+Decrement informational message verbosity.
+
+Decreased verbosity will suppress 'progress' messages, for example, while error and warning messages will continue to be printed.
+
+.RE
+.B \-\-debug
+.RS
+Increment 'debug messages' verbosity. This is useful primarily to developers who want to enhance the lcov tool suite.
+
+.RE
+
+.BI "\-\-parallel "
+.I [ integer ]
 .br
+.BI "\-j "
+.I [ integer ]
+.RS
+Specify parallelism to use during processing (maximum number of forked child processes). If the optional integer parallelism parameter is zero or is missing, then use up to the number of cores on the machine. Default is to use a single process (no parallelism).
+.br
+Also see the
+.I memory, memory_percentage, max_fork_fails
+and
+.I fork_fail_timeout
+entries in man
+.B lcovrc(5).
+
+.RE
+.BI "\-\-memory "
+.I integer
 .RS
-Do not print progress messages.
+Specify the maximum amount of memory to use during parallel processing, in Mb.
Effectively, the process will not fork() if this limit would be exceeded. Default is 0 (zero) - which means that there is no limit. + +This option may be useful if the compute farm environment imposes strict limits on resource utilization such that the job will be killed if it tries to use too many parallel children - but the user does now know a priori what the permissible maximum is. This option enables the tool to use maximum parallelism - up to the limit imposed by the memory restriction. + +The configuration file +.I memory_percentage +option provided another way to set the maximum memory consumption. +See man +.B lcovrc(5) +for details. -This option is implied when no output filename is specified to prevent -progress messages to mess with coverage data which is also printed to -the standard output. .RE .B \-\-rc @@ -786,7 +1752,7 @@ Use this option to specify a statement which overrides the corresponding configuration statement in the lcovrc configuration file. You can specify this option more than once to override multiple configuration statements. -See +See man .BR lcovrc (5) for a list of available keywords and their meaning. .RE @@ -822,16 +1788,6 @@ Only one of \-z, \-c, \-a, \-e, \-r, \-l, \-\-diff or \-\-summary may be specified at a time. .RE -.B \-\-strip -.I depth -.br -.RS -Strip path components when applying diff. - -Use this option together with \-\-diff to tell lcov to disregard the specified -number of initial directories when matching tracefile and diff filenames. -.RE - .B \-\-summary .I tracefile .br @@ -844,6 +1800,24 @@ Only one of \-z, \-c, \-a, \-e, \-r, \-l, \-\-diff or \-\-summary may be specified at a time. .RE +.B \-\-fail\-under\-branches +.I percentage +.br +.RS +Use this option to tell lcov to exit with a status of 1 if the total +branch coverage is less than +.I percentage. +.RE + +.B \-\-fail\-under\-lines +.I percentage +.br +.RS +Use this option to tell lcov to exit with a status of 1 if the total +line coverage is less than +.I percentage. +.RE + .B \-t .I testname .br @@ -900,8 +1874,6 @@ must be converted to a .info file before recompiling the program or it will become invalid. .RE -.B \-v -.br .B \-\-version .br .RS @@ -922,6 +1894,13 @@ Only one of \-z, \-c, \-a, \-e, \-r, \-l, \-\-diff or \-\-summary may be specified at a time. .RE +.B \-\-tempdir +.I dirname +.br +.RS +Write temporary and intermediate data to indicated directory. Default is "/tmp". +.RE + .SH FILES .I /etc/lcovrc @@ -937,6 +1916,11 @@ The per\-user configuration file. .SH AUTHOR Peter Oberparleiter +Henry Cox +.RS +Filtering, error management, parallel execution sections. +.RE + .SH SEE ALSO .BR lcovrc (5), .BR genhtml (1), @@ -944,3 +1928,7 @@ Peter Oberparleiter .BR genpng (1), .BR gendesc (1), .BR gcov (1) +.br + +.I \*[lcovurl] +.br diff --git a/man/lcovrc.5 b/man/lcovrc.5 index 7819185c..dfdcac5d 100644 --- a/man/lcovrc.5 +++ b/man/lcovrc.5 @@ -1,4 +1,4 @@ -.TH lcovrc 5 "LCOV 1.15" 2020\-08\-07 "User Manuals" +.TH lcovrc 5 "LCOV 2.0" 2023\-05\-17 "User Manuals" .SH NAME lcovrc \- lcov configuration file @@ -13,12 +13,27 @@ code coverage tool (see .br The system\-wide configuration file is located at -.IR /etc/lcovrc . +.IR $LCOV_HOME/etc/lcovrc . +This is typically either +.IR /etc/lcovrc +or +.IR /usr/local/etc/lcovrc +but may be wherever you have installed the lcov package. To change settings for a single user, place a customized copy of this file at location .IR ~/.lcovrc . Where available, command\-line options override configuration file settings. 
+The
+.I genhtml, lcov,
+and
+.I geninfo
+commands also support the
+.I \-\-config\-file
+option, which can be used to specify one or more files which should be used instead of the system or user default rc files.
+Multiple option files may be useful if you have both project- and team-specific common options and want to ensure consistency across multiple users. If multiple \-\-config\-file options are specified, they are applied in the order they appear.
+Note that the "\-\-config\-file" option name must be specified in full and cannot be abbreviated. An error will occur if the option is not recognized.
+
 Lines in a configuration file can either be:
 .IP " *"
 empty lines or lines consisting only of white space characters. These lines are
@@ -29,11 +44,103 @@ lines and will be ignored.
 .IP " *"
 statements in the form
 .RI ' key " = " value '.
+
+.IP " *"
+Values may be taken from environment variables via the syntax
+
+.RI ' key " = " ... $ENV{ENV_VAR_NAME} ...'.
+
+.RI
+The substring '$ENV{ENV_VAR_NAME}' is replaced by the value of the environment variable.
+
+.RI
+One or more environment variables may be used to set the RC value.
+'key' is ignored if any of the environment variables are not set in your user environment.
+
+.PP
 A list of valid statements and their description can be found in
 section 'OPTIONS' below.
+.br
+.B NOTE
+that there is no error checking of keys in the options file: spelling errors
+are simply seen as settings which are not used by some particular tool.
+If you are unsure of whether your options file is read or its values
+applied, you can use the
+.I \-\-verbose \-\-verbose
+flag to enable printing of option value overrides.
+(The option appears twice to enable a higher level of verbosity.)
+
+Both 'list' and 'scalar' (non list) options are supported in the lcovrc file.
+
+For scalar (non list) options:
+.IP " *"
+if specified on the command line and in the lcovrc file, the value specified on the command line wins.
+The value from the RC file is ignored.
+.IP " *"
+Scalar options include:
+.RI 'criteria_script " = " ...'
+,
+.RI 'genhtml_annotate_script " = " ...'
+,
+.RI 'version_script " = " ...'
+, etc.
+.PP
+.PP
+
+.PP
+For list options:
+.IP " *"
+the RC file entry can be used multiple times; each use appends to the list.
+.br
+For example, the entry below will result in two 'omit' patterns which will both be checked:
+.IP " "
+# note explicit start/end line markers in the regexp
+.br
+omit_lines = ^\\s+//\\s*MY_EXCLUDE_MARKER\\s*$
+.br
+# Note that the regexp below matches anywhere on the line
+.br
+omit_lines = NR_CM_DBG_PRINT
+
+.IP " *"
+If entries are specified on the command line, then the RC file entries are ignored: command line wins.
+If entries are specified in more than one RC file (i.e., multiple \-\-config\-file arguments are supplied), then RC files are applied in order of appearance, and list entries are appended in order. For most list-type options, order is not important.
+
+
+.IP " *"
+list options include:
+.br
+.RI " filter = " ...
+.br
+.RI " exclude = " ...
+.br
+.RI " ignore = " ...
+.br
+.RI " substitute = " ...
+.br
+.RI " omit_lines = " ...
+.br
+.RI " erase_functions = " ...
+.br
+.RI " genhtml_annotate_script = " ...
+.br
+.I etc.
+For a complete set of list options, see the documentation of each configuration option, below.
+.PP
+
+.PP
 .PP
 .B Example configuration:
+
+Note that this example does not include all possible configuration
+options.
+.br +In general: (almost) all command line options can be specified in +the configuration file instead, whereas some configuration file options +have no command line equivalent. + +See the OPTIONS section below for details. .IP # .br @@ -41,12 +148,44 @@ section 'OPTIONS' below. .br # .br - +# include some other config file +.br +# e.g, user-specific options. Note the environment variable expansion +.br +# config_file = $ENV{HOME}/.user_lcovrc +.br +# or project specific - hard-coded from environment variable +.br +# config_file = /path/to/myproject/.lcovrc +.br +# or in the current run directory +.br +# config_file = $ENV{PWD}/.lcovrc +.br +.br # External style sheet file .br #genhtml_css_file = gcov.css .br +# Use 'dark' mode display (light foreground/dark background) +.br +# rather than default +.br +#genhtml_dark_mode = 1 +.br + +# Alternate header text to use at top of each page +.br +#genhtml_header = Coverage report for my project + +.br + +# Alternate footer text to use at the bottom of each page +.br +#genhtml_footer = My footer text +.br + # Coverage rate limits .br genhtml_hi_limit = 90 @@ -54,6 +193,100 @@ genhtml_hi_limit = 90 genhtml_med_limit = 75 .br +# Ignore some errors (comma-separated list) +.br +#ignore_errors = empty,mismatch +.br + +# Stop emitting message after this number have been printed +.br +# 0 == no limit +.br +max_message_count = 100 + +.br +# If nonzero, do not stop when an 'ignorable error' occurs - try +.br +# to generate a result, however flawed. This is equivalent to +.br +# the '--keep-going' command line switch. +.br +# Default is 1: stop when error occurs +.br +#stop_on_error = 1 + +.br +# If nonzero, treat warnings as error +.br +# note that ignored messages will still appear as warnings +.br +# Default is 0 +.br +#treat_warning_as_error = 1 + +.br +# If set to non-zero, only issue particular warning once per file +.br +# Default is 1 +.br +#warn_once_per_file = 1 + +.br +# extension associated with lcov trace files - glob match pattern +.br +# used as argument to 'find' - to find coverage files contained in +.br +# a directory argument +.br +#info_file_pattern = *.info +.br + +# list of file extensions which should be treated as C/C++ code +.br +# (comma-separated list) +.br +#c_file_extensions = h,c,cpp,hpp +.br + +# list of file extensions which should be treated as RTL code +.br +# (e.g., Verilog) (comma-separated list) +.br +#rtl_file_extensions = v,vh,sv +.br + +# list of file extensions which should be treated as Java code +.br +#java_file_extensions = java + +# list of file extensions which should be treated as perl code +.br +#perl_file_extensions = pl,pm +.br + +# list of file extensions which should be treated as python code +.br +#python_file_extensions = py +.br + +# maximum number of lines to look at, when filtering bogus branch expressions +.br +#filter_lookahead = 5 +.br + +# if nonzero, bitwise operators '|', '&', '~' indicate conditional expressions +.br +#filter_bitwise_conditional = 1 +.br +.br +# if nonzero, '--filter blank' is applied to blank lines, regardless +.br +# of their hit count +.br +#filter_blank_aggressive = 1 +.br +.br + # Width of line coverage field in source code view .br genhtml_line_field_width = 12 @@ -64,6 +297,20 @@ genhtml_line_field_width = 12 genhtml_branch_field_width = 16 .br +# Width of MC/DC coverage field in source code view +.br +genhtml_mcdc_field_width = 14 +.br + +# width of 'owner' field in source code view - default is 20 +.br +genhtml_owner_field_width = 20 +.br +# width of 'age' field in 
source code view - default is 5
+.br
+genhtml_age_field_width = 5
+.br
+
 # Width of overview image
 .br
 genhtml_overview_width = 80
@@ -99,11 +346,6 @@ genhtml_no_source = 0
 genhtml_num_spaces = 8
 .br
 
-# Highlight lines with converted\-only data if non\-zero
-.br
-genhtml_highlight = 0
-.br
-
 # Include color legend in HTML output if non\-zero
 .br
 genhtml_legend = 0
@@ -134,14 +376,16 @@ genhtml_legend = 0
 genhtml_sort = 1
 .br
 
-# Include function coverage data display
+# Display coverage data in hierarchical directory structure
+.br
+# (rather than flat/3 level)
 .br
-#genhtml_function_coverage = 1
+#genhtml_hierarchical = 1
 .br
 
-# Include branch coverage data display
+# Display coverage data using 'flat' view
 .br
-#genhtml_branch_coverage = 1
+#genhtml_flat_view = 1
 .br
 
 # Specify the character set of all generated HTML pages
@@ -164,18 +408,62 @@ genhtml_desc_html=0
 #genhtml_missed=1
 .br
 
+# group function aliases in report - see '--merge' section in man genhtml(1)
+.br
+#merge_function_aliases = 1
+.br
+
+# If set, suppress list of aliases in function detail table
+.br
+#suppress_function_aliases = 1
+.br
+
+# If set, derive function end line from line coverpoint data - default ON
+.br
+#derive_function_end_line = 1
+.br
+
+# If set, derive function end lines for all file types.
+# By default, we derive end lines for C/C++ files only
+#
+.br
+#derive_end_line_all_files = 0
+.br
+
+# Maximum size of function (number of lines) which will be checked by '--filter trivial'
+.br
+#trivial_function_threshold = 5
+.br
+
+# Set threshold for hit count which tool should deem likely to indicate
+.br
+# a toolchain bug (corrupt coverage data)
+.br
+# excessive_count_threshold = number
+.br
+
 # Demangle C++ symbols
 .br
-#genhtml_demangle_cpp=1
+# Call multiple times to specify command and command line arguments
+.br
+# ('-Xlinker'-like behaviour)
+.br
+#demangle_cpp = c++filt
 .br
 
 # Name of the tool used for demangling C++ function names
 .br
+# This argument is deprecated - please use 'demangle_cpp' instead
+.br
 #genhtml_demangle_cpp_tool = c++filt
 .br
 
 # Specify extra parameters to be passed to the demangling tool
 .br
+# This argument is deprecated - use Xlinker-like demangle_cpp
+.br
+# parameter instead
+.br
 #genhtml_demangle_cpp_params = ""
 .br
 
@@ -189,9 +477,12 @@ genhtml_desc_html=0
 #geninfo_adjust_testname = 0
 .br
 
-# Calculate a checksum for each line if non\-zero
+# Ignore testcase names in .info file
+forget_testcase_names = 0
+
+# Calculate and/or verify a checksum for each line if non\-zero
 .br
-geninfo_checksum = 0
+checksum = 0
 .br
 
 # Enable libtool compatibility mode if non\-zero
@@ -206,12 +497,26 @@ geninfo_compat_libtool = 0
 #geninfo_external = 1
 .br
 
+# Specify whether to capture coverage data from compile-time data files
+.br
+# which have no corresponding runtime data.
+.br
+#geninfo_capture_all = 1
+.br
+
 # Use gcov's --all-blocks option if non-zero
 .br
 #geninfo_gcov_all_blocks = 1
 .br
 
-# Specify compatiblity modes (same as \-\-compat option
+# Adjust 'executed' non-zero hit count of lines which contain no branches
+.br
+# and have attribute '"unexecuted_blocks": true'
+.br
+#geninfo_unexecuted_blocks = 0
+.br
+
+# Specify compatibility modes (same as \-\-compat option
 .br
 # of geninfo)
 .br
@@ -238,6 +543,11 @@ geninfo_auto_base = 1
 geninfo_intermediate = auto
 .br
 
+# Specify if exception branches should be excluded from branch coverage.
+.br
+no_exception_branch = 0
+.br
+
 # Directory containing gcov kernel files
 .br
 lcov_gcov_dir = /proc/gcov
@@ -268,713 +578,2711 @@ lcov_list_width = 80
 .br
 lcov_list_truncate_max = 20
 
-# Specify if function coverage data should be collected and
+# Specify if function coverage data should be collected, processed, and
 .br
-# processed.
+# displayed.
 .br
-lcov_function_coverage = 1
+function_coverage = 1
 .br
 
-# Specify if branch coverage data should be collected and
+# Specify if branch coverage data should be collected, processed, and
 .br
-# processed.
+# displayed.
 .br
-lcov_branch_coverage = 0
+branch_coverage = 0
 .br
 
+# Specify if Modified Condition / Decision Coverage data should be collected,
+.br
+# processed, and displayed.
+.br
+mcdc_coverage = 0
+.br
+
+# Ask lcov/genhtml/geninfo to return non-zero exit code if branch coverage is
+.br
+# below specified threshold percentage.
+.br
+fail_under_branches = 75.0
+.br
+
+# Ask lcov/genhtml/geninfo to return non-zero exit code if line coverage is
+.br
+# below specified threshold percentage.
+.br
+#fail_under_lines = 97.5
+.br
+
+# Specify JSON module to use, or choose best available if
+.br
+# set to auto
+.br
+lcov_json_module = auto
+.br
+
+# Specify maximum number of parallel slaves
+.br
+# default: 1 (no parallelism)
+.br
+#parallel = 1
+.br
+
+.br
+# Specify maximum memory to use during parallel processing, in Mb.
+.br
+# Do not fork if estimated memory consumption exceeds this number.
+.br
+# default: 0 (no limit)
+.br
+#memory = 1024
+.br
+
+.br
+# Specify the number of consecutive fork() failures to allow before
+.br
+# giving up
+.br
+# max_fork_fails = 5
+.br
+
+.br
+# Seconds to wait after failing to fork() before retrying
+.br
+# fork_fail_timeout = 10
+.br
+
+.br
+# Throttling control: specify a percentage of system memory to use as
+.br
+# maximum during parallel processing.
+.br
+# Do not fork if estimated memory consumption exceeds the maximum.
+.br
+# This value is used only if the maximum memory is not set.
+.br +# default: not set +.br +#memory_percentage = 75 +.br -.BR genhtml_overview_width " =" -.I pixel_size -.IP -Specify the width (in pixel) of the overview image created when generating HTML -output using the \-\-frames option of -.BR genhtml . +.br +# Character used to split list-type parameters +.br +# \- for example, the list of "--ignore_errors source,mismatch" +.br +# default: , (comma) +.br +#split_char = , .br -Default is 80. -.PP -.BR genhtml_nav_resolution " =" -.I lines -.IP -Specify the resolution of overview navigation when generating HTML output using -the \-\-frames option of -.BR genhtml . -This number specifies the maximum difference in lines between the position a -user selected from the overview and the position the source code window is -scrolled to. +.br +# use case insensitive compare to find matching files, for include/exclude +.br +# directives, etc +.br +#case_insensitive = 0 .br -Default is 4. -.PP +.br +# override line default line exclusion regexp +.br +#lcov_excl_line = LCOV_EXCL_LINE +.br +.br +# override branch exclusion regexp +.br +#lcov_excl_br_line = LCOV_EXCL_BR_LINE +.br -.BR genhtml_nav_offset " =" -.I lines -.IP -Specify the overview navigation line offset as applied when generating HTML -output using the \-\-frames option of -.BR genhtml. +.br +# override exception branch exclusion regexp +.br +#lcov_excl_exception_br_line = LCOV_EXCL_EXCEPTION_BR_LINE .br -Clicking a line in the overview image should show the source code view at -a position a bit further up, so that the requested line is not the first -line in the window. This number specifies that offset. +.br +# override start of exclude region regexp +.br +#lcov_excl_start = LCOV_EXCL_START .br -Default is 10. -.PP +.br +# override end of exclude region regexp +.br +#lcov_excl_stop = LCOV_EXCL_STOP +.br +.br +# override unreachable line default line exclusion regexp +.br +#lcov_unreachable_line = LCOV_UNREACHABLE_LINE +.br +.br +# override start of unreachable region regexp +.br +#lcov_unreachable_start = LCOV_UNREACHABLE_START +.br -.BR genhtml_keep_descriptions " =" -.IR 0 | 1 -.IP -If non\-zero, keep unused test descriptions when generating HTML output using -.BR genhtml . +.br +# override end of unreachable region regexp +.br +#lcov_unreachable_stop = LCOV_UNREACHABLE_STOP .br -This option corresponds to the \-\-keep\-descriptions option of -.BR genhtml . + +.br +# override start of branch exclude region regexp +.br +#lcov_excl_br_start = LCOV_EXCL_BR_START .br -Default is 0. -.PP +.br +# override start of exclude region regexp +.br +#lcov_excl_br_stop = LCOV_EXCL_BR_STOP +.br -.BR genhtml_no_prefix " =" -.IR 0 | 1 -.IP -If non\-zero, do not try to find and remove a common prefix from directory names. +.br +# override start of exclude region regexp +.br +#lcov_excl_exception_br_start = LCOV_EXCL_EXCEPTION_BR_START .br -This option corresponds to the \-\-no\-prefix option of -.BR genhtml . .br +# override start of exclude region regexp +.br +#lcov_excl_exception_br_stop = LCOV_EXCL_EXCEPTION_BR_STOP -Default is 0. .PP -.BR genhtml_no_source " =" -.IR 0 | 1 +.SH OPTIONS + +.BR config_file " =" +.I filename .IP -If non\-zero, do not create a source code view when generating HTML output using -.BR genhtml . -.br -This option corresponds to the \-\-no\-source option of -.BR genhtml . -.br +Include another config file. -Default is 0. -.PP +Inclusion is equivalent to inserting the text from +.I filename +at this point in the current file. 
As a result, settings from the included +file are processed after earlier settings in the current file, but before later settings from the current file. +As a result: -.BR genhtml_num_spaces " =" -.I num +.BR "Scalar options" +set earlier in the current file are overridden by settings from the included file, and scalar options from the included file are overridden by later setting in the current file. + +.BR "Array options" +from earlier in the current file appear before setting from the included file, and array options from later in the current file appear after. + +Config file inclusion is recursive: an included config file may include another file - and so on. +Inclusion loops are not supported and will result in a +.I usage +error. + +The most common usecase for config file inclusion is so that a site-wide or project-wide options file can include a user-specific or module-specific options file - for example, as + +.RS 3 .IP -Specify the number of spaces to use as replacement for tab characters in the -HTML source code view as generated by -.BR genhtml . + ... .br - -This option corresponds to the \-\-num\-spaces option of -.BR genthml . +config_file = $ENV{HOME}/.lcovrc_user .br + ... +.PP +.RE -Default is 8. .PP -.BR genhtml_highlight " =" -.IR 0 | 1 +.BR genhtml_css_file " =" +.I filename .IP -If non\-zero, highlight lines with converted\-only data in -HTML output as generated by +Specify an external style sheet file. Use this option to modify the appearance of the HTML output as generated by .BR genhtml . +During output generation, a copy of this file will be placed in the output +directory. .br -This option corresponds to the \-\-highlight option of +This option corresponds to the \-\-css\-file command line option of .BR genhtml . .br -Default is 0. +By default, a standard CSS file is generated. .PP -.BR genhtml_legend " =" -.IR 0 | 1 +.BR genhtml_header " =" +.I string .IP -If non\-zero, include a legend explaining the meaning of color coding in the HTML -output as generated by -.BR genhtml . + +Specify header text to use ta top of each HTML page. .br -This option corresponds to the \-\-legend option of +This option corresponds to the \-\-header\-title command line option of .BR genhtml . .br +Default is "LCOV - (differential )? coverage report" -Default is 0. .PP -.BR genhtml_html_prolog " =" -.I filename +.BR genhtml_footer " =" +.I string .IP -If set, include the contents of the specified file at the beginning of HTML -output. +Specify footer text to use at bottom of each HTML page. +.br -This option corresponds to the \-\-html\-prolog option of +This option corresponds to the \-\-footer command line option of .BR genhtml . .br -Default is to use no extra prolog. +Default is LCOV tool version string. + .PP -.BR genhtml_html_epilog " =" -.I filename +.BR genhtml_dark_mode " =" +.IR 0 | 1 .IP -If set, include the contents of the specified file at the end of HTML output. +If non-zero, display using light text on dark background rather than dark text on light background. +.br -This option corresponds to the \-\-html\-epilog option of +This option corresponds to the \-\-dark\-mode command line option of .BR genhtml . .br -Default is to use no extra epilog. +By default, a 'light' palette is used. .PP -.BR genhtml_html_extension " =" -.I extension +.BR genhtml_hi_limit " =" +.I hi_limit +.br +.BR genhtml_med_limit " =" +.I med_limit +.br .IP -If set, use the specified string as filename extension for generated HTML files. +Specify coverage rate limits for classifying file entries. 
Use this option to +modify the coverage rates (in percent) for line, function and branch coverage at +which a result is classified as high, medium or low coverage. This +classification affects the color of the corresponding entries on the overview +pages of the HTML output: +.br -This option corresponds to the \-\-html\-extension option of -.BR genhtml . +High: hi_limit <= rate <= 100 default color: green +.br +Medium: med_limit <= rate < hi_limit default color: yellow +.br +Low: 0 <= rate < med_limit default color: red .br -Default extension is "html". +Defaults are 90 and 75 percent. + +There are also options to configure different thresholds for line, branch, and function coverages. See below. .PP -.BR genhtml_html_gzip " =" -.IR 0 | 1 +.BR genhtml_line_hi_limit " =" +.I line_hi_limit +.br +.BR genhtml_line_med_limit " =" +.I line_med_limit +.br .IP -If set, compress all html files using gzip. +Specify specific threshold for line coverage limits used to decide whether a particular line coverage percentage is classified as high, medium, or low coverage. +If the line-specific values are not specified, then the default +.I genhtml_med_limit +or +.I genhtml_hi_limit +values are used. +.PP -This option corresponds to the \-\-html\-gzip option of -.BR genhtml . +.BR genhtml_branch_hi_limit " =" +.I branch_hi_limit .br +.BR genhtml_branch_med_limit " =" +.I branch_med_limit +.br +.IP +Specify specific threshold for branch coverage limits used to decide whether a particular branch coverage percentage is classified as high, medium, or low coverage. +If the branch-specific values are not specified, then the default +.I genhtml_med_limit +or +.I genhtml_hi_limit +values are used. +.PP -Default extension is 0. +.BR genhtml_function_hi_limit " =" +.I function_hi_limit +.br +.BR genhtml_function_med_limit " =" +.I function_med_limit +.br +.IP +Specify specific threshold for function coverage limits used to decide whether a particular function coverage percentage is classified as high, medium, or low coverage. +If the function-specific values are not specified, then the default +.I genhtml_med_limit +or +.I genhtml_hi_limit +value is used. .PP -.BR genhtml_sort " =" -.IR 0 | 1 +.BR rtl_file_extensions " =" +.IR str[,str]+ .IP -If non\-zero, create overview pages sorted by coverage rates when generating -HTML output using -.BR genhtml . +Specify a comma-separated list of file extensions which should be assumed to be RTL code (e.g., Verilog). .br -This option can be set to 0 by using the \-\-no\-sort option of -.BR genhtml . +If not specified, the default set is 'v,vh,sv,vhdl?'. +There is no command line option equivalent. .br +This option is used by genhtml and lcov. -Default is 1. .PP -.BR genhtml_function_coverage " =" -.IR 0 | 1 +.BR info_file_pattern " =" +.IR str .IP -If non\-zero, include function coverage data when generating HTML output using -.BR genhtml . -.br +Specify a glob-match pattern associated with lcov trace files (suitable as an argument to 'find'. +If not specified, the default is '*.info'. -This option can be set to 0 by using the \-\-no\-function\-coverage option of -.BR genhtml . -.br - -Default is 1. .PP -.BR genhtml_branch_coverage " =" -.IR 0 | 1 +.BR c_file_extensions " =" +.IR str[,str]+ .IP -If non\-zero, include branch coverage data when generating HTML output using -.BR genhtml . +Specify a comma-separated list of file extensions which should be assumed to be C/C++ code. .br -This option can be set to 0 by using the \-\-no\-branch\-coverage option of -.BR genhtml . 
+If not specified, the default set is 'c,h,i,C,H,I,icc,cpp,cc,cxx,hh,hpp,hxx'. +If you want all files to be treated as C/C++ code, you can use: +.I c_file_extensions = .* + +This parameter must be set from the lcovrc file or via the +.I \-\-rc name=value +command line option; note that you may need to protect the value from shell expansion in the latter case. .br -Default is 1. .PP -.BR genhtml_charset " =" -.I charset +.BR java_file_extensions " =" +.IR str[,str]+ .IP -Specify the character set of all generated HTML pages. +Specify a comma-separated list of file extensions which should be assumed to be Java code. .br -Use this option if the source code contains characters which are not -part of the default character set. Note that this option is ignored -when a custom HTML prolog is specified (see also -.BR genhtml_html_prolog ). +If not specified, the default set is 'java'. +If you want all files to be treated as Java code, you can use: +.I java_file_extensions = .* + +This parameter must be set from the lcovrc file or via the +.I \-\-rc name=value +command line option; note that you may need to protect the value from shell expansion in the latter case. .br -Default is UTF-8. .PP -.BR genhtml_demangle_cpp " =" -.IR 0 | 1 +.BR perl_file_extensions " =" +.IR str[,str]+ .IP -If non-zero, demangle C++ function names in function overviews. - -Set this option to one if you want to convert C++ internal function -names to human readable format for display on the HTML function overview -page. This option requires that the c++filt tool is installed (see -.BR c++filt(1) -). +Specify a comma-separated list of file extensions which should be assumed to be Perl code. .br -This option corresponds to the \-\-demangle\-cpp command line option of -.BR genhtml . +If not specified, the default set is 'pl,pm'. +If you want all files to be treated as Perl code, you can use: +.I perl_file_extensions = .* + +This parameter must be set from the lcovrc file or via the +.I \-\-rc name=value +command line option; note that you may need to protect the value from shell expansion in the latter case. .br -Default is 0. .PP -.BR genhtml_demangle_cpp_tool " =" -.I path_to_c++filt +.BR python_file_extensions " =" +.IR str[,str]+ .IP -Specify the location of the demangle tool (see -.BR c++filt (1)) -used to convert C++ internal function names to human readable format -for display on the HTML function overview page. +Specify a comma-separated list of file extensions which should be assumed to be Python code. .br -Default is 'c++filt'. -.PP - -.BR genhtml_demangle_cpp_params " =" -.I parameters -.IP -Specify extra parameters to be passed to the demangling tool +If not specified, the default set is 'py'. +If you want all files to be treated as Python code, you can use: +.I python_file_extensions = .* -Use this option if your environment requires additional parameters such -as --no-strip-underscore for correctly demangling C++ internal function -names. See also -.BR c++filt (1)). +This parameter must be set from the lcovrc file or via the +.I \-\-rc name=value +command line option; note that you may need to protect the value from shell expansion in the latter case. .br -Default is "". + .PP -.BR genhtml_desc_html " =" -.IR 0 | 1 +.BR filter_lookahead " =" +.IR integer .IP -If non-zero, test case descriptions may contain HTML markup. +Specify the maximum number of lines to look at when filtering bogus branch expressions. +A larger number may catch more cases, but will increase execution time. 
+.br -Set this option to one if you want to embed HTML markup (for example to -include links) in test case descriptions. When set to zero, HTML markup -characters will be escaped to show up as plain text on the test case -description page. +If not specified, the default set is 10. +There is no command line option equivalent. .br +This option is used by genhtml and lcov. -Default is 0. .PP -.BR genhtml_precision " =" -.IR 1 | 2 | 3 | 4 +.BR filter_bitwise_conditional " =" +.IR 0|1 .IP -Specify how many digits after the decimal-point should be used for -displaying coverage rates. +If set to non-zero value, bogus branch filtering will assume that expressions containing bitwise operators '&', '|', '~' are conditional expressions - and will not filter them out. .br -Default is 1. -.PP -.BR genhtml_missed " =" -.IR 0 | 1 -.IP -If non-zero, the count of missed lines, functions, or branches is shown -as negative numbers in overview pages. +If not specified, the default set is 0 (do not treat them as conditional). +There is no command line option equivalent. .br +This option is used by genhtml and lcov. -Default is 0. .PP -. -.BR geninfo_gcov_tool " =" -.I path_to_gcov +.BR filter_blank_aggressive " =" +.IR 0|1 .IP -Specify the location of the gcov tool (see -.BR gcov (1)) -which is used to generate coverage information from data files. +If set to non-zero value, then blank source lines will be ignored whether +or not their 'hit' count is zero. +See the +.I \-\-filter blank +section in man +.B genhtml(1). .br -Default is 'gcov'. -.PP - -.BR geninfo_adjust_testname " =" -.IR 0 | 1 -.IP -If non\-zero, adjust test names to include operating system information -when capturing coverage data. +If not specified, the default set is 0 (filter blank lines only if they are not hit). .br +There is no command line option equivalent. -Default is 0. .PP -.BR geninfo_checksum " =" -.IR 0 | 1 +.BR ignore_errors " =" +.IR message_type(,message_type)* .IP -If non\-zero, generate source code checksums when capturing coverage data. -Checksums are useful to prevent merging coverage data from incompatible -source code versions but checksum generation increases the size of coverage -files and the time used to generate those files. +Specify a message type which should be ignored. .br -This option corresponds to the \-\-checksum and \-\-no\-checksum command line -option of -.BR geninfo . +This option can be used multiple times in the lcovrc file to ignore multiple message types. + +This option is equivalent to the \-\-ignore\-errors option to geninfo, genhtml, or lcov. Note that the lcovrc file message list is not applied (those messages NOT ignored) if the '\-\-ignore\-errors' command line option is specified. .br +This option is used by genhtml, lcov, and geninfo. -Default is 0. .PP -.BR geninfo_compat_libtool " =" -.IR 0 | 1 +.BR expect_message_count " =" +.IR message_type:expr(,message_type:expr)* .IP -If non\-zero, enable libtool compatibility mode. When libtool compatibility -mode is enabled, lcov will assume that the source code relating to a .da file -located in a directory named ".libs" can be found in its parent directory. -.br +Specify +a constraint on the number of messages of one or more types which are expected to +be produced during tool execution. If the constraint is not true, an +error of type +.I "count" +will be generated. -This option corresponds to the \-\-compat\-libtool and \-\-no\-compat\-libtool -command line option of -.BR geninfo . 
+Multiple constraints can be specified using a comma-separated list or +by using the option multiple times. + + +Substitutions are performed on the expression before it is evaluated: + +For example: + +.RS 3 + +.IP \- 3 +expect_message_count = inconsistent : %C == 5 .br +says that you expect exactly 5 messages of this type +.PP -Default is 1. +.IP \- 3 +expect_message_count inconsistent : %C > 6 && %C <= 10 +.br +says that you expect the number of messages to be in the range (6:10]. .PP -.BR geninfo_external " =" -.IR 0 | 1 -.IP -If non\-zero, capture coverage data for external source files. +.RE -External source files are files which are not located in one of the directories -(including sub-directories) -specified by the \-\-directory or \-\-base\-directory options of -.BR lcov / geninfo . +This option is useful if errors are caused by conditions that you cannot +fix - for example, due to inconsistent coverage data generated by your +toolchain. In those scenarios, you may decide: -Default is 1. +.RS 3 + +.IP \- 3 +to exclude the offending code, or +.PP +.IP \- 3 +to exclude the entire offending file(s), or .PP +.IP \- 3 +to ignore the +messages, either by converting them to warnings or suppressing them entirely. +.PP +.RE -.BR geninfo_gcov_all_blocks " =" -.IR 0 | 1 -.IP -If non\-zero, call the gcov tool with option --all-blocks. +In the latter case, this option provides some additional safety by warning +you when the count differs due to some change which occurred, giving you +the opportunity to diagnose the change and/or to review message changes. -Using --all-blocks will produce more detailed branch coverage information for -each line. Set this option to zero if you do not need detailed branch coverage -information to speed up the process of capturing code coverage or to work -around a bug in some versions of gcov which will cause it to endlessly loop -when analysing some files. +This option is equivalent to the +.I "\-\-expect\-message\-count" +command line flag. -Default is 1. .PP -.BR geninfo_compat " =" -.IR mode = value [, mode = value ,...] +.BR max_message_count " =" +.IR integer .IP -Specify that geninfo should enable one or more compatibility modes -when capturing coverage data. +Set the maximum number of warnings of any particular type which should be emitted. This can be used to reduce the size of log files. +.br -This option corresponds to the \-\-compat command line option of -.BR geninfo . +No more warnings will be printed after this number is reached. 0 (zero) is interpreted as 'no limit'. +.br +This option is used by genhtml, lcov, and geninfo. -Default is 'libtool=on, hammer=auto, split_crc=auto'. .PP -.BR geninfo_adjust_src_path " =" -.IR pattern " => " replacement -.br -.BR geninfo_adjust_src_path " =" -.I pattern +.BR message_log " =" +.IR filename .IP -Adjust source paths when capturing coverage data. - -Use this option in situations where geninfo cannot find the correct -path to source code files of a project. By providing a -.I pattern -in Perl regular expression format (see -.BR perlre (1)) -and an optional replacement string, you can instruct geninfo to -remove or change parts of the incorrect source path. - -.B Example: +Specify location to store error and warning messages (in addition to writing to STDERR). +If not specified, then the default location is used. .br -1. When geninfo reports that it cannot find source file .br +This attribute is equivalent to the +.I \-\-msg\-log +command line option. 
The command line option takes precedence if both are specified. - /path/to/src/.libs/file.c -.br +.PP -while the file is actually located in +.BR stop_on_error " = " +.IR 0|1 +.IP +If set to 0, tell the tools to ignore errors and keep going to try to generate a result - however flawed or incomplete that result might be. +Note that some errors cannot be ignored and that ignoring some errors may lead to other errors. .br +The tool will return a non-zero exit code if one or more errors are detected +during execution when +.I stop_on_error +is disabled. That is, the tool will continue execution in the presence +of errors but will return an exit status. - /path/to/src/file.c .br -use the following parameter: -.br +This is equivalent to the +.I '\-\-keep\-going' +command line option. - geninfo_adjust_src_path = /.libs +Default is 1: stop when error occurs. -This will remove all "/.libs" strings from the path. +If the +.I 'ignore_error msgType' +option is also used, then those messages will be treated as warnings rather than errors (or will be entirely suppressed if the message type appears multiple times in the ignore_messages option). Warnings do not cause a non-zero exit status. -2. When geninfo reports that it cannot find source file -.br +This option is used by genhtml, lcov, and geninfo. - /tmp/build/file.c -.br -while the file is actually located in -.br +.PP - /usr/src/file.c -.br +.BR treat_warning_as_error " = " +.IR 0|1 +.IP +If set to 1, tell the tools that messages which are normally treated as +warnings ( +.I e.g., +certain usage messages) should be treated as errors. -use the following parameter: -.br +Note that ignored messages will still appear as warnings: see the +.I ignore_errors +entry, above. - geninfo_adjust_src_path = /tmp/build => /usr/src -.br +This option is used by genhtml, lcov, and geninfo. -This will change all "/tmp/build" strings in the path to "/usr/src". .PP -.BR geninfo_auto_base " =" -.IR 0 | 1 +.BR warn_once_per_file " = " +.IR 0|1 .IP -If non\-zero, apply a heuristic to determine the base directory when -collecting coverage data. -.br +If set to 1, tell the tools to emit certain errors only once per file +(rather than multiple times, if the issue occurs multiple times in the same +file). -Use this option when using geninfo on projects built with libtool or -similar build environments that work with multiple base directories, -i.e. environments, where the current working directory when invoking the -compiler ist not the same directory in which the source code file is -located, and in addition, is different between files of the same project. -.br +Default is 1: do not report additional errors. + +This option is used by genhtml, lcov, and geninfo. -Default is 1. .PP -.BR geninfo_intermediate " =" -.IR 0 | 1 | auto +.BR check_data_consistency " = " +.IR 0|1 .IP -Specify whether to use gcov intermediate format -.br +If set to 1, tell the tools to execute certain data consistency checks - +.I e.g., +that function with a non-zero hit count contains at least one line with a non-zero +hit count - and vice versa. -Use this option to control whether geninfo should use the gcov intermediate -format while collecting coverage data. The use of the gcov intermediate format -should increase processing speed. It also provides branch coverage data when -using the \-\-initial command line option. -.br +It may be useful to use this option to disable checking if you have inconsistent +legacy data and have +no way to correct or exclude it. 
-Valid values are 0 for off, 1 for on, and "auto" to let geninfo automatically -use immediate format when supported by gcov. -.br +Default is 1: execute consistency checks. -Default is "auto". .PP -.BR geninfo_no_exception_branch " =" -.IR 0 | 1 +.BR genhtml_line_field_width " =" +.I number_of_characters .IP -Specify whether to exclude exception branches from branch coverage. +Specify the width (in characters) of the source code view column containing +line coverage information. .br -Default is 0. +Default is 12. .PP -.BR lcov_gcov_dir " =" -.I path_to_kernel_coverage_data +.BR genhtml_branch_field_width " =" +.I number_of_characters .IP -Specify the path to the directory where kernel coverage data can be found -or leave undefined for auto-detection. +Specify the width (in characters) of the source code view column containing +branch coverage information. .br -Default is auto-detection. +Default is 16. .PP -.BR lcov_tmp_dir " =" -.I temp +.BR genhtml_mcdc_field_width " =" +.I number_of_characters .IP -Specify the location of a directory used for temporary files. +Specify the width (in characters) of the source code view column containing +MC/DC coverage information. .br -Default is '/tmp'. +Default is 14. .PP -.BR lcov_list_full_path " =" -.IR 0 | 1 +.BR genhtml_owner_field_width " =" +.I number_of_characters .IP -If non-zero, print the full path to source code files during a list operation. +Specify the width (in characters) of the source code view column containing +owner information (as reported by your annotation script. This option has an effect only if you are using a source annotation script: see the \-\-annotation-script option in the genhtml man page. .br -This option corresponds to the \-\-list\-full\-path option of -.BR lcov . +Default is 20. +.PP + +.BR genhtml_age_field_width " =" +.I number_of_characters +.IP +Specify the width (in characters) of the source code view column containing +age of the corresponding block (as reported by your annotation script). This option has an effect only if you are using a source annotation script: see the \-\-annotation-script option in the genhtml man page. .br -Default is 0. +Default is 5. .PP -.BR lcov_list_max_width " =" -.IR width +.BR genhtml_frames " =" +.I 0 | 1 .IP -Specify the maximum width for list output. This value is ignored when -lcov_list_full_path is non\-zero. +Specify whether source detail view should contain a navigation image. +See the +.I \-\-frame +entry in the +.B genhtml +man page. +.PP + +.BR genhtml_overview_width " =" +.I pixel_size +.IP +Specify the width (in pixel) of the overview image created when generating HTML +output using the \-\-frames option of +.BR genhtml . .br Default is 80. .PP -.BR lcov_list_truncate_max -.B " =" -.IR percentage +.BR genhtml_nav_resolution " =" +.I lines .IP -Specify the maximum percentage of file names which may be truncated when -choosing a directory prefix in list output. This value is ignored when -lcov_list_full_path is non\-zero. +Specify the resolution of overview navigation when generating HTML output using +the \-\-frames option of +.BR genhtml . +This number specifies the maximum difference in lines between the position a +user selected from the overview and the position the source code window is +scrolled to. .br -Default is 20. +Default is 4. +.PP + + +.BR genhtml_nav_offset " =" +.I lines +.IP +Specify the overview navigation line offset as applied when generating HTML +output using the \-\-frames option of +.BR genhtml. 
+.br + +Clicking a line in the overview image should show the source code view at +a position a bit further up, so that the requested line is not the first +line in the window. This number specifies that offset. +.br + +Default is 10. .PP -.BR lcov_function_coverage " =" + +.BR genhtml_keep_descriptions " =" .IR 0 | 1 .IP -Specify whether lcov should handle function coverage data. +If non\-zero, keep unused test descriptions when generating HTML output using +.BR genhtml . .br -Setting this option to 0 can reduce memory and CPU time consumption -when lcov is collecting and processing coverage data, as well as -reduce the size of the resulting data files. Note that setting -.B genhtml_function_coverage -will override this option for HTML generation. +This option corresponds to the \-\-keep\-descriptions option of +.BR genhtml . .br -Default is 1. +Default is 0. .PP -.BR lcov_branch_coverage " =" +.BR genhtml_no_prefix " =" .IR 0 | 1 .IP -Specify whether lcov should handle branch coverage data. +If non\-zero, do not try to find and remove a common prefix from directory names. .br -Setting this option to 0 can reduce memory and CPU time consumption -when lcov is collecting and processing coverage data, as well as -reduce the size of the resulting data files. Note that setting -.B genhtml_branch_coverage -will override this option for HTML generation. +This option corresponds to the \-\-no\-prefix option of +.BR genhtml . .br Default is 0. .PP -.BR lcov_excl_line " =" -.I expression +.BR genhtml_no_source " =" +.IR 0 | 1 .IP -Specify the regular expression of lines to exclude. +If non\-zero, do not create a source code view when generating HTML output using +.BR genhtml . .br -Default is 'LCOV_EXCL_LINE'. +This option corresponds to the \-\-no\-source option of +.BR genhtml . +.br + +Default is 0. .PP -.BR lcov_excl_br_line " =" -.I expression +.BR genhtml_num_spaces " =" +.I num .IP -Specify the regular expression of lines to exclude from branch coverage. +Specify the number of spaces to use as replacement for tab characters in the +HTML source code view as generated by +.BR genhtml . .br -Default is 'LCOV_EXCL_BR_LINE'. +This option corresponds to the \-\-num\-spaces option of +.BR genhtml . +.br + +Default is 8. + .PP -.BR lcov_excl_exception_br_line " =" -.I expression +.BR genhtml_legend " =" +.IR 0 | 1 .IP -Specify the regular expression of lines to exclude from exception branch coverage. +If non\-zero, include a legend explaining the meaning of color coding in the HTML +output as generated by +.BR genhtml . .br -Default is 'LCOV_EXCL_EXCEPTION_BR_LINE'. +This option corresponds to the \-\-legend option of +.BR genhtml . +.br + +Default is 0. .PP +.BR genhtml_html_prolog " =" +.I filename +.IP +If set, include the contents of the specified file at the beginning of HTML +output. + +This option corresponds to the \-\-html\-prolog option of +.BR genhtml . +.br + +Default is to use no extra prolog. +.PP + +.BR genhtml_html_epilog " =" +.I filename +.IP +If set, include the contents of the specified file at the end of HTML output. + +This option corresponds to the \-\-html\-epilog option of +.BR genhtml . +.br + +Default is to use no extra epilog. +.PP + +.BR genhtml_html_extension " =" +.I extension +.IP +If set, use the specified string as filename extension for generated HTML files. + +This option corresponds to the \-\-html\-extension option of +.BR genhtml . +.br + +Default extension is "html". 
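+.PP
+For example, a user\-level lcovrc file might combine several of the HTML
+appearance options described above. This is a hypothetical snippet; the
+values shown are illustrative only:
+.RS 3
+.IP
+genhtml_num_spaces = 4
+.br
+genhtml_legend = 1
+.br
+genhtml_html_extension = xhtml
+.PP
+.RE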
+.PP
+
+.BR genhtml_html_gzip " ="
+.IR 0 | 1
+.IP
+If set, compress all html files using gzip.
+
+This option corresponds to the \-\-html\-gzip option of
+.BR genhtml .
+.br
+
+Default is 0.
+.PP
+
+.BR genhtml_sort " ="
+.IR 0 | 1
+.IP
+If non\-zero, create overview pages sorted by coverage rates when generating
+HTML output using
+.BR genhtml .
+.br
+
+This option can be set to 0 by using the \-\-no\-sort option of
+.BR genhtml .
+.br
+
+Default is 1.
+.PP
+
+.BR genhtml_hierarchical " ="
+.IR 0 | 1
+.IP
+If non\-zero, the HTML report will follow the hierarchical directory structure of the source code.
+.br
+
+This option is equivalent to using the \-\-hierarchical command line option of
+.BR genhtml .
+'Hierarchical' and 'flat' views are mutually exclusive.
+.br
+
+Default is 0.
+.PP
+
+.BR genhtml_flat_view " ="
+.IR 0 | 1
+.IP
+If non\-zero, the top-level HTML table will contain all of the files in the project and there will be no intermediate directory pages.
+.br
+
+This option is equivalent to using the \-\-flat command line option of
+.BR genhtml .
+'Hierarchical' and 'flat' views are mutually exclusive.
+.br
+
+Default is 0.
+.PP
+
+.BR genhtml_show_navigation " ="
+.IR 0 | 1
+.IP
+If non\-zero, the 'source code' view summary table will contain hyperlinks from the number to the first source line in the corresponding category ('Hit' or 'Not hit') in the non-differential coverage report.
+Source code hyperlinks are always enabled in differential coverage reports.
+.br
+
+This option is equivalent to using the \-\-show\-navigation command line option of
+.BR genhtml .
+.br
+
+Default is 0.
+.PP
+
+.BR genhtml_show_owner_table " ="
+.IR 0 | 1 | all
+.IP
+If non\-zero, equivalent to the genhtml
+.I \-\-show\-owners
+flag: see man
+.B genhtml(1)
+for details.
+.br
+
+Default is 0.
+.PP
+
+.BR compact_summary_tables " ="
+.IR 0 | 1
+.IP
+If non\-zero, suppress the 'Total' row in the 'date' and 'owner' summary tables if there is only one element in the corresponding bin.
+.br
+When there are a large number of files with a single author, this can cut the summary table size by almost half.
+.br
+
+Default is 1 (enabled).
+.PP
+
+.BR owner_table_entries " ="
+.IR integer
+.IP
+This option is used to tell genhtml the number of 'owner' table entries to
+retain in the summary table (at the top of the page) if owner table truncation is requested. Authors are sorted by quantity of un-exercised code - so elided entries will be smaller offenders: maximal offenders are retained.
+If the option is not set, then owner tables are not truncated.
+
+This option has no effect unless
+.I "genhtml \-\-show\-owners"
+is enabled.
+See the
+.I \-\-show-owners
+option in
+.B genhtml(1)
+for details.
+
+.br
+
+Default is not set (
+.I i.e.,
+do not truncate owner tables).
+.PP
+
+.BR truncate_owner_table " ="
+.IR comma_separated_list
+.IP
+This option is used to tell genhtml whether to truncate the 'owner' table
+at the top, directory, or file level.
+This option acts together with the
+.I owner_table_entries
+parameter to determine how many author entries are retained.
+
+This option has no effect unless
+.I "genhtml \-\-show\-owners"
+is enabled and the
+.I owner_table_entries
+configuration is set.
+
+If this option is set multiple times in the lcovrc file, the values are
+combined to form the list of levels where truncation will occur.
+Similarly, if this option is not set and
+.I owner_table_entries
+is set, then the table will be truncated everywhere.
+
+See the
+.I \-\-show-owners
+option in
+.B genhtml(1)
+for details.
+.br
+
+Default is to not truncate the list.
+.PP
+
+
+
+.BR genhtml_show_noncode_owners " ="
+.IR 0 | 1
+.IP
+If non\-zero, equivalent to the genhtml
+.I \-\-show\-noncode
+flag: see man
+.B genhtml(1)
+for details.
+.br
+
+Default is 0.
+.PP
+
+.BR genhtml_show_function_proportion " ="
+.IR 0 | 1
+.IP
+
+If nonzero, add a column to the "function coverage detail" table to show the proportion of lines and branches within the function which are exercised.
+.br
+
+This option is equivalent to using the \-\-show\-proportion command line option of
+.BR genhtml .
+.br
+
+Default is 0.
+.PP
+
+.BR genhtml_synthesize_missing " ="
+.IR 0 | 1
+.IP
+If non\-zero, equivalent to the genhtml
+.I \-\-synthesize\-missing
+flag: see man
+.B genhtml(1)
+for details.
+.br
+
+Default is 0.
+.PP
+
+.BR genhtml_charset " ="
+.I charset
+.IP
+Specify the character set of all generated HTML pages.
+.br
+
+Use this option if the source code contains characters which are not
+part of the default character set. Note that this option is ignored
+when a custom HTML prolog is specified (see also
+.BR genhtml_html_prolog ).
+.br
+
+Default is UTF-8.
+.PP
+
+.BR demangle_cpp " ="
+.IR c++filt
+.IP
+If set, this option tells genhtml/lcov/geninfo to demangle C++ function names in function overviews,
+and gives the name of the tool used for demangling.
+Set this option if you want to convert C++ internal function
+names to human readable format for display on the HTML function overview
+page.
+
+If the
+.I demangle_cpp
+option is used multiple times, then the arguments are concatenated when the callback
+is executed - similar to how the gcc
+.I \-Xlinker
+parameter works. This provides a possibly easier way to pass arguments to
+your tool, without requiring a wrapper script.
+In that case, your callback will be executed as:
+.I | tool\-0 tool\-1 ...
+Arguments are quoted when passed to
+the shell, in order to handle parameters which contain spaces.
+See the configuration example below.
+
+Note that the demangling tool is called via a pipe, and is expected to read from stdin and write to stdout.
+
+This option corresponds to the \-\-demangle\-cpp command line option of
+.BR genhtml .
+.br
+
+Default is not set (C++ demangling is disabled).
+.PP
+
+.BR genhtml_demangle_cpp_tool " ="
+.I path_to_c++filt
+.IP
+Specify the location of the demangle tool (see
+.BR c++filt (1))
+used to convert C++ internal function names to human readable format
+for display on the HTML function overview page.
+
+This option is deprecated and will be removed from a future lcov release.
+Please use
+.I demangle_cpp = path_to_c++filt
+instead.
+
+.PP
+
+.BR genhtml_demangle_cpp_params " ="
+.I parameters
+.IP
+Specify extra parameters to be passed to the demangling tool.
+
+Use this option if your environment requires additional parameters such
+as --no-strip-underscore for correctly demangling C++ internal function
+names. See also
+.BR c++filt (1).
+.br
+
+This argument is deprecated. Please use the Xlinker-like
+.I demangle_cpp
+argument instead.
+
+.PP
+
+.BR genhtml_desc_html " ="
+.IR 0 | 1
+.IP
+If non-zero, test case descriptions may contain HTML markup.
+
+Set this option to one if you want to embed HTML markup (for example to
+include links) in test case descriptions. When set to zero, HTML markup
+characters will be escaped to show up as plain text on the test case
+description page.
+.br
+
+Default is 0.
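+.PP
+As an illustration of the Xlinker\-style argument concatenation described for the
+.I demangle_cpp
+option above, a hypothetical lcovrc fragment might name the tool and one extra
+flag on separate lines (the flag is the same one mentioned under
+.I genhtml_demangle_cpp_params
+):
+.RS 3
+.IP
+demangle_cpp = c++filt
+.br
+demangle_cpp = --no-strip-underscore
+.PP
+.RE
+With these settings the demangler is invoked as a pipe, roughly equivalent to
+running 'c++filt --no-strip-underscore' with mangled names supplied on stdin.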
+.PP
+
+.BR genhtml_precision " ="
+.IR 1 | 2 | 3 | 4
+.IP
+Specify how many digits after the decimal-point should be used for
+displaying coverage rates.
+.br
+
+Default is 1.
+.PP
+.BR merge_function_aliases " ="
+.IR 0 | 1
+.IP
+If non-zero, group function aliases in the function detail table.
+See man genhtml(1).
+.br
+
+Default is 0.
+.PP
+
+.BR genhtml_missed " ="
+.IR 0 | 1
+.IP
+If non-zero, the count of missed lines, functions, or branches is shown
+as negative numbers in overview pages.
+.br
+
+Default is 0.
+.PP
+
+.BR suppress_function_aliases " ="
+.IR 0 | 1
+.IP
+If non-zero, do not show aliases in the function detail table.
+.br
+If nonzero, implies that
+.B merge_function_aliases
+is enabled.
+.br
+See the genhtml man page for more details.
+.br
+
+Default is 0.
+.PP
+
+.BR derive_function_end_line " ="
+.IR 0 | 1
+.IP
+If non-zero, use 'line' coverage data to deduce the end line of each function
+definition. This is useful when excluding certain functions from your coverage report. See the
+.I \-\-erase\-functions,
+.I \-\-filter trivial
+and
+.I \-\-show\-proportion
+options.
+
+Default is 1.
+
+This option is not required if you are using gcc/9 or newer; these versions report function begin/end lines directly.
+
+Note that end lines are derived only for C/C++ files unless the
+.I derive_function_end_line_all_files
+option is enabled; see the
+.I c_file_extensions
+setting, above, for the list of extensions used to identify these C/C++ files.
+.br
+Lambda functions are ignored during end line computation. Note that lambdas
+are identified via function name matching - so you must enable demangling
+if your toolchain is too old to report demangled names in the GCOV output.
+See the
+.I demangle_cpp
+setting, above.
+
+For languages other than C/C++: end-line derivation may compute the wrong
+value -
+.I e.g.,
+in cases where there are lines of code in global scope following
+some function definition. In this case, lcov will incorrectly associate the
+following code with the preceding function.
+.br
+If this creates problems - for example, causes lcov to warn about inconsistent
+coverage data - then there are several possible workarounds:
+
+.RS
+.IP \- 3
+disable end-line derivation -
+.I e.g.,
+via
+.I "\-\-rc derive_function_end_line=0".
+.PP
+.IP \- 3
+exclude the offending code and/or the entire associated file.
+.PP
+.IP \- 3
+ignore the error message,
+.I e.g.,
+via the
+.I \-\-ignore\-errors
+command line option.
+.PP
+.IP \- 3
+disable coverage DB consistency checks -
+.I e.g.,
+via
+.I "\-\-rc check_data_consistency=0".
+.PP
+.RE
+.
+.PP
+
+.BR derive_function_end_line_all_files " ="
+.IR 0 | 1
+.IP
+If non-zero, derive end lines for all functions, regardless of source language.
+By default, end lines are computed only in C/C++ files.
+.br
+This option has no effect if
+.I derive_function_end_line
+is disabled.
+
+Default is 0 (disabled).
+.PP
+
+.BR trivial_function_threshold " ="
+.IR integer
+.IP
+Set the maximum size of a function (in number of lines) which will be checked
+by the
+.I \-\-filter trivial
+filter.
+.br
+
+Default is 5.
+.PP
+
+.BR excessive_count_threshold " ="
+.IR number
+.IP
+Set the threshold for hit counts which lcov deems excessive/unlikely - indicating
+a bug somewhere in your toolchain.
+.br
+For example, it is unlikely that your job can run long enough to rack up
+tens of billions of hits.
+.br
+Message type
+.B excessive
+is used to report potential issues - see the
+.B genhtml(1), lcov(1), geninfo(1)
+man pages.
+
+Default is not set. (Do not check for excessive counts.)
+.PP
+
+.
+.BR geninfo_gcov_tool " ="
+.I path_to_gcov
+.IP
+Specify the location of the gcov tool (see
+.BR gcov (1))
+which is used to generate coverage information from data files.
+.br
+
+This option can be used multiple times - e.g., to add arguments to the gcov
+callback. See the geninfo man page for details.
+
+.PP
+
+.BR geninfo_adjust_testname " ="
+.IR 0 | 1
+.IP
+If non\-zero, adjust test names to include operating system information
+when capturing coverage data.
+.br
+
+Default is 0.
+.PP
+
+.BR forget_testcase_names " ="
+.IR 0 | 1
+.IP
+If non\-zero, ignore testcase names in the .info file.
+This may improve performance and reduce memory consumption if the user does
+not need a per-testcase coverage summary in coverage reports.
+
+This is equivalent to the "\-\-forget\-test\-names" lcov/genhtml option.
+.br
+
+Default is 0.
+.PP
+
+.BR geninfo_checksum " ="
+.IR 0 | 1
+.br
+.BR checksum " ="
+.IR 0 | 1
+.IP
+If non\-zero, generate source code checksums when capturing coverage data.
+Checksums are useful to prevent merging coverage data from incompatible
+source code versions but checksum generation increases the size of coverage
+files and the time used to generate those files.
+.br
+
+The backward compatible
+.I geninfo_checksum
+option is deprecated. Please use
+.I checksum
+instead. The new option is preferred as it is more clear that the option is used by lcov and genhtml as well as geninfo.
+.br
+
+This option can be overridden by the \-\-checksum and \-\-no\-checksum command line
+options.
+.br
+
+Default is 0.
+
+Note that this option is somewhat subsumed by the
+.I version_script
+option - which does something similar, but at the 'whole file' level.
+
+.PP
+
+.BR geninfo_compat_libtool " ="
+.IR 0 | 1
+.IP
+If non\-zero, enable libtool compatibility mode. When libtool compatibility
+mode is enabled, lcov will assume that the source code relating to a .da file
+located in a directory named ".libs" can be found in its parent directory.
+.br
+
+This option corresponds to the \-\-compat\-libtool and \-\-no\-compat\-libtool
+command line options of
+.BR geninfo .
+.br
+
+Default is 1.
+.PP
+
+.BR geninfo_external " ="
+.IR 0 | 1
+.IP
+If non\-zero, capture coverage data for external source files.
+
+External source files are files which are not located in one of the directories
+(including sub-directories)
+specified by the \-\-directory or \-\-base\-directory options of
+.BR lcov / geninfo .
+Also see the
+.I \-\-follow
+option and the
+.I geninfo_follow_symlinks
+and
+.I geninfo_follow_file_links
+options for additional path controls.
+
+Default is 1.
+.PP
+
+.BR geninfo_capture_all " ="
+.IR 0 | 1
+.IP
+If non\-zero, capture coverage data from both runtime data files as
+well as compile time data files which have no corresponding runtime data.
+See the
+.I \-\-all
+flag description in man
+.B geninfo(1)
+for more information.
+
+Default is 0: do not process bare compile-time data files.
+.PP
+
+.BR geninfo_follow_symlinks " ="
+.IR 0 | 1
+.IP
+Equivalent to the lcov/geninfo
+.I \-\-follow
+command line option.
+See man +.B geninfo(1) +for details. + +Default is 0: do not modify follow symbolic links. +.PP + +.BR geninfo_follow_file_links " =" +.IR 0 | 1 +.IP +If non\-zero and the lcov/geninfo +.I \-\-follow +command line option is specified, then source file pathnames which contain +symlinks are resolved to their actual target. +Note that the parent directory of the link target may be considered 'external' +and thus be removed by the +.I \-\-no\-external +flag. + +Default is 0: do not modify pathnames. +.PP + + +.BR geninfo_gcov_all_blocks " =" +.IR 0 | 1 +.IP +If non\-zero, call the gcov tool with option --all-blocks. + +Using --all-blocks will produce more detailed branch coverage information for +each line. Set this option to zero if you do not need detailed branch coverage +information to speed up the process of capturing code coverage or to work +around a bug in some versions of gcov which will cause it to endlessly loop +when analyzing some files. + +Default is 1. +.PP + +.BR geninfo_unexecuted_blocks " =" +.IR 0 | 1 +.IP +If non\-zero, adjust the 'hit' count of lines which have attribute +.I "unexecuted_block": true +but which contain no branches and have a non-zero count. +Assume that these lines are not executed. + +Note that this option is effective only for gcov versions 9 and newer. + +Default is 0. +.PP + +.BR geninfo_compat " =" +.IR mode = value [, mode = value ,...] +.IP +Specify that geninfo should enable one or more compatibility modes +when capturing coverage data. + +This option corresponds to the \-\-compat command line option of +.BR geninfo . + +Default is 'libtool=on, hammer=auto, split_crc=auto'. +.PP + +.BR geninfo_adjust_src_path " =" +.IR pattern " => " replacement +.br +.BR geninfo_adjust_src_path " =" +.I pattern +.IP +Adjust source paths when capturing coverage data. + +Use this option in situations where geninfo cannot find the correct +path to source code files of a project. By providing a +.I pattern +in Perl regular expression format (see +.BR perlre (1)) +and an optional replacement string, you can instruct geninfo to +remove or change parts of the incorrect source path. + +.B Example: +.br + +1. When geninfo reports that it cannot find source file +.br + + /path/to/src/.libs/file.c +.br + +while the file is actually located in +.br + + /path/to/src/file.c +.br + +use the following parameter: +.br + + geninfo_adjust_src_path = /.libs + +This will remove all "/.libs" strings from the path. + +2. When geninfo reports that it cannot find source file +.br + + /tmp/build/file.c +.br + +while the file is actually located in +.br + + /usr/src/file.c +.br + +use the following parameter: +.br + + geninfo_adjust_src_path = /tmp/build => /usr/src +.br + +This will change all "/tmp/build" strings in the path to "/usr/src". + +The +.I adjust_src_path +option is similar to the +.I substitution = ... +option - which is somewhat more general and allows you to specify +multiple substitution patterns. Also see the +.I resolve_script +option. +.PP + +.BR source_directory " =" +.IR dirname +.IP + +Add 'dirname' to the list of places to look for source files. +Also see the +.I \-\-source\-directory +entry in the +.B lcov, geninfo, +and +.B genhtml +man pages. +.br + +For relative source file paths +.I e.g., +found in some +.IR tracefile +or in gcov output, +first look for the path from 'cwd' (where genhtml was +invoked) and +then from each alternate directory name in the order specified. +The first location matching location is used. 
+
+This option can be specified multiple times, to add more directories to the source search path.
+
+Note that the command line option overrides the RC file entries (if any).
+.PP
+
+.BR build_directory " ="
+.IR dirname
+.IP
+
+Add 'dirname' to the list of places to look for matching GCNO files (geninfo) or source file soft links (genhtml).
+See the
+.I \-\-build\-directory
+description in the
+.B geninfo
+and in the
+.B genhtml
+man pages.
+.br
+
+This option can be specified multiple times, to add more directories to the source search path.
+
+Note that the command line option overrides the RC file entries (if any).
+.PP
+
+.BR geninfo_auto_base " ="
+.IR 0 | 1
+.IP
+If non\-zero, apply a heuristic to determine the base directory when
+collecting coverage data.
+.br
+
+Use this option when using geninfo on projects built with libtool or
+similar build environments that work with multiple base directories,
+i.e., environments where the current working directory when invoking the
+compiler is not the same directory in which the source code file is
+located, and in addition, is different between files of the same project.
+.br
+
+Default is 1.
+.PP
+
+.BR geninfo_intermediate " ="
+.IR 0 | 1 | auto
+.IP
+Specify whether to use the gcov intermediate format.
+.br
+
+Use this option to control whether geninfo should use the gcov intermediate
+format while collecting coverage data. The use of the gcov intermediate format
+should increase processing speed. It also provides branch coverage data when
+using the \-\-initial command line option.
+.br
+
+Valid values are 0 for off, 1 for on, and "auto" to let geninfo automatically
+use intermediate format when supported by gcov.
+.br
+
+Default is "auto".
+.PP
+
+.BR geninfo_no_exception_branch " ="
+.IR 0 | 1
+.br
+.BR no_exception_branch " ="
+.IR 0 | 1
+.IP
+Specify whether to exclude exception branches from branch coverage.
+Whether C++ exception branches are identified and removed is dependent on your compiler/toolchain correctly marking them in the generated coverage data.
+
+This option is used by lcov, geninfo, and genhtml.
+
+The value
+.I no_exception_branch = 1
+is equivalent to the
+.I \-\-filter exception
+command line option.
+
+The backward compatible
+.I geninfo_no_exception_branch
+option is deprecated. Please use
+.I no_exception_branch
+instead. The new option is preferred as it is more clear that the option is used by lcov and genhtml as well as geninfo.
+.br
+
+Default is 0.
+.PP
+
+.BR geninfo_chunk_size " ="
+.IR integer [%]
+.IP
+Specify the number of GCDA files which should be processed per-call in each child process.
+This parameter affects the balance of CPU time spent in the child and thus the number of completed child processes which are queued to be merged into the parent - which then affects the queuing delay. Higher queuing delay lowers the effective parallelism.
+
+The default is 80% of
+.I total_number_of_gcda_files / maximum_number_of_parallel_children,
+the average number of files expected to be processed by each child.
+See the
+.I \-\-parallel
+entry in the
+.B geninfo
+man page.
+
+The argument may be either an integer value to be used as the chunk size or
+a percentage of the average number of files processed per child.
+
+This option has no effect unless the
+.I \-\-parallel
+option has been specified.
+
+.PP
+.BR geninfo_interval_update " ="
+.IR integer
+.IP
+Set the percentage of GCDA files which should be processed between console/progress
+updates.
This setting may be useful for parameter tuning and debugging apparent performance issues. + +The default is 5%. + +This option has no effect unless the +.I \-\-parallel +option has been specified. + +.PP + +.BR lcov_filter_chunk_size " =" +.IR integer [%] +.IP +Specify the number of source files which should be processed per-call in each child process when applying coverpoint filters - see the +.BR filter = ... +parameter, below. +This parameter affects the balance of CPU time spent in the child and thus the number of completed child processes which are queued to be merged into the parent - which then affects the queuing delay. Higher queuing delay lowers the effective parallelism. + +The default is 80% of +.I total_number_of_source_files / maximum_number_of_parallel_children. + +The argument may be either an integer value to be used as the chunk size or +a percentage of the average number files processed per child. + +This option has no effect unless the +.I \-\-parallel +option has been specified and +.BR lcov_filter_parallel +is not zero. + +.PP + +.BR lcov_filter_parallel " = 0 | 1" +.IP + +This option specifies whether coverpoint filtering should be done serially or in parallel. If the number of files to process is very large, then parallelization may improve performance. + +This option has no effect unless the +.I \-\-parallel +option has been specified. + +The default is 1 (enabled). + + +.PP +.BR lcov_gcov_dir " =" +.I path_to_kernel_coverage_data +.IP +Specify the path to the directory where kernel coverage data can be found +or leave undefined for auto-detection. +.br + +Default is auto-detection. +.PP + +.BR lcov_tmp_dir " =" +.I temp +.IP +Specify the location of a directory used for temporary files. +.br + +Default is '/tmp'. +.PP + +.BR lcov_list_full_path " =" +.IR 0 | 1 +.IP +If non-zero, print the full path to source code files during a list operation. +.br + +This option corresponds to the \-\-list\-full\-path option of +.BR lcov . +.br + +Default is 0. +.PP + +.BR lcov_list_max_width " =" +.IR width +.IP +Specify the maximum width for list output. This value is ignored when +lcov_list_full_path is non\-zero. +.br + +Default is 80. +.PP + +.BR lcov_list_truncate_max +.B " =" +.IR percentage +.IP +Specify the maximum percentage of file names which may be truncated when +choosing a directory prefix in list output. This value is ignored when +lcov_list_full_path is non\-zero. +.br + +Default is 20. +.PP + +.BR function_coverage " =" +.IR 0 | 1 +.IP +Specify whether lcov/geninfo/genhtml should generate, process, and +display function coverage data. +.br + +Turning off function coverage by setting this option to 0 can +sligly reduce memory and CPU time consumption +when lcov is collecting and processing coverage data, as well as +reduce the size of the resulting data files. +.br + +This option can be overridden by the +.I \-\-function\-coverage +and +.I \-\-no\-function\-coverage +command line options. +.br + + +Backward-compatible RC options +.B lcov_function_coverage +and +.B genhtml_function_coverage +are supported but deprecated. Please use the new option instead. +.br + +Default is 1. +.PP + +.BR branch_coverage " =" +.IR 0 | 1 +.IP +Specify whether lcov/geninfo should generate, process, and display branch +coverage data. +.br + +Turning off branch coverage by setting this option to 0 can reduce +memory and CPU time consumption +when lcov is collecting and processing coverage data, as well as +reduce the size of the resulting data files. 
+.br
+
+This option can be overridden by the
+.I \-\-branch\-coverage
+and
+.I \-\-no\-branch\-coverage
+ command line options.
+.br
+
+Backward-compatible RC options
+.B lcov_branch_coverage
+and
+.B genhtml_branch_coverage
+are supported but deprecated. Please use the new option instead.
+.br
+
+
+Default is 0.
+.PP
+
+.BR mcdc_coverage " ="
+.IR 0 | 1
+.IP
+Specify whether lcov/geninfo should generate, process, and display Modified
+Condition / Decision Coverage (MC/DC)
+coverage data.
+.br
+
+Turning off MC/DC coverage by setting this option to 0 can reduce
+memory and CPU time consumption
+when lcov is collecting and processing coverage data, as well as
+reduce the size of the resulting data files.
+.br
+
+This option can be overridden by the
+.I \-\-mcdc\-coverage
+ command line option.
+.br
+
+Default is 0 (not enabled).
+
+.br
+See the MC/DC section of man
+.B genhtml(1)
+for more details.
+.PP
+
+.BR lcov_excl_line " ="
+.I expression
+.IP
+Specify the regular expression of lines to exclude.
+Line, branch, and function coverpoints associated with lines where this regexp is found are dropped.
+.br
+
+There are at least 2 (moderately) common use cases for custom exclusion markers:
+.br
+- You are using multiple tools for coverage analysis, each of which has its own directives, and you don't want to complicate your source code with directives for each of them.
+.br
+- You want to exclude different regions/different types of code in different contexts - for example, to ignore or not ignore debug/trace code depending on your team.
+
+Default is 'LCOV_EXCL_LINE'.
+.PP
+
+.BR lcov_excl_br_line " ="
+.I expression
+.IP
+Specify the regular expression of lines to exclude from branch coverage.
+Branch coverpoints associated with lines where this regexp is found are dropped. (Line and function coverpoints are not affected.)
+.br
+
+Default is 'LCOV_EXCL_BR_LINE'.
+.PP
+
+.BR lcov_excl_exception_br_line " ="
+.I expression
+.IP
+Specify the regular expression of lines to exclude from exception branch coverage.
+Exception-related branch coverpoints associated with lines where this regexp is found are dropped. (Line and function coverpoints are not affected. Branch coverpoints which are not associated with exceptions are also not affected.)
+
+Also see 'geninfo_no_exception_branch'; if nonzero, then all identified exception branches will be removed.
+
+.br
+Note that this feature requires support from your compiler - and thus may not ignore all exception-related coverpoints.
+.br
+
+Default is 'LCOV_EXCL_EXCEPTION_BR_LINE'.
+.PP
+
+.BR lcov_excl_start " ="
+.IR expression
+.IP
+Specify the regexp used to mark the start of an excluded region.
+All coverpoints within excluded regions are dropped.
+.br
+
+Default is 'LCOV_EXCL_START'.
+
+.PP
+
+.BR lcov_excl_stop " ="
+.IR expression
+.IP
+Specify the regexp used to mark the end of an excluded region.
+.br
+
+Default is 'LCOV_EXCL_STOP'.
+
+.PP
+
+.BR lcov_excl_br_start " ="
+.IR expression
+.IP
+Specify the regexp used to mark the start of a region where branch coverpoints are excluded.
+Line and function coverpoints within the region are not excluded.
+.br
+
+Default is 'LCOV_EXCL_BR_START'.
+
+.PP
+
+.BR lcov_excl_br_stop " ="
+.IR expression
+.IP
+Specify the regexp used to mark the end of a region where branch coverpoints are excluded.
+.br
+
+Default is 'LCOV_EXCL_BR_STOP'.
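+.PP
+For example (the marker names below are hypothetical), a project which shares
+sources with another coverage tool could override the default exclusion markers
+described above as follows:
+.RS 3
+.IP
+lcov_excl_line = MY_TOOL_EXCL_LINE
+.br
+lcov_excl_start = MY_TOOL_EXCL_START
+.br
+lcov_excl_stop = MY_TOOL_EXCL_STOP
+.PP
+.RE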
+
+
+.PP
+
+.BR lcov_excl_exception_br_start " ="
+.IR expression
+.IP
+Specify the regexp used to mark the start of a region where branch coverpoints associated with exceptions are excluded.
+Line, function, and non-exception branch coverpoints within the region are not excluded.
+
+Also see 'geninfo_no_exception_branch'; if nonzero, then all identified exception branches will be removed.
+
+Note that exception branch coverpoint identification requires support from your compiler - and thus may not ignore all exception-related coverpoints.
+.br
+
+Default is 'LCOV_EXCL_EXCEPTION_BR_START'.
+
+.PP
+
+.BR lcov_excl_exception_br_stop " ="
+.IR expression
+.IP
+Specify the regexp used to mark the end of a region where exception-related branch coverpoints are excluded.
+.br
+
+Default is 'LCOV_EXCL_EXCEPTION_BR_STOP'.
+
+.PP
+
+.BR lcov_unreachable_line " ="
+.I expression
+.IP
+Specify the regular expression of unreachable lines which should be excluded from reporting, but should generate an "inconsistent" error if hit.
+That is: the marked code is believed to be unreachable, so if the "unreachable" code is executed there is a bug in the code, in the placement of the directive, or both.
+Line, branch, and function coverpoints associated with lines where this regexp is found are dropped.
+.br
+
+Default is 'LCOV_UNREACHABLE_LINE'.
+.PP
+
+.BR lcov_unreachable_start " ="
+.IR expression
+.IP
+Specify the regexp used to mark the start of an unreachable code block.
+All coverpoints within unreachable regions are dropped, but the tool will generate
+an "inconsistent" error if any code in the block is executed.
+.br
+
+Default is 'LCOV_UNREACHABLE_START'.
+
+.PP
+
+.BR lcov_unreachable_stop " ="
+.IR expression
+.IP
+Specify the regexp used to mark the end of the unreachable code block.
+.br
+
+Default is 'LCOV_UNREACHABLE_STOP'.
+
+.PP
+
+.BR fail_under_branches " ="
+.I percentage
+.IP
+Specify the branch coverage threshold: if the branch coverage is below this threshold, lcov/genhtml/geninfo will generate all the normal result files and messages, but will return a non-zero exit code.
+.br
+
+This option is equivalent to the \-\-fail\-under\-branches lcov/genhtml/geninfo command line argument. See
+.B man lcov(1)
+for more details.
+
+.br
+The default is 0 (no threshold).
+.PP
+
+.BR retain_unreachable_coverpoints_if_executed " ="
+.I [0 | 1]
+.IP
+Specify whether coverpoints in "unreachable" regions which are 'hit' are
+dropped (0) - because the region is excluded - or retained (1) - because
+the directive appears to be incorrect.
+.br
+See the "Exclusion markers" section in man
+.B geninfo(1)
+for more details.
+
+
+The default is 1 (retain the coverpoints).
+.PP
+
+.BR fail_under_lines " ="
+.I percentage
+.IP
+Specify the line coverage threshold: if the line coverage is below this threshold, lcov/genhtml/geninfo will generate all the normal result files and messages, but will return a non-zero exit code.
+.br
+
+This option is equivalent to the \-\-fail\-under\-lines lcov/genhtml/geninfo command line argument.
+
+.br
+The default is 0 (no threshold).
+
+.PP
+
+.BR profile " ="
+.IR filename
+.IP
+If set, tells genhtml, lcov, or geninfo to generate some execution
+time/profile data which can be used to motivate future optimizations.
+It is equivalent to the
+.I \-\-profile
+command line option.
+
+If
+.I filename
+is empty, then the profile is written to the default location chosen by the application.
+
+.br
+This option is used by genhtml, lcov, and geninfo.
+
+The default is unset: no data generated.
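+.PP
+As an illustration, a CI-oriented lcovrc file might enforce both of the coverage
+thresholds described above. The threshold values are examples only, repeated
+from the sample configuration shown earlier on this page:
+.RS 3
+.IP
+fail_under_lines = 97.5
+.br
+fail_under_branches = 75.0
+.PP
+.RE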
+
+.PP
+
+.BR parallel " ="
+.IR integer
+.IP
+Tells genhtml, lcov, or geninfo the maximum number of simultaneous processes
+to use. Zero means to use as many cores as are available on the machine.
+The default is 1 (one) - which means to process sequentially (no parallelism).
+
+.br
+This option is used by genhtml, lcov, and geninfo.
+
+.PP
+
+.BR memory " ="
+.IR integer_Mb
+.IP
+Tells genhtml, lcov, or geninfo the maximum memory to use during parallel processing
+operations. Effectively, the process will not fork() if this limit would be
+exceeded.
+Zero means that there is no limit.
+The default is 0 (zero) - which means that there is no explicit limit.
+
+.br
+This option is used by genhtml, lcov, and geninfo.
+
+.PP
+
+.BR memory_percentage " ="
+.IR number
+.IP
+Tells genhtml, lcov, or geninfo the maximum memory to use during parallel processing
+operations. The maximum is computed as a percentage of the total memory
+available on the system; for example, '75' would limit to 75% of
+total memory, whereas 150.5 would limit to 150.5% (
+.I i.e.,
+larger than the total available).
+Effectively, the process will not fork() if this limit would be
+exceeded.
+Note that this value is used only if the maximum memory value is not
+set explicitly - either by the
+.I \-\-memory
+command line option or the
+.I memory = integer
+configuration file setting.
+
+The default is not set.
+
+.br
+This option is used by genhtml, lcov, and geninfo.
+
+.PP
+
+.BR max_fork_fails " ="
+.IR integer
+.IP
+Tells genhtml, lcov, or geninfo the number of consecutive fork() failures
+to ignore during
+.I \-\-parallel
+execution before giving up.
+Note that genhtml/lcov/geninfo fail and stop immediately unless the
+.I fork
+error message is ignored - either via the
+.I ignore_errors
+directive (above), the
+.I \-\-ignore\-errors
+command line option, or if
+.I stop_on_error
+is disabled or the
+.I \-\-keep\-going
+command line option is used.
+
+The default fork failure maximum is 5.
+
+.PP
+
+.BR fork_fail_timeout " ="
+.IR integer_seconds
+.IP
+Tells genhtml, lcov, or geninfo how long to wait after a fork() failure
+before retrying.
+
+The default is 10 (seconds).
+
+.PP
+
+.BR max_tasks_per_core " ="
+.IR integer
+.IP
+This is the maximum number of files that genhtml will handle in a single
+child process during parallel execution.
+
+The default is 20.
+
+.PP
+
+.BR genhtml_date_bins " ="
+.IR integer[,integer..]
+.IP
+This option is equivalent to the "genhtml \-\-date\-bins" option.
+See man
+.B genhtml(1)
+for details.
+
+This option can be used multiple times in the lcovrc file to set multiple cutpoints.
+
+.PP
+
+.BR genhtml_datelabels " ="
+.IR string[,string..]
+.IP
+This option is equivalent to the "genhtml \-\-date\-labels" option.
+See man
+.B genhtml(1)
+for details.
+
+This option can be used multiple times in the lcovrc file to set multiple labels.
+The number of labels should be one greater than the number of cutpoints.
+
+.PP
+
+.BR genhtml_annotate_script " ="
+.IR path_to_executable | parameter
+.IP
+This option is equivalent to the "genhtml \-\-annotate\-script" option.
+
+This option can be used multiple times in the lcovrc file to specify both an annotation script and additional options which are passed to the script.
+
+See the genhtml man page for details.
+
+.PP
+
+.BR genhtml_annotate_tooltip " ="
+.IR tooltip_string
+.IP
+
+This option sets the 'tooltip' popup which appears if the user hovers the mouse over
+the associated source code.
+Note that the tooltip is generated only if the annotation-script callback
+is successful and returns a commit ID other than "NONE".
+Set
+.I tooltip_string
+to "" (empty string) to force genhtml to not produce the tooltip.
+
+Substitutions are performed on
+.I tooltip_string:
+
+.IP " %C:"
+commit ID (from annotate callback - see
+.I \-\-annotate\-script
+entry in the
+.B genhtml
+man page)
+.IP " %U:"
+commit author abbreviated name (returned by annotate callback)
+.IP " %F:"
+commit author full name (returned by annotate callback)
+.IP " %D:"
+commit date (as returned by annotate callback)
+.IP " %d:"
+commit date with time of day removed (i.e., date part only)
+.IP " %A:"
+commit age.
+.IP " %l:"
+source line number.
+
+.PP
+
+
+.BR context_script " ="
+.IR path_to_executable_or_module | parameter
+.IP
+This option is equivalent to the
+.I \-\-context\-script
+option of genhtml/lcov/geninfo.
+
+This option can be used multiple times in the lcovrc file to specify both a context script and additional options which are passed to the script.
+
+See the genhtml man page for details.
+
+.PP
+
+
+
+.BR criteria_script " ="
+.IR path_to_executable_or_module | parameter
+.IP
+This option is equivalent to the
+.I \-\-criteria\-script
+option of genhtml/lcov/geninfo.
+
+This option can be used multiple times in the lcovrc file to specify both a criteria script and additional options which are passed to the script.
+
+See the genhtml man page for details.
+
+.PP
+
+.BR criteria_callback_data " ="
+.IR comma_separated_list
+.IP
+This option is used to tell genhtml whether you want date and/or owner summary
+data passed back to your criteria callback.
+Note that summary data is always passed.
+
+Note that lcov and geninfo do not record date or owner data - and so do not pass
+it to the callback.
+
+This option can be used multiple times in the lcovrc file to specify that both date and owner data should be returned, or you can specify both in a comma-separated list.
+Date and/or owner data will be returned if and only if your genhtml command
+has enabled annotation.
+
+If this option appears multiple times in the lcovrc file, the values are combined to form the list of binning types which are passed to your callback.
+
+See the genhtml man page for details.
+
+.PP
+
+.BR criteria_callback_levels " ="
+.IR comma_separated_list
+.IP
+This option is used to tell genhtml whether criteria callbacks should occur
+at the top, directory, or file level.
+
+If this option appears multiple times in the lcovrc file, the values are combined to form the list of report levels at which your callback will be executed.
+
+See the genhtml man page for details.
+
+.PP
+
+.BR check_existence_before_callback " ="
+.IR 0 | 1
+.IP
+This option configures the tool to check that the file exists before calling
+the
+.I annotate-script
+or
+.I version-script
+callback. If set and the file does not exist, a
+.B source
+error is triggered. (Note that the error may be ignored - see the
+.I \-\-ignore\-errors
+option.)
+
+You may want to NOT check for file existence if your callback looks
+up information in a non-local repository.
+
+The default is 1 (check for file existence).
+
+.PP
+
+.BR compute_file_version " ="
+.IR 0 | 1
+.IP
+This option is used to tell the tool to generate missing file version
+information when reading a .info (coverage data) file.
+Version information may be missing because the data was generated by a tool which did not support versioning, or because the data was generated without the required
+.I \-\-version\-script
+argument - or for some other reason.
+
+Note that this option has no effect without a version\-script callback -
+defined by either the
+.I \-\-version\-script
+command line option or the
+.I version_script
+config file option.
+
+The default is 0: do not generate missing information.
+
+.PP
+
+.BR version_script " ="
+.IR path_to_executable | parameter
+.IP
+This option is equivalent to the geninfo/lcov/genhtml "\-\-version\-script" option.
+
+This option can be used multiple times in the lcovrc file to specify both a version script and additional options which are passed to the script.
+
+See the genhtml man page for details.
+
+.PP
+
+.BR resolve_script " ="
+.IR path_to_executable | parameter
+.IP
+This option is equivalent to the geninfo/lcov/genhtml "\-\-resolve\-script" option.
+
+This option can be used multiple times in the lcovrc file to specify both a resolve script and additional options which are passed to the script.
+
+The resolve script provides a mechanism to find a
+source or data file that cannot be found by simply modifying paths via substitution
+patterns (see
+.I "substitute = replace_regexp"
+above) and searching along the corresponding directory list:
+.RS
+.IP
+.B geninfo:
+the
+.I "'build_directory = dirname'"
+config file entry
+or
+.I \-\-build\-directory
+command line option, used to search for GCNO files,
+.PP
+.IP
+.B geninfo/genhtml/lcov:
+the
+.I "'source_directory = dirname'"
+config file entry
+or
+.I \-\-source\-directory
+command line option, used to search for source files.
+.PP
+.RE
+
+.RS
+The resolve script is called as:
+.IP
+.B resolve_script
+[callback_args]
+.I " file_name"
+.PP
+
+or
+.IP
+.I $resolve_callback =
+.B resolve_module
+.I ->new([callback_args])
+.PP
+to initialize the callback, then
+.IP
+.I $resolve_callback->
+.B resolve
+.I (file_name)
+.PP
+to find the actual file location.
+
+If necessary, the callback can check the suffix of the filename to determine
+whether it should look for either a source or data file.
+.PP
+
+The script should return either an empty string (file not found/no such file) or the actual
+path name. The returned path may be either absolute or relative to CWD.
+.RE
+.PP
+
+.BR select_script " ="
+.IR path_to_executable | parameter
+.IP
+This option is equivalent to the genhtml "\-\-select\-script" option.
+
+This option can be used multiple times in the lcovrc file to specify both a select script and additional options which are passed to the script.
+
+The select script provides a mechanism to decide whether a particular
+source line is interesting - whether it should be included in the
+generated
+coverage report - or not.
+
+Lines which are not selected but fall within
+.I num_context_lines
+of a selected line are also included in the report. See below.
+
+Note that selection is fundamentally intended to show regions of code with some surrounding context. It might not do what you expect if there is no code - e.g., if the region of interest has been compiled out via compiler or exclusion directives.
+For example: when selecting based on SHA or changelist ID, an inserted comment will not be selected unless it is within
+.I num_context_lines
+of an inserted or changed line of code.
+
+The select script is called as:
+
+.B " select_script"
+[callback_args]
+.I lineDataJson annotateDataJson fileName lineNumber
+
+or as:
+
+.I " $selectCallback ="
+.B select_module
+.I ->new([callback_args])
+
+to initialize the callback object, then as
+
+.I " $selectCallback->"
+.B select
+.I (lineDataRef annotateDataRef fileName lineNumber)
+
+.RS
+to determine selection,
+where
+.IP \- 3
+.I fileName
+is the name of the source file and
+.PP
+.IP \- 3
+.I lineNumber
+is the source file line number, indexed from zero,
+.PP
+.IP \- 3
+.I lineDataJson
+is a json-encoded LineData structure (see the lcovutil.pm source code), and
+.PP
+.IP \- 3
+.I annotateDataJson
+is the json-encoded data returned by your
+.I annotate\-script
+(see the
+.I \-\-annotate\-script
+parameter in man
+.B genhtml(1).
+), or the empty string if there are no annotations for this file.
+.PP
+The module callback is similar except that it is passed objects rather than JSON encodings of the objects.
+.RE
+
+The script should return "1" or "0".
+
+See example implementation
+.I $LCOV_HOME/share/lcov/support-scripts/select.pm.
+.RE
+
+
+.PP
+
+.BR num_context_lines " = "
+.IR integer
+.IP
+Set the number of lines around each selected line which are included in the
+report - see
+.I select_script = ...
+above and the
+.I \-\-select\-script
+command line option in man
+.B genhtml(1).
+.PP
+
+.BR filter " ="
+.IR str[,str...]
+.IP
+
+This option is equivalent to the \-\-filter option to geninfo, lcov, and genhtml.
+See the genhtml man page for details.
+
+This option can be used multiple times in the lcovrc file to enable multiple filters.
+The filters specified in the lcovrc file are appended to the list specified on the command line.
+
+.br
+This option is used by genhtml, lcov, and geninfo.
+
+.PP
+
+.BR exclude " ="
+.IR glob_pattern
+.IP
+
+This option is equivalent to the \-\-exclude option to geninfo, lcov, and genhtml.
+See the lcov man page for details.
+
+This option can be used multiple times in the lcovrc file to specify multiple patterns to exclude.
+The patterns specified in the lcovrc file are appended to the list specified on the command line.
+
+.br
+This option is used by genhtml, lcov, and geninfo.
+
+.PP
+
+.BR include " ="
+.IR glob_pattern
+.IP
+
+This option is equivalent to the \-\-include option to geninfo, lcov, and genhtml.
+See the lcov man page for details.
+
+This option can be used multiple times in the lcovrc file to specify multiple patterns to include.
+The patterns specified in the lcovrc file are appended to the list specified on the command line.
+
+.br
+This option is used by genhtml, lcov, and geninfo.
+
+.PP
+
+.BR simplify_script " ="
+.IR path_to_executable | parameter
+.IP
+
+This option is equivalent to the genhtml
+.I \-\-simplify\-script
+ option.
+This option can be used multiple times in the lcovrc file to specify both a simplify script and additional options which are passed to the script.
+
+See man
+.B genhtml(1)
+for details.
+
+.PP
+
+.BR substitute " ="
+.IR regexp
+.IP
+
+This option is equivalent to the \-\-substitute option to geninfo, lcov, and genhtml.
+See the lcov man page for details.
+
+This option can be used multiple times in the lcovrc file to specify multiple substitution patterns.
+If patterns are specified on both the command line and in the lcovrc file, then
+the command line patterns are used and the lcovrc patterns are dropped.
+
+.br
+This option is used by genhtml, lcov, and geninfo.
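+.br
+For example, assuming the Perl-style 's/pattern/replacement/' substitution
+form described in the lcov man page, a pattern which maps a temporary build
+area back to the checked-out source tree (the paths are hypothetical) might
+look like:
+.nf
+
+  substitute = s#/tmp/build/area#/home/me/project#
+.fi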
+
+.PP
+
+.BR omit_lines " ="
+.IR regexp
+.IP
+
+This option is equivalent to the \-\-omit\-lines option to geninfo, lcov, and genhtml.
+See the genhtml man page for details.
+
+This option can be used multiple times in the lcovrc file to specify multiple patterns to exclude.
+The patterns specified in the lcovrc file are appended to the list specified on the command line.
+
+.br
+This option is used by genhtml, lcov, and geninfo.
+
+.PP
+
+.BR erase_functions " ="
+.IR regexp
+.IP
+
+This option is equivalent to the \-\-erase\-functions option to geninfo, lcov, and genhtml.
+See the genhtml man page for details.
+
+This option can be used multiple times in the lcovrc file to specify multiple patterns to exclude.
+The patterns specified in the lcovrc file are appended to the list specified on the command line.
+
+.br
+This option is used by genhtml, lcov, and geninfo.
+
+.PP
+
+.BR lcov_json_module " ="
+.IR module | auto
+.IP
+Specify the JSON module to use, or choose the best available from a set of
+alternatives if set to 'auto'. Note that some JSON modules are slower than
+others (notably JSON::PP can be very slow compared to JSON::XS).
+.br
+
+Default is 'auto'.
+
+.PP
+
+.BR split_char " ="
+.IR char
+.IP
+Specify the character (or regexp) used to split list-like parameters which have
+been passed as a single string.
+This parameter is useful in the case that you want to use a multi-option
+string but one or more of the options contains a comma character which would
+otherwise be seen as a delimiter.
+.br
+
+Default is ',' (comma - no quotes).
+
+.PP
+
+.BR scope_regexp " ="
+.IR regexp
+.IP
+Print debug messages for data in filenames which match
+.I regexp.
+.br
+Only certain categories of message are logged; the set changes from time
+to time - depending on debug need.
+
+.PP
+
+.BR case_insensitive " ="
+.IR [0|1]
+.IP
+Specify whether string comparison is case insensitive when finding matching
+filenames, checking include/exclude directives, etc.
+.br
+
+Note that mixed-case or lower-case pathnames may be passed to your \-\-version\-script and \-\-annotate\-script callbacks when case-insensitive matching is used. Your callbacks must handle potential differences in case.
+
+Default is '0': case sensitive matching.
+
+.PP
+
+.BR sort_input " ="
+.IR [0|1]
+.IP
+Specify whether to sort file names before capture and/or aggregation.
+Sorting reduces certain types of processing order-dependent output differences -
+.I e.g.,
+due to ambiguities in branch data generated by gcc.
+.br
+
+Default is '0': no sorting - process files in the order they were specified on the command line and/or were found during traversal of the filesystem.
+
+.PP
+
+
 .SH FILES
 .TP
-.I /etc/lcovrc
+.I $LCOV_HOME/etc/lcovrc
 The system\-wide
 .B lcov
 configuration file.
diff --git a/rpm/lcov.spec b/rpm/lcov.spec index 9eca78bd..591cb163 100644 --- a/rpm/lcov.spec +++ b/rpm/lcov.spec @@ -1,20 +1,37 @@ -Summary: A graphical GCOV front-end +Summary: A graphical code coverage front-end Name: lcov -Version: 1.15 +Version: 2.4 Release: 1 License: GPLv2+ Group: Development/Tools -URL: http://ltp.sourceforge.net/coverage/lcov.php -Source0: http://downloads.sourceforge.net/ltp/%{name}-%{version}.tar.gz +URL: https://github.com/linux-test-project/lcov +Source0: https://github.com/linux-test-project/%{name}/releases/download/v%{version}/%{name}-%{version}.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-root BuildArch: noarch Requires: perl >= 5.8.8 +Prefix: /usr +Prefix: /etc + +# Force older/more compatible payload compression and digest versions +%define _binary_filedigest 1 +%define _binary_payload w9.gzdio +%global __python %{__python3} + +# lcov Perl modules are not intended for use by other packages +%define __requires_exclude ^perl\\(lcovutil\\)$|^perl\\((criteria)\\)$|^perl\\((annotateutil)\\)$|^perl\\((gitblame)\\)$|^perl\\((gitversion)\\)$|^perl\\((select)\\)$|^perl\\((p4annotate)\\) +%define __provides_exclude ^perl.*$ + +%define _binaries_in_noarch_packages_terminate_build 0 %description -LCOV is a graphical front-end for GCC's coverage testing tool gcov. It collects -gcov data for multiple source files and creates HTML pages containing the -source code annotated with coverage information. It also adds overview pages -for easy navigation within the file structure. +LCOV is a set of command line tools that can be used to collect, process, and +visualize code coverage data in an easy-to-use way. It aims to be suitable for +projects of a wide range of sizes, with particular focus on deployment in +automated CI/CD systems and large projects implemented using multiple languages. + +LCOV works with existing environment-specific profiling mechanisms including, +but not limited to, the gcov tool that is part of the GNU Compiler Collection +(GCC). %prep %setup -q -n %{name}-%{version} @@ -32,7 +49,9 @@ rm -rf $RPM_BUILD_ROOT %files %defattr(-,root,root) /usr/bin/* +/usr/lib/* /usr/share/man/man*/* +/usr/share/lcov/* %config /etc/* %changelog diff --git a/scripts/P4version.pm b/scripts/P4version.pm new file mode 100644 index 00000000..8a572ff5 --- /dev/null +++ b/scripts/P4version.pm @@ -0,0 +1,250 @@ +#!/usr/bin/env perl +# Copyright (c) MediaTek USA Inc., 2022-2024 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# +# P4version.pm [--md5] [--allow-missing] [--local-edit] [--prefix path] depot_path +# +# Called as: +# $callback = P4version->new(@args) : constructor +# $version = $callback->version($filepath) : extract version +# $callback->compare_version($version1, $version2, $filepath) : compare versions +# +# Options: +# depot_path: +# Root of P4 repository +# --allow-missing +# If set, do not error out if called with file which is not in git. 
+# Default is to error out. +# --local-edit +# Look for - and support - local edit +# --prefix +# If specified, 'path' is prependied to 'pathname' (as 'path/pathname') +# before processing. +# --md5 +# Return MD5 signature for files that are not in git + +# This is a sample script which uses p4 commands to determine +# the version of the filename parameter. +# Version information (if present) is used during ".info" file merging +# to verify that the data the user is attempting to merge is for the same +# source code/same version. +# If the version is not the same - then line numbers, etc. may be different +# and some very strange errors may occur. + +package P4version; + +use strict; +use POSIX qw(strftime); +use File::Spec; +use Cwd qw(abs_path); +use File::Basename qw(dirname basename); +use Getopt::Long qw(GetOptionsFromArray); + +use annotateutil qw(get_modify_time compute_md5); + +our @ISA = qw(Exporter); +our @EXPORT_OK = qw(new extract_version compare_version); + +use constant { + ALLOW_MISSING => 0, + LOCAL_EDIT => 1, + MD5 => 2, + PREFIX => 3, + DEPOT => 4, + HASH => 5, + + FULLNAME => 0, + DEPOT_PATH => 1, + TRIMMED => 2, + VERSION => 3, +}; + +sub usage +{ + my $exe = shift; + #$exe = basename($exe); + if (@_) { + print(STDERR "ERROR:\n $exe ", join(' ', @_), "\n"); + } + print(STDERR< \$use_md5, + '--prefix:s' => \$prefix, + '--allow-missing' => \$allow_missing, + '--local-edit' => \$local_edit, + '--help' => \$help) || + $help || + scalar(@_) > 1 + ) { + usage($script, @args); + exit(defined($help) ? 0 : 1) if ($script eq $0); + return undef; + } + my $depot = ''; + my $cd = ''; + my $dots = '...'; + if (@_) { + $depot = $_[0]; + die("depot root '$depot' is not a directory") unless -d $depot; + $cd = "cd $depot ; "; + $dots = '/...'; + } + my $root = Cwd::abs_path($depot ? $depot : '.'); + my $len = length($root); + + my %filehash; + open(P4, '-|', "$cd p4 have $depot$dots") or + die("unable to execute 'p4 have': $!"); + while () { + if (/^(.+?)#([0-9]+) - (.+)$/) { + my $depot_path = $1; + my $version = $2 ? "#$2" : '@head'; + my $filename = $3; + next unless -e $filename; # filename has ben deleted + my $full = Cwd::abs_path($filename); + die("unexpected depot filename $filename") + unless $root eq substr($filename, 0, $len); + my $trimmed = substr($filename, $len); + die("unexpected duplicate $trimmed") if exists($filehash{$trimmed}); + my $data = [$full, $depot_path, $trimmed, $version]; + $filehash{$trimmed} = $data; + $filehash{$depot_path} = $data; + next if $full eq $trimmed; + die("unexpected duplicate '$full' for '$filename'") + if exists($filehash{$full}); + $filehash{$full} = $data; + } else { + die("unexpected p4 have line '$_'"); + } + } + unless (close(P4)) { + # there might not be a repo here.. + die("error on close 'p4 have' pipe: $!") if %filehash; + $depot = '.' unless $depot; + lcovutil::ignorable_error($lcovutil::ERROR_USAGE, + "'$depot' seems to not be a perforce repo."); + goto done; + } + + open(WHERE, '-|', "$cd p4 where $depot") or + die("unable to execute p4 where: $!"); + my ($depot_path, $workspace_path, $workspace_dir); + while () { + if (/^(\S+)\s*(\S+)\s*(\S+)$/) { + $depot_path = substr($1, 0, -4); # remove the '/...' tail + $workspace_path = substr($2, 0, -4); + $workspace_dir = substr($3, 0, -4); + last; + } + } + close(WHERE) or die("error on close 'p4 where' pipe: $!"); + + # check for local edits... 
+ open(EDITS, '-|', "$cd p4 opened $depot$dots") or + die("unable to execute p4 opened: $!"); + while () { + if ( + /^(.+?)(#[0-9]+) - (edit|add|delete) (default change|change (\S+)) /) + { + # file is locally edited...append modify time or MD5 signature to the version ID + my $data; + if (exists($filehash{$1})) { + die("unexpected 'add' state") if 'add' eq $3; + $data = $filehash{$1}; + } else { + die("unexpected 'add' state") unless 'add' eq $3; + my $trimmed = substr($1, length($depot_path)); + my $full_name = $workspace_dir . $trimmed; + $data = [$full_name, $1, $trimmed, '#add']; + $filehash{$full_name} = $data; + $filehash{$1} = $data; + $filehash{$trimmed} = $data unless $trimmed eq $full_name; + } + if (!$local_edit) { + die("$1$2 has local changes - see '--local-edit' flag"); + } + my $fullpath = $data->[FULLNAME]; + my $version = $1 + . + ($use_md5 ? (' md5:' . compute_md5($fullpath)) : + (' edited ' . get_modify_time($fullpath))); + $data->[VERSION] = $version; + } else { + die("unexpected 'p4 opened' line '$_'"); + } + } + close(EDITS) or die("error on close 'p4 opened' pipe: $!"); + + done: + my $self = + [$allow_missing, $local_edit, $use_md5, $prefix, $depot, \%filehash]; + return bless $self, $class; +} + +sub extract_version +{ + my ($self, $filename) = @_; + + if (!File::Spec->file_name_is_absolute($filename) && + defined($self->[PREFIX])) { + $filename = File::Spec->catfile($self->[PREFIX], $filename); + } + + unless (-e $filename) { + if ($self->[ALLOW_MISSING]) { + return ''; # empty string + } + die("Error: $filename does not exist - perhaps you need the '--allow-missing' flag" + ); + } + my $pathname = abs_path($filename); + + return $self->[HASH]->{$pathname}->[VERSION] + if (exists($self->[HASH]->{$pathname})); + + # not in P4 - just print the modify time, so we have a prayer of + # noticing file differences + my $version = $self->[MD5] ? ('md5:' . 
compute_md5($pathname)) : + get_modify_time($pathname); + return $version; +} + +sub compare_version +{ + my ($self, $new, $old, $filename) = @_; + + return ($old ne $new); # for the moment, just look for exact match +} + +1; diff --git a/scripts/analyzeInfoFiles b/scripts/analyzeInfoFiles new file mode 100755 index 00000000..8d99caef --- /dev/null +++ b/scripts/analyzeInfoFiles @@ -0,0 +1,571 @@ +#!/usr/bin/env perl + +# a bit of a hack: +# Look through a set of .info files +# - assumption is that all the files are for an identical code base +# Find places where the coverpoints in the files differ: +# - one file contains source that is not in the other +# - source from one file contains coverpoints not found in the other + +use strict; +use warnings; +use Getopt::Long; +use FindBin; + +use lib "$FindBin::RealBin/../lib"; +use lib "$FindBin::RealBin/../../../lib"; # path from 'support-scripts' + +use lcovutil; + +our ($keep_going, $dontDrop, $compact, $sortBySize, $reportAgreeBlocks, + $verbose); + +our @infoFileArray; + +package Region; + +sub new +{ + my ($class, $start, $finish, $in, $out) = @_; + + my $self = [$start, $finish, $in, $out]; + bless $self, $class; + return $self; +} + +sub start +{ + my $self = shift; + return $self->[0]; +} + +sub finish +{ + my $self = shift; + return $self->[1]; +} + +sub size +{ + my $self = shift; + return $self->[1] - $self->[0] + 1; +} + +sub consistent +{ + my $self = shift; + return !(scalar(@{$self->[2]}) && scalar(@{$self->[3]})); +} + +sub in +{ + my $self = shift; + return $self->[2]; +} + +sub out +{ + my $self = shift; + return $self->[3]; +} + +sub buildCodeKey +{ + my $self = shift; + my $code = $self->[2]; + my $notcode = $self->[3]; + my $key = ''; + for (my $i = 0; $i < $main::numInfoFiles; ++$i) { + #my $name = $main::infoFileArray[$i]; + my $char = $i ~~ @{$code} ? '1' : ($i ~~ @$notcode ? '_' : '.'); + $key .= $char; + } + return $key; +} + +sub toNames +{ + my $list = shift; + my @rtn; + foreach my $idx (@$list) { + push(@rtn, $main::infoFileArray[$idx]); + } + return @rtn; +} + +sub print +{ + my ($self, $dest) = @_; + + my ($first, $last, $code, $nonCode) = @$self; + + print($dest "$first : $last:"); + + if ($main::compact) { + print($dest ' ', $self->buildCodeKey(), ' ', $self->size(), " line", + $self->size() == 1 ? '' : 's', "\n"); + } else { + print($dest " ", $self->size(), " line", + $self->size() == 1 ? '' : 's', "\n"); + if (%$code) { + if ($main::verbose) { + print($dest "\tcode:\n\t\t" . + join("\n\t\t", toNames($code)) . "\n"); + } else { + print($dest "\tcode: " . join(' ', @$code) . "\n"); + } + } + if (%$nonCode) { + if ($main::verbose) { + print($dest "\tnot code:\n\t\t" . + join("\n\t\t", toNames($nonCode)) . "\n"); + } else { + print($dest "\tnot code: " . join(' ', @$nonCode) . 
"\n"); + } + } + } +} + +package FileData; + +sub new +{ + my ($class, $filename) = @_; + + # Data: filename, [TraceInfo, # info for this file + # list of [traceInfo, infoFileIndex] for each .info file containing this file + # list of regions in order: [first, last, [in], [out]] + my $self = [$filename, [], []]; + bless $self, $class; + + return $self; +} + +sub name +{ + my $self = shift; + return $self->[0]; +} + +sub traces +{ + # return list of [TraceInfo, infoFileIndex] for every .info file containing + # this source file + my $self = shift; + return $self->[1]; +} + +sub regions +{ + # return list of [first last in out] for every identified region + my $self = shift; + return $self->[2]; +} + +sub regionsBySize +{ + my $self = shift; + return + sort({ $b->size() <=> $a->size() or + $a->start() <=> $b->start() } @{$self->regions()}); +} + +sub totalRegionSize +{ + my $self = shift; + my $size = 0; + foreach my $r (@{$self->regions()}) { + $size += $r->size(); + } + return $size; +} + +sub printCodeGroups +{ + my ($code, $nonCode, $dest) = @_; + if ($main::compact) { + print($dest ' ', buildCodeKey($code, $nonCode, $main::numInfoFiles), + "\n"); + } else { + print($dest "\n"); + if (%$code) { + if ($main::verbose) { + print($dest "\tcode:\n\t\t" . + join("\n\t\t", hashToNames($code)) . "\n"); + } else { + print($dest "\tcode: " . join(' ', sort keys(%$code)) . "\n"); + } + } + if (%$nonCode) { + if ($main::verbose) { + print($dest "\tnot code:\n\t\t" . + join("\n\t\t", hashToNames($nonCode)) . "\n"); + } else { + print($dest "\tnot code: " . + join(' ', sort keys(%$nonCode)) . "\n"); + } + } + } +} + +sub print +{ + my ($self, $dest) = @_; + $dest = \*STDOUT unless defined($dest); + + my $title = + $self->name() . ":\n total: " . $self->totalRegionSize() . "\n"; + + foreach + my $r ($main::sortBySize ? $self->regionsBySize() : @{$self->regions()}) + { + print($dest $title); + $r->print($dest); + $title = ''; + } + return $title eq ''; # return non-zero if I printed something +} + +sub checkLineCoverageConsistency +{ + my $self = shift; + + my $traces = $self->traces(); + return 1 + if (scalar(@$traces) < 2); # nothing to check..only one set of data + + # first, collect line coverage data for all the .info files + # does everyone agree that this line is code or is not code? + my @lineCovData; + foreach my $t (@$traces) { + my ($traceInfo, $idx) = @$t; + my $d = $traceInfo->sum(); + push(@lineCovData, [$d, $idx]); + } + my $srcfile = $self->name(); + my $consistent = 1; + + my $numLines; + if (-f $srcfile) { + ($numLines) = split(' ', `wc -l $srcfile`); + } else { + # hard way: look through all the data to find the highest line number + $numLines = 0; + foreach my $lineData (@lineCovData) { + my @lines = sort $lineData->[0]->keylist(); + my $largest = $lines[-1]; + $numLines = $largest + if $largest > $numLines; + } + } + my $currentGroupStart; + my %currentCodeGroup; + my %currentNonCodeGroup; + + for (my $lineNo = 1; $lineNo < $numLines; ++$lineNo) { + my %codeGroup; + my %nonCodeGroup; + foreach my $lineData (@lineCovData) { + my ($d, $infoFileIdx) = @$lineData; + if (defined($d->value($lineNo))) { + $codeGroup{$infoFileIdx} = 1; + } else { + $nonCodeGroup{$infoFileIdx} = 1; + } + } + if (!defined($currentGroupStart)) { + $currentGroupStart = $lineNo; + %currentNonCodeGroup = %nonCodeGroup; + %currentCodeGroup = %codeGroup; + next; + } + if (!%codeGroup || + !%nonCodeGroup) { + # everyone agrees that this is is is not code.. 
+ if (scalar(%currentCodeGroup) && + scalar(%currentNonCodeGroup)) { + + # there was disagreement before, but now we agree + my $region = Region->new($currentGroupStart, + $lineNo - 1, + [sort(keys %currentCodeGroup)], + [sort(keys %currentNonCodeGroup)]); + push(@{$self->regions()}, $region); + + #print($title, ' '); + #$title = ''; + #$region->print(\*STDOUT); + $currentGroupStart = $lineNo; + $consistent = 0; + } + %currentCodeGroup = %codeGroup; + %currentNonCodeGroup = %nonCodeGroup; + } else { + # we have some disagreement about whether this is code or not + # - is the 'in' and 'out' group the same as it was before? + if (!(hashIsSame(\%currentCodeGroup, \%codeGroup) && + hashIsSame(\%currentNonCodeGroup, \%nonCodeGroup))) { + if (defined($main::reportAgreeBlocks) || + (%currentCodeGroup && + %currentNonCodeGroup) + ) { + + my $region = + Region->new($currentGroupStart, + $lineNo - 1, + [sort(keys %currentCodeGroup)], + [sort(keys %currentNonCodeGroup)]); + push(@{$self->regions()}, $region); + #print($title, ' '); + #$title = ''; + #$region->print(\*STDOUT); + } + %currentNonCodeGroup = %nonCodeGroup; + %currentCodeGroup = %codeGroup; + $currentGroupStart = $lineNo; + $consistent = 0; + } + } + } + if (%currentCodeGroup && + %currentNonCodeGroup) { + $consistent = 0; + } + # got to end of file.. + if ($reportAgreeBlocks || (%currentCodeGroup && %currentNonCodeGroup)) { + my $region = Region->new($currentGroupStart, + $numLines, + [sort(keys %currentCodeGroup)], + [sort(keys %currentNonCodeGroup)]); + push(@{$self->regions()}, $region); + #print($title, " "); + #$region->print(\*STDOUT); + } + return $consistent; +} + +sub hashIsSame +{ + my ($hash1, $hash2) = @_; + + while (my ($k, $v) = each(%$hash1)) { + return 0 unless exists($hash2->{$k}); + } + while (my ($k, $v) = each(%$hash2)) { + return 0 unless exists($hash1->{$k}); + } + return 1; +} + +package main; + +my $help; + +my $err = !GetOptions("verbose|v" => \$verbose, + 'substitute=s' => \@lcovutil::file_subst_patterns, + 'exclude=s' => \@lcovutil::exclude_file_patterns, + 'include=s' => \@lcovutil::include_file_patterns, + "keep-going" => \$keep_going, + "drop" => \$dontDrop, + "all" => \$reportAgreeBlocks, + "compact" => \$compact, + "sort" => \$sortBySize, + "help|h" => \$help,); + +if ($err) { + print(STDERR "$0: invalid argument:\n"); +} +if ($err || $help) { + if (!$err) { + print("Check for consistency in set of .info files\n"); + } + my $dest = $help ? 
\*STDOUT : \*STDERR; + print($dest <) { + chomp($_); + next + if ($_ eq '' || + $_ =~ /\s*#/); # comment character + push(@infoFileArray, $_); + } + close(INPUT); + } +} + +my %infoFiles; +my %sourceFiles; + +our $numInfoFiles = scalar(@infoFileArray); + +my $status = 0; + +my $idx = -1; +print("Info file mapping:\n"); +foreach my $f (@infoFileArray) { + $idx++; + print(" $idx: $f\n"); +} +print("\n\n"); + +$idx = -1; +foreach my $f (@infoFileArray) { + my $info = TraceFile->load($f); + $infoFiles{$f} = [$info, ++$idx]; + # and collect list of source files that appear in each + foreach my $src ($info->files()) { + my $sourceFileData; + if (!exists($sourceFiles{$src})) { + $sourceFileData = FileData->new($src); + $sourceFiles{$src} = $sourceFileData; + } else { + $sourceFileData = $sourceFiles{$src}; + } + + my $traces = $sourceFileData->traces(); + my $fInfo = $info->data($src) + ; # the TraceInfo for this source, in this .info file + if (@$traces) { + # this isn't the first time we see this source file - check for version mismatch + my $fv = $fInfo->version(); + my $v = $traces->[0]->[0]->version(); + if ((defined($fv) && !defined($v)) || + (!defined($fv) && defined($v)) || + (defined($fv) && defined($v) && $fv ne $v)) { + # versions don't match - so don't bother to check for matching + # line coverage data + print("Error: version mismatch for $src between:\n\t" . + $traces->[0]->[1] . + ": " . ($v ? $v : 'undef') . "\n\t" . $f . ": " . + ($fv ? $fv : 'undef') . "\n"); + $status = 1; + # version mismatch - so don't bother checking data for this file + next; + } + } + push(@$traces, [$fInfo, $idx]); + } +} + +# we have a list of all the source files now... +# check that all of them appear in every .info file we read +my %missing; +foreach my $filename (sort keys %sourceFiles) { + my $data = $sourceFiles{$filename}; + my $traces = $data->traces(); + if (scalar(@$traces) != $numInfoFiles) { + $status = 1; + my %names; # which info files DO contain this source file? + foreach my $d (@$traces) { + $names{$d->[1]} = 1; + } + print("Error: Source file '$filename' missing from:\n") + if $verbose; + my $idx = -1; + foreach my $f (@infoFileArray) { + ++$idx; + next if exists($names{$idx}); + print("\t$f\n") + if $verbose; + if (exists($missing{$f})) { + push(@{$missing{$f}}, $filename); + } else { + $missing{$f} = [$filename]; + } + } + } +} + +if (%missing) { + print("Files missing from .info data:\n"); + + foreach my $f (sort keys %missing) { + print("\t$f:\n"); + foreach my $src (@{$missing{$f}}) { + print("\t\t$src\n"); + # and don't look at this file + delete $sourceFiles{$src} unless $dontDrop; + } + } +} + +exit($status) unless defined($keep_going) || $status == 0; + +# now go through the source files to check that line coverpoints are the same +# in all of them +foreach my $srcfile (sort keys %sourceFiles) { + + my $srcData = $sourceFiles{$srcfile}; + if (!$srcData->checkLineCoverageConsistency()) { + $status = 1; + } +} + +# now sort the data and print it.. +my @fileOrder = + $sortBySize ? 
+ sort({ $sourceFiles{$b}->totalRegionSize() + <=> $sourceFiles{$a}->totalRegionSize() or + $a cmp $b } keys(%sourceFiles)) : + sort(keys %sourceFiles); + +foreach my $srcFile (@fileOrder) { + my $printed = $sourceFiles{$srcFile}->print(\*STDOUT); + print("\n") if $printed; +} + +exit($status); diff --git a/scripts/annotateutil.pm b/scripts/annotateutil.pm new file mode 100644 index 00000000..1435b12f --- /dev/null +++ b/scripts/annotateutil.pm @@ -0,0 +1,172 @@ +#!/usr/bin/env perl + +# Copyright (c) MediaTek USA Inc., 2020-2024 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# annotateutil.pm: some common utilities used by sample 'annotate' scripts +# +package annotateutil; + +use strict; +use POSIX qw(strftime); +use Cwd qw(abs_path); + +our @ISA = qw(Exporter); +our @EXPORT_OK = qw(get_modify_time compute_md5 not_in_repo + resolve_cache_dir find_in_cache store_in_cache + call_annotate call_get_version); + +sub get_modify_time($) +{ + my $filename = shift; + my @stat = stat $filename; + my $tz = strftime("%z", localtime($stat[9])); + $tz =~ s/([0-9][0-9])$/:\1/; + return strftime("%Y-%m-%dT%H:%M:%S", localtime($stat[9])) . $tz; +} + +sub not_in_repo +{ + my ($pathname, $lines) = @_; + my $context = ''; + eval { $context = MessageContext::context(); }; + my $mtime = get_modify_time($pathname); # when was the file last modified? + # who does the filesystem think owns it? + my $owner = getpwuid((stat($pathname))[4]); + + open(HANDLE, $pathname) or die("unable to open '$pathname'$context: $!"); + while (my $line = ) { + chomp $line; + # Also remove CR from line-end + s/\015$//; + + push(@$lines, [$line, $owner, undef, $mtime, "NONE"]); + } + close(HANDLE) or die("unable to close '$pathname'$context"); +} + +sub compute_md5 +{ + my $filename = shift; + die("$filename not found") unless -e $filename; + my $null = File::Spec->devnull(); + my $md5 = `md5sum $filename 2>$null`; + $md5 =~ /^(\S+)/; + return $1; +} + +sub call_annotate +{ + my $cb = shift; + my $class; + my $filename = pop; + eval { $class = $cb->new(@_); }; + die("$cb construction error: $@") if $@; + my ($status, $list) = $class->annotate($filename); + foreach my $line (@$list) { + my ($text, $abbrev, $full, $when, $cl) = @$line; + print("$cl|$abbrev", $full ? ";$full" : '', "|$when|$text\n"); + } + exit $status; +} + +sub call_get_version +{ + my $cb = shift; + my $class; + my $filename = pop; + eval { $class = $cb->new(@_); }; + die("$cb construction error: $@") if $@; + my $v = $class->extract_version($filename); + print($v, "\n"); + exit 0; +} + +sub resolve_cache_dir +{ + my $cache_dir = shift; + if ($cache_dir) { + lcovutil::ignorable_warning($lcovutil::ERROR_USAGE, + 'It is unwise to use an --annotate-script callback with --cache-dir without a --version-script to verify version match.' 
+ ) unless $lcovutil::versionCallback; + if (-e $cache_dir) { + die("cache '$cache_dir' not writable directory") + unless -d $cache_dir && -w $cache_dir; + } else { + File::Path::make_path($cache_dir) or + die("unable to create '$cache_dir': $!"); + } + $cache_dir = abs_path($cache_dir); + } + return $cache_dir; +} + +sub find_in_cache +{ + my ($cache_dir, $filename) = @_; + + my ($cachepath, $version); + my $cachepath = File::Spec->catfile($cache_dir, + File::Spec->file_name_is_absolute( + $filename) ? + substr($filename, 1) : + $filename); + if (-f $cachepath) { + # matching version? + my ($cache_version, $lines); + eval { + my $data = Storable::retrieve($cachepath); + if (defined($data)) { + ($cache_version, $lines) = @$data; + $version = lcovutil::extractFileVersion($filename); + } + }; + if ($@) { + lcovutil::ignorable_error($lcovutil::ERROR_CORRUPT, + "unable to deserialize $cachepath for $filename annotation: $@\n"); + } + if (defined($lines)) { + # pass 'silent' to version check so we don't get error on mismatch + return (0, $version, $lines) + if (!$lcovutil::versionCallback || + lcovutil::is_ignored($lcovutil::ERROR_VERSION) || + !(defined($version) != defined($cache_version)) + || + lcovutil::checkVersionMatch( + $filename, $version, $cache_version, "annotate-cache", 1 + )); + lcovutil::info(1, "annotate: cache version check failed\n"); + } + } + return ($cachepath, $version); +} + +sub store_in_cache +{ + my ($cache_path, $filename, $version, $lines) = @_; + + $version = lcovutil::extractFileVersion($filename) + unless $version; + my $parent = File::Basename::dirname($cache_path); + unless (-d $parent) { + File::Path::make_path($parent) or + die("unable to create cache directory $parent: $!"); + } + Storable::store([$version, $lines], $cache_path) or + die("unable to store $cache_path"); +} + +1; diff --git a/scripts/batchGitVersion.pm b/scripts/batchGitVersion.pm new file mode 100644 index 00000000..b8edb710 --- /dev/null +++ b/scripts/batchGitVersion.pm @@ -0,0 +1,331 @@ +#!/usr/bin/env perl +# Copyright (c) MediaTek USA Inc., 2023 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# This implementation creates an initial database to hold the version stamps +# for all files in the repo - then simply queries that DB during +# execution. See 'usage' for details:# +# .../batchGitVersion.pm --help +# +# This is a sample script which uses git commands to determine +# the version of the filename parameter. +# Version information (if present) is used during ".info" file merging +# to verify that the data the user is attempting to merge is for the same +# source code/same version. +# If the version is not the same - then line numbers, etc. may be different +# and some very strange errors may occur. 
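+#
+# A minimal usage sketch (the repository path and file names below are
+# illustrative only - see 'usage' below for the full option list):
+#
+#   use batchGitVersion;
+#   my $cb = batchGitVersion->new($0, '--repo', '/path/to/repo', '--md5');
+#   my $version = $cb->extract_version('/path/to/repo/src/foo.c');
+#   exit 1 if $cb->compare_version($new_version, $old_version, 'src/foo.c');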
+ +package batchGitVersion; + +use strict; +use Getopt::Long qw(GetOptionsFromArray); +use File::Spec; +use File::Basename qw(dirname basename); +use Cwd qw/getcwd/; + +use FindBin; +use lib "$FindBin::RealBin"; +use annotateutil qw(get_modify_time not_in_repo compute_md5 call_get_version); + +our @ISA = qw(Exporter); +our @EXPORT_OK = qw(new extract_version compare_version usage); + +# should probably use "BLOB" as the token - so anyone we call can know that this +# is a blob sha - and can look up the file sha, if desired +my $shaToken = 'BLOB'; + +sub usage +{ + my ($script, $help) = @_; + $script = $0 unless defined $script; + my $exe = basename $script; + print(STDERR<new([--md5] [--allow-missing [--repo repo] \\ + [--prepend path] [--prefix dir]* \\ + [--token string] \\ + [-v | --verbose]*) + $exe->extract_version(pathname) + $exe->compare(old_version, new_version, pathname) +EOF + + if ($help) { + print(STDERR<new(...) # to initialize git_data + + --md5 : return MD5 signature if pathname not found in repo + --allow-missing: if set: return empty string if pathname not found + otherwise: die (fatal error) + --repo dir : where to find the git repo + --prepend path : prepend path to names found in repo before storing + e.g., if path is 'x/y' and object 'dir/file' is found + in the repo, then 'x/y/dir/file' is stored. + --prefix dir : add dir to the list of directories to search, to find + pathname. + --token string : use string as the blob sha token in the version string. + default value is 'BLOB' - so application can + distinguish between SHA types - say, to complare + to compare a BLOB SHA to a file SHA. + For backward compatibility with earlier versions of + this script, use '--token SHA'. + -v | --verbose : increase verbosity + + Setting the verbosity flag causes the script to print some (hopefully useful) + debug information - so you can see why your use is not working the way you + might have expected. + + The second call queries DB to find 'pathname'. + - 'pathname' may be be a file name which is found in the git repo, but + with some prefix prepended. For example: + pathname: /build/directory/path/repo/dir/file + filename: repo/dir/file + - if script is called as + \$ $exe --prefix /build/directory/path my_git_data \ + /build/directory/path/repo/dir/file + $shaToken git_sha_string + - zero or more --prefix arguments can be specified. + $exe will look at each, in the order specified. + - if pathname is not found in the DB: + - if pathname does not resolve to a file: + - '' (empty string) if '--allow-missing' flag used, + - else, error + - if '--md5' is passed: return MD5 checksum of the file + - else return file creation timestamp + NOTE: $exe DOES NOT CHECK FOR LOCAL CHANGES that are not checked in + to your git repo - so versions will compare as identical even + if the local file has been edited. + Please commit your changes before running $exe. + + The third call passes two version strings which are expected to be the same. + Under normal circumstances, the version strings will have been returned by + some call(s) to $exe. + Exit status is 0 when files match, 1 otherwise. + + To diagnose version mismatches using these SHAs: + - You can git diff them to see how they are different the same way you + 'git diff' commit shas (except you do not to specify a file) + - You can 'git log commit1..commit2' because you should also store + the overall sha of these two points (again if the scripting just + wants to know the delta). 
+ Again note: the normal git way of asking these types of questions is to + just store a single commit shas, unlike perforce/svn that exactly + represents the current files, 'git diff --name-status' can VERY quickly + tell you what has changed. + There is also a mechanism for determining which commits contain which + blobs given a file and a starting point. Again it is just easier + to use 'git log commit1..commit2' +EOF + } else { + print(STDERR "\n see '$exe --help' for more information\n"); + } +} + +use constant { + DB => 0, + PREFIX => 1, + MD5 => 2, + MISSING => 3, + VERBOSE => 4, +}; + +sub new +{ + my $class = shift; + my $script = shift; + my $stand_alone = $0 eq $script; + # script should be me... + my $use_md5; + my $allow_missing; + my $repo; + my $prepend; + my @prefix; + my $help; + my $verbose = 0; + + if (!GetOptionsFromArray(\@_, + ("md5" => \$use_md5, + 'prefix:s' => \@prefix, + 'repo:s' => \$repo, + 'allow-missing' => \$allow_missing, + 'prepend:s' => \$prepend, + "verbose|v+" => \$verbose, + 'token:s' => \$shaToken, + 'help' => \$help,)) || + ($stand_alone && 0 != scalar(@_)) || + $help + ) { + usage($script, $help); + exit(defined($help) && 0 == scalar(@_) ? 0 : 1) if $stand_alone; + return undef; + } + my %db; + my $cd = $repo ? "cd $repo ;" : ''; + open(GIT, '-|', "$cd git ls-tree -r --full-tree HEAD") or + die("unable to execute git: $!"); + my @prepend; + if ($prepend) { + push(@prepend, $prepend); + } + my $errLeader = "unexpected git ls-tree entry:\n "; + my %submodule; + while () { + if (/^\d+\s+blob\s+(\S+)\s+(.+)$/) { + # line format: mode blob sha path + $db{File::Spec->catfile(@prepend, $2)} = $1; + } elsif (/^\d+\s+commit\s+(\S+)\s+(\S+)$/) { + # line format: mode commit sha path + die("duplicate submodule etnry for $2") if exists($submodule{$2}); + $submodule{$2} = $1; + } else { + print(STDERR "$errLeader$_"); + $errLeader = ' '; + } + } + close(GIT) or die("error on close $repo pipe: $!"); + # now look for submodules + open(GIT, '-|', + "$cd git submodule foreach 'git ls-tree -r --full-tree HEAD ; echo done'") + or + die("unable to execute git: $!"); + my $current; + my @stack; + my $number = 2; + my $countdown = $number * $verbose; + my $prefix = ''; + while () { + if (/^\d+\s+blob\s+(\S+)\s+(.+)$/) { + # line format: mode blob sha path + die("unknown current submodule") unless defined($current); + $db{File::Spec->catfile(@prepend, $current, $2)} = $1; + if ($countdown) { + --$countdown; + print("${prefix}storing " . + File::Spec->catfile(@prepend, $current, $2) . 
+ " => $1\n"); + print("$prefix ...\n") unless $countdown; + } + } elsif (/^\d+\s+commit(\S+)\s+(\s+)$/) { + # line format: mode commit sha path + my $s = File::Spec->catfile(@prepend, $current, $2); + die("duplicate submodule etnry for $s") if exists($submodule{$s}); + $submodule{$s} = $1; + } elsif (/^Entering '([^']+)'$/) { + $current = File::Spec->catfile(@stack, $1); + push(@stack, $1); + die("found unexpected submodule '$current'") + unless exists($submodule{$current}); + $countdown = $number * $verbose; + if ($countdown) { + print("${prefix}enter submodule $current\n"); + $prefix .= ' '; + } + } elsif (/^done$/) { + die("empty stack") unless @stack; + pop(@stack); + if (@stack) { + $current = File::Spec->catfile(@stack); + } else { + $current = undef; + } + if ($verbose) { + print("${prefix}exit submodule $current\n"); + $prefix = substr($prefix, 2); + } + } else { + print(STDERR "$errLeader$_"); + $errLeader = ' '; + } + } + close(GIT) or die("error on close submodule pipe: $!"); + + $repo = getcwd() unless $repo; + push(@prefix, $repo) unless grep(/^$repo/, @prefix); + + # @todo enhancement: could look for local edits and store + # them into the DB here + foreach my $p (@prefix) { + # want all the prefixes to end with dir separator so we can + # just concat them + $p .= '/' unless substr($p, -1) eq '/'; + } + + my $self = [\%db, \@prefix, $use_md5, $allow_missing, $verbose]; + return bless $self, $class; +} + +sub extract_version +{ + my ($self, $file) = @_; + my $db = $self->[DB]; + my $prefix = $self->[PREFIX]; + my $verbose = $self->[VERBOSE]; + print("extract_version($file)\n") if $verbose; + if (@$prefix) { + # check we we can strip the prefix off the filename - to find it in the DB + foreach my $p (@$prefix) { + print(" check prefix $p ..\n") if $verbose; + if (0 == index($file, $p)) { + print(" .. match\n") if $verbose; + my $tail = substr($file, length($p)); + if (exists($db->{$tail})) { + print(" .. found\n") if $verbose; + return $shaToken . ' ' . $db->{$tail}; + } + } + } + } + + if (exists($db->{$file})) { + print(" .. found\n") if $verbose; + return $shaToken . ' ' . $db->{$file}; + } + + unless (-e $file) { + if ($self->[MISSING]) { + return ''; # empty string + } + die("Error: $file does not exist - perhaps you need the '--allow-missing' flag" + ); + } + my $version = get_modify_time($file); + $version .= ' md5:' . compute_md5($file) + if ($self->[MD5]); + return $version; +} + +sub compare_version +{ + my ($self, $new, $old, $file) = @_; + + if ($self->[MD5] && + $old !~ /^$shaToken/ && + $old =~ / md5:(.+)$/) { + my $o = $1; + if ($new =~ / md5:(.+)$/) { + return ($o ne $1); + } + # otherwise: 'new' was not an MD5 signature - so fall through to exact match + } + return ($old ne $new); # just look for exact match +} + +unless (caller) { + call_get_version("batchGitVersion", $0, @ARGV); +} + +1; diff --git a/scripts/context.pm b/scripts/context.pm new file mode 100644 index 00000000..45785032 --- /dev/null +++ b/scripts/context.pm @@ -0,0 +1,89 @@ +#!/usr/bin/env perl + +# Copyright (c) MediaTek USA Inc., 2024 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# context +# +# This script is used as a lcov/geninfo/genhtml "--context-script context" callback. +# It is called at the end of tool execution to collect and store data which +# might be useful for infrastructure debugging and/or tracking. +# +# The result is a hash of key/value pairs - see man genhtml(1) for more +# details. +# +# You may want to collect and entirely different set of data. +# You can also add operations to the constructor to do something earlier in +# processing - e.g., to write data to some other files(s), etc. + +package context; + +use strict; +use Getopt::Long qw(GetOptionsFromArray); +use lcovutil; + +our @ISA = qw(Exporter); +our @EXPORT_OK = qw(new); + +sub new +{ + my $class = shift; + my $script = shift; + my $standalone = $script eq $0; + my @options = @_; + my $comment; + + if (!GetOptionsFromArray(\@_, ('comment' => \$comment)) || + (!$standalone && @_)) { + print(STDERR "Error: unexpected option:\n " . + join(' ', @options) . "\nusage: [--comment]\n"); + exit(1) if $standalone; + return undef; + } + my $self = [$script]; + + $self = bless $self, $class; + if ($comment) { + # 'genhtml' and certain 'lcov' modes do not write a '.info' file + # so the comments won't go anywhere + my $data = $self->context(); + foreach my $key (sort keys %$data) { + push(@lcovutil::comments, $key . ': ' . $data->{$key}); + } + } + + return $self; +} + +sub context +{ + my $self = shift; + + my %data; + $data{user} = `whoami`; + $data{perl_version} = $^V->{original}; + $data{perl} = `which perl`; + $data{PERL5LIB} = $ENV{PERL5LIB} + if exists($ENV{PERL5LIB}); + + foreach my $k (keys %data) { + chomp($data{$k}); + } + + return \%data; +} + +1; diff --git a/scripts/criteria b/scripts/criteria new file mode 100755 index 00000000..b1d75cfe --- /dev/null +++ b/scripts/criteria @@ -0,0 +1,67 @@ +#!/usr/bin/env perl + +# Copyright (c) MediaTek USA Inc., 2021-2023 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# criteria +# +# This script is used as a genhtml "--criteria-script criteria" callback. +# It is called by genhtml at each level of hierarchy - but ignores all but +# the top level, and looks only at line coverage. +# +# Format of the JSON input is: +# {"line":{"found":10,"hit:2,"UNC":2,..},"function":{...},"branch":{}" +# Only non-zero elements are included. +# See the 'criteria-script' section in "man genhtml" for details. +# +# The coverage criteria implemented here is "UNC + LBC + UIC == 0" +# If the criterial is violated, then this script emits a single line message +# to stdout and returns a non-zero exit code. +# +# If passed the "--suppress" flag, this script will exit with status 0, +# even if the coverage criteria is not met. +# genhtml --criteria-script 'path/criteria --signoff' .... 
+# +# It is not hard to envision much more complicated coverage criteria. + +use strict; +use FindBin; +use Getopt::Long; +use lib "$FindBin::RealBin"; +use criteria qw(new); + +use lib "$FindBin::RealBin/../lib"; +use lib "$FindBin::RealBin/../../../lib"; # path from 'support-scripts' + +use lcovutil; + +my $obj = criteria->new($0, @ARGV); + +my $signoff; +if (!defined($obj) || + !GetOptions('signoff' => \$signoff)) { + print(STDERR "usage: name type json-string [--signoff]\n"); + exit(1) if caller; + return undef; +} +my $json = pop(@ARGV); +my $db = JsonSupport::decode($json); + +my ($status, $msgs) = $obj->check_criteria(@ARGV, $db); +foreach my $m (@$msgs) { + print($m, "\n"); +} +exit $status; diff --git a/scripts/criteria.pm b/scripts/criteria.pm new file mode 100644 index 00000000..dc7088a7 --- /dev/null +++ b/scripts/criteria.pm @@ -0,0 +1,107 @@ +#!/usr/bin/env perl + +# Copyright (c) MediaTek USA Inc., 2021-2023 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# criteria +# +# This script is used as a genhtml "--criteria-script criteria" callback. +# It is called by genhtml at each level of hierarchy - but ignores all but +# the top level, and looks only at line coverage. +# +# Format of the JSON input is: +# {"line":{"found":10,"hit:2,"UNC":2,..},"function":{...},"branch":{}" +# Only non-zero elements are included. +# See the 'criteria-script' section in "man genhtml" for details. +# +# The coverage criteria implemented here is "UNC + LBC + UIC == 0" +# If the criterial is violated, then this script emits a single line message +# to stdout and returns a non-zero exit code. +# +# If passed the "--suppress" flag, this script will exit with status 0, +# even if the coverage criteria is not met. +# genhtml --criteria-script 'path/criteria --signoff' .... +# +# It is not hard to envision much more complicated coverage criteria. +package criteria; + +use strict; +use Getopt::Long qw(GetOptionsFromArray); + +our @ISA = qw(Exporter); +our @EXPORT_OK = qw(new); + +use constant {SIGNOFF => 0,}; + +sub new +{ + my $class = shift; + my $signoff = 0; + my $script = shift; + my $standalone = $script eq $0; + my @options = @_; + + if (!GetOptionsFromArray(\@_, ('signoff' => \$signoff)) || + (!$standalone && @_)) { + print(STDERR "Error: unexpected option:\n " . + join(' ', @options) . + "\nusage: name type json-string [--signoff]\n"); + exit(1) if $standalone; + return undef; + } + + my $self = [$signoff]; + return bless $self, $class; +} + +sub check_criteria +{ + my ($self, $name, $type, $db) = @_; + + my $fail = 0; + my @messages; + if ($type eq 'top') { + # for the moment - only worry about the top-level coverage + + if (exists($db->{'line'})) { + # our criteria is LBC + UNC + UIC == 0 + my $sep = ''; + my $sum = 0; + my $msg = ''; + my $counts = ''; + my $lines = $db->{'line'}; + foreach my $tla ('UNC', 'LBC', 'UIC') { + $msg .= $sep . 
$tla; + $counts .= $sep; + if (exists $lines->{$tla}) { + my $count = $lines->{$tla}; + $sum += $count; + $counts .= "$count"; + } else { + $counts .= "0"; + } + $sep = ' + '; + } + $fail = $sum != 0; + push(@messages, $msg . " != 0: " . $counts . "\n") + if $fail; + } + } + + return ($fail && !$self->[SIGNOFF], \@messages); +} + +1; diff --git a/scripts/get_signature b/scripts/get_signature new file mode 100755 index 00000000..86e9b1ff --- /dev/null +++ b/scripts/get_signature @@ -0,0 +1,74 @@ +#!/usr/bin/env perl +# Copyright (c) MediaTek USA Inc., 2022 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# +# get_signature +# +# This is a sample script which uses uses md5sum to compare file versions +# If the checksum is not the same - then line numbers, etc. may be different +# and some very strange errors may occur. +# md5sum is not secure - so could use sha512sum or some other program, if we +# really wanted to + +use POSIX qw(strftime); +use Getopt::Long; +use Cwd qw(abs_path); + +sub usage +{ + print(STDERR "usage: $0 --compare old_version new_version filename OR\n" . + " $0 [--allow-missing] filename\n"); +} + +my $compare; +my $allow_missing; +my $help; +if (!GetOptions("--compare" => \$compare, + '--allow-missing' => \$allow_missing, + '--help' => \$help) || + $help || + ($compare && scalar(@ARGV) != 3) || + (!$compare && scalar(@ARGV) != 1) +) { + usage(); + exit(defined($help) ? 0 : 1); +} + +my $filename = $ARGV[$compare ? 2 : 0]; + +if ($compare) { + my ($old, $new) = @ARGV; + exit($old ne $new); # for the moment, just look for exact match +} + +unless (-e $filename) { + if ($allow_missing) { + print("\n"); # empty string + exit 0; + } + die("Error: $filename does not exist - perhaps you need the '--allow-missing' flag" + ); +} +$pathname = abs_path($filename); + +#my $sum = `sha512sum $pathname`; +my $sum = `md5sum $pathname`; +my $rtn = $?; +$sum =~ /^(\S+)/; +print($1 . "\n"); +exit $rtn; + diff --git a/scripts/getp4version b/scripts/getp4version new file mode 100755 index 00000000..ba22c25a --- /dev/null +++ b/scripts/getp4version @@ -0,0 +1,114 @@ +#!/usr/bin/env perl +# Copyright (c) MediaTek USA Inc., 2022-2023 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# +# getp4version +# +# This is a sample script which uses perforce commands to determine +# the version of the filename passed in. 
+# Version information (if present) is used during ".info" file merging +# to verify that the data the user is attempting to merge is for the same +# source code/same version. +# If the version is not the same - then line numbers, etc. may be different +# and some very strange errors may occur. + +use strict; +use POSIX qw(strftime); +use Getopt::Long; +use File::Spec; +use Cwd qw(abs_path); +use FindBin; + +use lib "$FindBin::RealBin"; +use annotateutil qw(get_modify_time not_in_repo compute_md5); + +sub usage +{ + print(STDERR "usage: $0 --compare old_version new_version filename OR\n" . + " $0 [--md5] [--allow-missing] filename\n"); +} + +my $compare; +my $use_md5; # if set, append md5 checksum to the P4 version string +my $allow_missing; +my $help; +if (!GetOptions("--compare" => \$compare, + "--md5" => \$use_md5, + '--allow-missing' => \$allow_missing, + '--help' => \$help) || + $help || + ($compare && scalar(@ARGV) != 3) || + (!$compare && scalar(@ARGV) != 1) +) { + usage(); + exit(defined($help) ? 0 : 1) unless caller; + return 1; +} + +my $filename = $ARGV[$compare ? 2 : 0]; + +if ($compare) { + my ($old, $new) = @ARGV; + if ($use_md5 && + $old !~ /^(\@head|#[0-9]+)/ && + $old =~ / md5:(.+)$/) { + my $o = $1; + if ($new =~ / md5:(.+)$/) { + exit($o ne $1); + } + # otherwise: 'new' was not an MD5 signature - so fall through to exact match + } + exit($old ne $new); # for the moment, just look for exact match +} + +unless (-e $filename) { + if ($allow_missing) { + print("\n"); # empty string + exit 0; + } + die("Error: $filename does not exist - perhaps you need the '--allow-missing' flag" + ); +} +my $pathname = abs_path($filename); +my $null = File::Spec->devnull(); # more portable way to do it + +my $version; +if (0 == + system("p4 files $pathname 2>$null|grep -qv -- '- no such file' >$null")) { + my $have = `p4 have $pathname`; + if ($have =~ /#([0-9]+) - /) { + $version = "#$1"; + } else { + $version = '\@head'; + } + + my $opened = `p4 opened $pathname 2>$null`; + if ($opened =~ /edit (default change|change (\S+)) /) { + # file is locally edited...append modify time to the version ID + $version .= ' edited ' . get_modify_time($pathname); + } + $version .= ' md5:' . compute_md5($pathname) + if $use_md5; +} else { + # not in P4 - just print the modify time, so we have a prayer of + # noticing file differences + $version = get_modify_time($pathname); + $version .= ' md5:' . compute_md5($pathname) + if ($use_md5); +} +print($version . "\n"); + diff --git a/scripts/gitblame b/scripts/gitblame new file mode 100755 index 00000000..cef32161 --- /dev/null +++ b/scripts/gitblame @@ -0,0 +1,49 @@ +#!/usr/bin/env perl + +# Copyright (c) MediaTek USA Inc., 2020-2023 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . 
+# +# +# gitblame [--p4] [--prefix path] [--abbrev regexp] [domain] pathname +# +# This script runs "git blame" for the specified file and formats the result +# to match the diffcov(1) age/ownership annotation specification. +# +# If the '--p4' flag is used: +# we assume that the GIT repo is cloned from Perforce - and look for +# the line in the generated commit log message which tells us the perforce +# changelist ID that we actually want. +# +# If specified, 'path' is prepended to 'pathname' (as 'path/pathname') +# before processing. +# +# If passed a domain name (or domain regexp): +# strip that domain from the author's address, and treat all users outside +# the matching domain as "External". +# The --abbrev argument enables you to specify one or more regexp patterns +# which are used to compute the user name abbreviation that are applied. + +use strict; +use FindBin; +use lib "$FindBin::RealBin"; +use gitblame qw(new); +use annotateutil qw(call_annotate); + +if (-f $ARGV[-1] || '-' ne index($ARGV[-1], 1)) { + call_annotate('gitblame', $0, @ARGV); +} else { + gitblame->new($0, @ARGV); +} diff --git a/scripts/gitblame.pm b/scripts/gitblame.pm new file mode 100644 index 00000000..a26b9d70 --- /dev/null +++ b/scripts/gitblame.pm @@ -0,0 +1,264 @@ +#!/usr/bin/env perl + +# Copyright (c) MediaTek USA Inc., 2020-2024 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# +# gitblame [--p4] [--prefix path] [--abbrev regexp] [--cache dir] [domain] pathname +# +# This script runs "git blame" for the specified file and formats the result +# to match the diffcov(1) age/ownership annotation specification. +# +# If the '--cache' flag is used: +# Goal is to improve runtime performance by not calling GIT if file is +# unchanged and previous result is available. +# - First look into the provided cache before calling GIT. +# Hope to find that we already have data for the file we wanted. +# - If we do call GIT - then store the result back into cache. +# Note that this callback uses the `--version-script' (if specified) +# to extract and compare file versions. +# Also note that ignoring "version" errors will disable version checking +# of cached files - and may result in out-of-sync annotated file data. +# +# If the '--p4' flag is used: +# we assume that the GIT repo is cloned from Perforce - and look for +# the line in the generated commit log message which tells us the perforce +# changelist ID that we actually want. +# +# If specified, 'path' is prependied to 'pathname' (as 'path/pathname') +# before processing. +# +# If passed a domain name (or domain regexp): +# strip that domain from the author's address, and treat all users outside +# the matching domain as "External". +# The --abbrev argument enables you to specify one or more regexp patterns +# which are used to compute the user name abbreviation that are applied. 
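The abbreviation rules described above can be sketched in a few lines. The domain and address are made up, and the substitution patterns are applied through eval exactly as annotate() does:

    my $internal_domain = 'example\.com';    # hypothetical [domain] argument
    my @abbrev = ('s/^([^@]+)\@' . $internal_domain . '$/$1/',    # internal: keep the user part
                  's/^([^@]+)\@.+$/External/');                   # everyone else becomes "External"
    my $owner = 'jdoe@example.com';
    foreach my $re (@abbrev) {
        eval '$owner =~ ' . $re . ';';    # user-supplied substitutions are eval'd
    }
    print "$owner\n";                     # prints "jdoe"; an outside address would print "External"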
+ +package gitblame; + +use strict; +use File::Basename qw(dirname basename); +use File::Spec; +use Getopt::Long qw(GetOptionsFromArray); +use Cwd qw(abs_path); + +use annotateutil qw(not_in_repo + resolve_cache_dir find_in_cache store_in_cache); + +our @ISA = qw(Exporter); +our @EXPORT_OK = qw(new); + +use constant { + P4 => 0, + ABBREV => 1, + PREFIX => 2, + SCRIPT => 3, + CACHE => 4, +}; + +sub new +{ + my $class = shift; + my $script = shift; + + my $mapP4; + my $cache_dir; + my $prefix; + my @args = @_; + my @abbrev; + my $exe = basename($script ? $script : $0); + my $standalone = $script eq $0; + my $help; + + if (!GetOptionsFromArray(\@_, + ("p4" => \$mapP4, + "prefix:s" => \$prefix, + 'abbrev:s' => \@abbrev, + 'cache:s' => \$cache_dir, + 'help' => \$help)) || + (scalar(@_) >= 2) || + $help + ) { + print(STDERR + "usage: $exe [--p4] [--abbrev regexp]* [--cache dir] [domain] pathname\n" + ); + exit(scalar(@_) >= 2 && $help ? 0 : 1) if $standalone; + return undef; + } + my $internal_domain = shift; + if ($internal_domain) { + push(@abbrev, 's/^([^@]+)\@' . $internal_domain . '$/$1/'); + push(@abbrev, 's/^([^@]+)\@.+$/External/'); + # else leave domain in place + } + $cache_dir = resolve_cache_dir($cache_dir); + my @prefix; + push(@prefix, $prefix) if $prefix; + my $self = [$mapP4, \@abbrev, \@prefix, $exe, $cache_dir]; + return bless $self, $class; +} + +sub annotate +{ + my ($self, $file) = @_; + + # do we have a cached version of this file? + my ($cachepath, $version); + if ($self->[CACHE]) { + my $lines; + ($cachepath, $version, $lines) = find_in_cache($self->[CACHE], $file); + return (0, $lines) if defined($lines); # cache hit + } + my $pathname = File::Spec->catfile(@{$self->[PREFIX]}, $file); + # if running as module, then context might be available + my $context = ''; + eval { $context = MessageContext::context(); }; + unless (defined($pathname) && + (-f $pathname || -l $pathname) && + -r $pathname) { + $context = ':' . $context if $context; + die($self->[SCRIPT] . $context . ' expected readable file, found \'' . + (defined($pathname) ? $pathname : '') . "'"); + } + + # set working directory to account for nested repos and submodules + my $dir = dirname($pathname); + my $basename = basename($pathname); + -d $dir or die("no such directory '$dir'$context"); + + my %changelists; + my $status = 0; + my @lines; + + my $null = File::Spec->devnull(); + if (0 == system("cd $dir ; git rev-parse --show-toplevel >$null 2>&1")) { + # in a git repo + if (0 == system( + "cd $dir ; git ls-files --error-unmatch $basename >$null 2>&1") + ) { + # matched a tracked pathname + my $matched; + if ( + open(HANDLE, "-|", + "cd $dir ; git blame -e $basename 2> /dev/null") + ) { + my %abbrev; # user name abbreviations + while (my $line = ) { + chomp $line; + # Also remove CR from line-end + s/\015$//; + + if ($line =~ + m/^(\S+)[^(]+\(<([^>]*)>\s+([-0-9]+\s+[0-9:]+\s+[-+0-9]+)\s+([0-9]+)\) (.*)$/ + ) { + my $commit = $1; + my $owner = $2; # apparently, this can be empty + my $when = $3; + my $text = $5; + + # found empty name in .../clang/include/AST/StmtOpenMP.h + $owner = 'unknown@nowhere.com' unless $owner; + + if ($self->[P4]) { + if (!exists($changelists{$commit})) { + if ( + open(GITLOG, '-|', + "cd $dir ; git show -s $commit") + ) { + while (my $l = ) { + # p4sync puts special comment in commit log. + # pull the CL out of that. 
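                                # For reference, git-p4 appends a trailer of roughly this form
                                # (the depot path here is hypothetical):
                                #   [git-p4: depot-paths = "//depot/main/": change = 123456]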
+ if ($l =~ /git-p4:.+change = ([0-9]+)/) + { + $changelists{$commit} = $1; + $commit = $1; + last; + } + } + } else { + die("unable to execute 'git show -s $commit'$context: $!" + ); + } + close(GITLOG) or die("unable to close$context"); + } else { + $commit = $changelists{$commit}; + } + } + # line owner filtering to canonical form + $owner =~ s/ dot /./g; + $owner =~ s/ at /\@/; + my $fullname = $owner; + + if (exists($abbrev{$fullname})) { + $owner = $abbrev{$fullname}; + } else { + # compute only once... + foreach my $re (@{$self->[ABBREV]}) { + ## strip domain part for internal users... + eval '$owner =~ ' . $re . ';'; + die("invalid domain pattern '$re'$context: $@") + if $@; + } + $abbrev{$fullname} = $owner; + } + # Convert Git date/time to diffcov canonical format + # replace space between date and time with 'T' + $when =~ s/\s/T/; + # remove space between time and zone offset + $when =~ s/\s//; + # insert ':' between hour and minute digits of zone offset + $when =~ s/([0-9][0-9])$/:$1/; + # ';' is not a legal character in an email address - + # so use it as a delimiter + push(@lines, + [$text, $owner, $fullname, $when, $commit]); + # expect all lines to eitehr match the git blame regexp + # or none of them to match + die("$basename has both matching and not matching lines$context" + ) if defined($matched) && !$matched; + $matched = 1; + } else { + push(@lines, [$line, "NONE", undef, "NONE", "NONE"]); + # expect all lines to eitehr match the git blame regexp + # or none of them to match + die("$basename has both not matching and matching lines$context" + ) if defined($matched) && $matched; + $matched = 0; + } + } + close(HANDLE) or + die("unable to close git blame pipe$context: $!\n"); + $status = $?; + #if (0 != $?) { + # $? & 0x7F & + # die("git blame died from signal ", ($? & 0x7F), "\n"); + # die("git blame exited with error ", ($? 
>> 8), "\n"); + #} + if (0 == $status && + defined($cachepath)) { + store_in_cache($cachepath, $file, $version, \@lines); + } + return ($status, \@lines); + } + } + } + + # fallthrough from error conditions + not_in_repo($pathname, \@lines); + return ($status, \@lines); +} + +1; diff --git a/scripts/gitdiff b/scripts/gitdiff new file mode 100755 index 00000000..edd96d82 --- /dev/null +++ b/scripts/gitdiff @@ -0,0 +1,151 @@ +#!/usr/bin/env perl + +use Getopt::Long; +use strict; +use Cwd /realpath/; + +my $verbose = 0; + +sub include_me +{ + my ($path, $includes, $excludes) = @_; + + foreach my $pat (@$excludes) { + if ($path =~ /$pat/) { + if ($verbose) { + print(STDERR "exclude '$path', pattern '$pat'\n"); + } + return 0; + } + } + return 1 + if 0 == scalar(@$includes); + + foreach my $pat (@$includes) { + if ($path =~ /$pat/) { + if ($verbose) { + print(STDERR "include '$path', pattern '$pat'\n"); + } + return 1; + } + } + if ($verbose > 1) { + print(STDERR "exclude '$path': no match\n"); + } + return 0; +} + +my @exclude_patterns; +my @include_patterns; +my $suppress_unchanged; +my $prefix = ''; +my $ignore_whitespace; +my $repo = '.'; # cwd + +if (!GetOptions("exclude=s" => \@exclude_patterns, + "include=s" => \@include_patterns, + 'no-unchanged' => \$suppress_unchanged, + 'prefix=s' => \$prefix, + 'b|blank' => \$ignore_whitespace, + 'repo=s' => \$repo, + 'verbose+' => \$verbose) || + (2 != scalar(@ARGV) && + 3 != scalar(@ARGV)) +) { + print(STDERR + "usage: [(--exclude|include) regexp[,regexp]] [--repo repo] [-b] [dir] base_changelist current_changelist\n'exclude' wins if both exclude and include would match.\n" + ); + exit(1); +} + +@exclude_patterns = split(',', join(',', @exclude_patterns)); +@include_patterns = split(',', join(',', @include_patterns)); +$ignore_whitespace = '-b' if $ignore_whitespace; + +if ($verbose) { + print(STDERR join(' ', @ARGV)); + if (scalar(@exclude_patterns)) { + print(STDERR " --exclude " . join(" --exclude ", @exclude_patterns)); + } + if (scalar(@include_patterns)) { + print(STDERR " --include " . join(" --include ", @include_patterns)); + } + print(STDERR "\n"); +} + +$prefix .= '/' if $prefix; +if (3 == scalar(@ARGV)) { + my $dir = shift @ARGV; + push(@include_patterns, "$dir"); +} +my $base_sha = shift @ARGV; +my $current_sha = shift @ARGV; + +$repo = Cwd::realpath($repo); + +my $cmd = "cd $repo ; git diff $ignore_whitespace $base_sha $current_sha"; + +open(HANDLE, "-|", $cmd) or + die("failed to exec git diff: $!"); + +my $includeCurrentFile; +my %allFiles; + +while () { + chomp($_); + s/\r//g; + my $line = $_; + + if ($line =~ /(^diff|\+\+\+|---) /) { + # remove the a/b leader from the + $line =~ s# [ab]/# $prefix#g; + } + + if ($line =~ /^diff --git (\S+) (\S+)/) { + # git diff header + my $fileA = $1; + my $fileB = $2; + $includeCurrentFile = + (include_me($fileA, \@include_patterns, \@exclude_patterns) || + include_me($fileB, \@include_patterns, \@exclude_patterns)); + if ($includeCurrentFile) { + $allFiles{$fileB} = $fileA; + $line =~ s/($fileA|$fileB)/$1/g; + } + } + + printf("%s\n", $line) + if $includeCurrentFile; +} +close(HANDLE) or die("failed to close git diff pipe: $!"); +if (0 != $?) { + $? & 0x7F & die("git diff died from signal ", ($? & 0x7F), "\n"); + die("git diff exited with error ", ($? 
>> 8), "\n"); +} + +exit 0 if defined($suppress_unchanged); + +# now find the list of files in the current SHA - we can process that +# to find the list of all current files which were unchanged since the +# baseline SHA +die("failed to exec git ls-tree") + unless ( + open(HANDLE, "-|", "cd $repo ; git ls-tree -r --name-only $current_sha")); + +while () { + chomp($_); + s/\r//g; + my $filename = $repo . '/' . $_; + my $path = $prefix . $filename; + if (!exists($allFiles{$path}) && + include_me($path, \@include_patterns, \@exclude_patterns)) { + printf("diff --git $prefix$filename $prefix$filename\n"); + printf("=== $path\n"); + } +} +close(HANDLE) or die("failed to close git ls-tree pipe: $!"); +if (0 != $?) { + $? & 0x7F & die("git ls-tree died from signal ", ($? & 0x7F), "\n"); + die("git ls-tree exited with error ", ($? >> 8), "\n"); +} +exit 0; diff --git a/scripts/gitversion b/scripts/gitversion new file mode 100755 index 00000000..a295d854 --- /dev/null +++ b/scripts/gitversion @@ -0,0 +1,64 @@ +#!/usr/bin/env perl +# Copyright (c) MediaTek USA Inc., 2022-2023 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# +# gitversion [--p4] [--md5] [--prefix path] pathname OR +# gitversion [--p4] [--md5] [--prefix path] --compare old_version new_version pathname +# +# If the '--p4' flag is used: +# we assume that the GIT repo is cloned from Perforce - and look for +# the line in the generated commit log message which tells us the perforce +# changelist ID that we actually want. +# If specified, 'path' is prependied to 'pathname' (as 'path/pathname') +# before processing. + +# This is a sample script which uses git commands to determine +# the version of the filename parameter. +# Version information (if present) is used during ".info" file merging +# to verify that the data the user is attempting to merge is for the same +# source code/same version. +# If the version is not the same - then line numbers, etc. may be different +# and some very strange errors may occur. + +use strict; +use Getopt::Long; +use FindBin; +use lib "$FindBin::RealBin"; +use gitversion qw(new usage); + +my $class = gitversion->new($0, @ARGV); +# need to check if this is a --compare call or not +my ($compare, $mapp4, $use_md5, $prefix, $allow_missing, $help); +if (!GetOptions("--compare" => \$compare, + "--md5" => \$use_md5, + "--p4" => \$mapp4, + '--prefix:s' => \$prefix, + '--allow-missing' => \$allow_missing, + '--help' => \$help) || + $help || + $compare && scalar(@ARGV) != 3 +) { + gitversion::usage($help); + exit $help ? 0 : 1; +} + +if ($compare) { + exit $class->compare_version(@ARGV); +} else { + print $class->extract_version(@ARGV) . 
"\n"; +} +exit 0; diff --git a/scripts/gitversion.pm b/scripts/gitversion.pm new file mode 100644 index 00000000..1553081a --- /dev/null +++ b/scripts/gitversion.pm @@ -0,0 +1,198 @@ +#!/usr/bin/env perl +# Copyright (c) MediaTek USA Inc., 2022-2024 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# +# gitversion [--p4] [--md5] [--local-change] [--prefix path] pathname OR +# gitversion [--p4] [--md5] [--prefix path] --compare old_version new_version pathname +# +# If the '--p4' flag is used: +# we assume that the GIT repo is cloned from Perforce - and look for +# the line in the generated commit log message which tells us the perforce +# changelist ID that we actually want. +# if the '--local-change' flag is used: +# we assume that there may be local changes which are not committed to +# the repo. If flag is not set: do not check for local change +# If specified, 'path' is prependied to 'pathname' (as 'path/pathname') +# before processing. + +# This is a sample script which uses git commands to determine +# the version of the filename parameter. +# Version information (if present) is used during ".info" file merging +# to verify that the data the user is attempting to merge is for the same +# source code/same version. +# If the version is not the same - then line numbers, etc. may be different +# and some very strange errors may occur. + +package gitversion; + +use strict; +use POSIX qw(strftime); +use File::Spec; +use Cwd qw(abs_path); +use File::Basename qw(dirname basename); +use Getopt::Long qw(GetOptionsFromArray); + +use annotateutil qw(get_modify_time compute_md5); + +our @ISA = qw(Exporter); +our @EXPORT_OK = qw(new extract_version compare_version usage); + +use constant { + MD5 => 0, + P4 => 1, + PREFIX => 2, + ALLOW_MISSING => 3, + CHECK_LOCAL_CHANGE => 4, +}; + +sub usage +{ + my $exe = shift; + $exe = basename($exe); + print(STDERR< \$compare, + "--md5" => \$use_md5, + "--p4" => \$mapp4, + '--prefix:s' => \$prefix, + '--allow-missing' => \$allow_missing, + '--local-change' => \$local_change, + '--help' => \$help) || + $help || + $compare && scalar(@_) != 3 + ) { + usage($script); + exit(defined($help) ? 
0 : 1) if ($script eq $0); + return undef; + } + + my $self = [$use_md5, $mapp4, $prefix, $allow_missing, $local_change]; + return bless $self, $class; +} + +sub extract_version +{ + my ($self, $filename) = @_; + + if (!File::Spec->file_name_is_absolute($filename) && + defined($self->[PREFIX])) { + $filename = File::Spec->catfile($self->[PREFIX], $filename); + } + + unless (-e $filename) { + if ($self->[ALLOW_MISSING]) { + return ''; # empty string + } + die("Error: $filename does not exist - perhaps you need the '--allow-missing' flag" + ); + } + my $pathname = abs_path($filename); + my $null = File::Spec->devnull(); + + my $dir = dirname($pathname); + my $file = basename($pathname); + -d $dir or die("no such directory '$dir'"); + + my $version; + if (0 == system("cd $dir ; git rev-parse --show-toplevel >$null 2>&1")) { + # in a git repo - use full SHA. + my $log = `cd $dir ; git log --no-abbrev --oneline -1 $file 2>$null`; + if (0 == $? && + $log =~ /^(\S+) /) { + $version = $1; + if ($self->[P4]) { + if (open(GITLOG, '-|', "cd $dir ; git show -s $version")) { + while (my $l = ) { + # p4sync puts special comment in commit log. + # pull the CL out of that. + if ($l =~ /git-p4:.+change = ([0-9]+)/) { + $version = "CL $1"; + last; + } + } + } else { + die("unable to execute 'git show -s $version': $!"); + } + close(GITLOG) or die("unable to close"); + if (0 != $?) { + $? & 0x7F & + die("git show died from signal ", ($? & 0x7F), "\n"); + die("git show exited with error ", ($? >> 8), "\n"); + } + + } else { + $version = "SHA $version"; + } + if ($self->[CHECK_LOCAL_CHANGE]) { + my $diff = `cd $dir ; git diff $file 2>$null`; + if ('' ne $diff) { + $version .= ' edited ' . get_modify_time($file); + $version .= ' md5:' . compute_md5($pathname) + if $self->[MD5]; + } + } + } + } + if (!$version) { + # not in git - just print the modify time, so we have a prayer of + # noticing file differences + $version = get_modify_time($pathname); + $version .= ' md5:' . compute_md5($pathname) + if ($self->[MD5]); + } + return $version; +} + +sub compare_version +{ + my ($self, $new, $old, $filename) = @_; + + if ($self->[MD5] && + ( $old !~ /^SHA/ || + ($self->[P4] && + $old !~ /^CL/)) && + $old =~ / md5:(.+)$/ + ) { + my $o = $1; + if ($new =~ / md5:(.+)$/) { + return ($o ne $1); + } + # otherwise: 'new' was not an MD5 signature - so fall through to exact match + } + return $old ne $new; # just look for exact match +} + +1; diff --git a/scripts/p4annotate b/scripts/p4annotate new file mode 100755 index 00000000..f0f31c01 --- /dev/null +++ b/scripts/p4annotate @@ -0,0 +1,45 @@ +#!/usr/bin/env perl + +# Copyright (c) MediaTek USA Inc., 2020 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . 
+# +# +# p4annotate +# +# This script runs "p4 annotate" for the specified file and formats the result +# to match the diffcov(1) 'annotate' callback specification: +# use p4annotate; +# my $callback = p4annotate->new([--md5] [--log logfile] [--verify]); +# $callback->annotate(filename); +# +# It is implemented so that it can be loaded as a Perl module such that the +# callback can be executed without incurring an additional process overhead - +# which appears to be large and hightly variable in our compute farm environment. +# +# It can also be called directly, as +# p4annotate [--md5] [--log logfild] [--verify] filename + +use strict; +use FindBin; +use lib "$FindBin::RealBin"; +use p4annotate qw(new); +use annotateutil qw(call_annotate); + +if (-f $ARGV[-1] || '-' ne index($ARGV[-1], 1)) { + call_annotate('p4annotate', $0, @ARGV); +} else { + p4annotate->new($0, @ARGV); +} diff --git a/scripts/p4annotate.pm b/scripts/p4annotate.pm new file mode 100644 index 00000000..1ab0dd2d --- /dev/null +++ b/scripts/p4annotate.pm @@ -0,0 +1,325 @@ +#!/usr/bin/env perl + +# Copyright (c) MediaTek USA Inc., 2020-2024 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# +# p4annotate.pm +# +# This script runs "p4 annotate" for the specified file and formats the result +# to match the diffcov(1) 'annotate' callback specification: +# use p4annotate; +# my $callback = p4annotate->new([--log logfile] [--cache cache_dir] [--verify]); +# $callback->annotate(filename); +# +# If the '--cache' flag is used: +# Goal is to improve runtime performance by not calling GIT if file is +# unchanged and previous result is available. +# - First look into the provided cache before calling GIT. +# Hope to find that we already have data for the file we wanted. +# - If we do call GIT - then store the result back into cache. +# Note that this callback uses the `--version-script' (if specified) +# to extract and compare file versions. +# Also note that ignoring "version" errors will disable version checking +# of cached files - and may result in out-of-sync annotated file data. +# +# The '--verify' flag tells the tool to do some additional consistency +# checking when merging local edits into the annotated file. +# +# The '--log' flag specifies a file where the tool writes various annotation- +# related log messages. +# +# This utility is implemented so that it can be loaded as a Perl module such +# that the callback can be executed without incurring an additional process +# overhead - which appears to be large and hightly variable in our compute +# farm environment. 
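A minimal sketch of the module-style use follows. The filename is hypothetical, P4USER/P4PORT/P4CLIENT must already be set in the environment, and each returned entry is roughly [text, owner, full-name (may be undef), when, changelist]:

    use FindBin;
    use lib "$FindBin::RealBin";
    use p4annotate;

    my $cb = p4annotate->new($0);    # no --log/--cache/--verify options
    my ($status, $lines) = $cb->annotate('src/widget.cpp');
    foreach my $l (@$lines) {
        my ($text, $owner, $full, $when, $cl) = @$l;
        print "$cl\t$owner\t$when\t$text\n";
    }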
+# +# It can also be called directly, as +# p4annotate [--log logfile] [--verify] filename + +package p4annotate; + +use strict; +use File::Basename; +use File::Spec; +use Getopt::Long qw(GetOptionsFromArray); +use Fcntl qw(:flock); +use annotateutil qw(get_modify_time not_in_repo call_annotate + resolve_cache_dir find_in_cache store_in_cache); + +our @ISA = qw(Exporter); +our @EXPORT_OK = qw(new); + +use constant { + SCRIPT => 0, + CACHE => 1, + VERIFY => 2, + LOGFILE => 3, + LOG => 4, +}; + +sub printlog +{ + my ($self, $msg) = @_; + my $fh = $self->[LOG]; + return unless $fh; + flock($fh, LOCK_EX) or die('cannot lock ' . $self->[LOGFILE] . ": $!"); + print($fh $msg); + flock($fh, LOCK_UN) or die('cannot unlock ' . $self->[LOGFILE] . ": $!"); +} + +sub new +{ + my $class = shift; + my @args = @_; + my $script = shift; # this should be 'me' + #other arguments are as passed... + my $logfile; + my $cache_dir; + my $verify = 0; # if set, check that we merged local changes correctly + my $exe = basename($script ? $script : $0); + my $standalone = $0 eq $script; + + if (exists($ENV{LOG_P4ANNOTATE})) { + $logfile = $ENV{LOG_P4ANNOTATE}; + } + my $help; + if (!GetOptionsFromArray(\@_, + ("verify" => \$verify, + "log=s" => \$logfile, + 'cache:s' => \$cache_dir, + 'help' => \$help)) || + (!$standalone && scalar(@_)) || + $help + ) { + print(STDERR ($help ? '' : ("unexpected arg: $script " . join(@_, ' ')) + ), + "usage: $exe [--log logfile] [--cache dir] [--verify] filename\n" + ); + exit(scalar(@_) == 0 && $help ? 0 : 1) if $standalone; + return undef; + } + + my @notset; + foreach my $var ("P4USER", "P4PORT", "P4CLIENT") { + push(@notset, $var) unless exists($ENV{$var}); + } + if (@notset) { + die("$exe requires environment variable" . + (1 < scalar(@notset) ? 's' : '') . ' ' . + join(' ', @notset) . " to be set."); + } + $cache_dir = resolve_cache_dir($cache_dir); + + my $self = [$exe, $cache_dir, $verify]; + bless $self, $class; + if ($logfile) { + open(LOGFILE, ">>", $logfile) or + die("unable to open $logfile"); + + $self->[LOG] = \*LOGFILE; + $self->[LOGFILE] = $logfile; + $self->printlog("$exe " . join(" ", @args) . "\n"); + } + return $self; +} + +sub annotate +{ + my ($self, $pathname) = @_; + defined($pathname) or die("expected filename"); + + my ($cache_path, $version); + if ($self->[CACHE]) { + my $lines; + ($cache_path, $version, $lines) = + find_in_cache($self->[CACHE], $pathname); + return (0, $lines) if defined($lines); # cache hit + } + + if (-e $pathname && -l $pathname) { + $pathname = File::Spec->catfile(File::Basename::dirname($pathname), + readlink($pathname)); + my @c; + foreach my $component (split(m@/@, $pathname)) { + next unless length($component); + if ($component eq ".") { next; } + if ($component eq "..") { pop @c; next } + push @c, $component; + } + $pathname = File::Spec->catfile(@c); + } + + my $null = File::Spec->devnull(); # more portable + my @lines; + my $status; + if (0 == system( + "p4 files $pathname 2>$null|grep -qv -- '- no such file' >$null") + ) { + # this file is in p4.. + my $version; + my $have = `p4 have $pathname`; + if ($have =~ /#([0-9]+) - /) { + $version = "#$1"; + } else { + $version = '@head'; + } + $self->printlog(" have $pathname:$version\n"); + + my @annotated; + # check if this file is open in the current sandbox... 
+ # redirect stderr because p4 print "$path not opened on this client" if file not opened + my $opened = `p4 opened $pathname 2>$null`; + my %localAdd; + my %localDelete; + my ($localChangeList, $owner, $now); + if ($opened =~ /edit (default change|change (\S+)) /) { + $localChangeList = $2 ? $2 : 'default'; + + $self->printlog(" local edit in CL $localChangeList\n"); + + $owner = $ENV{P4USER}; # current user is responsible for changes + $now = get_modify_time($pathname) + ; # assume changes happened when file was liast modified + + # what is different in the local file vs the one we started with + if (open(PIPE, "-|", "p4 diff $pathname")) { + my $line = ; # eat first line + die("unexpected content '$line'") + unless $line =~ m/^==== /; + my ($action, $fromStart, $fromEnd, $toStart, $toEnd); + while ($line = ) { + chomp $line; + # Also remove CR from line-end + s/\015$//; + if ($line =~ + m/^([0-9]+)(,([0-9]+))?([acd])([0-9]+)(,([0-9]+))?/) { + # change + $action = $4; + $fromStart = $1; + $fromEnd = $3 ? $3 : $1; + $toStart = $5; + $toEnd = $7 ? $7 : $5; + } elsif ($line =~ m/^> (.*)$/) { + $localAdd{$toStart++} = $1; + } elsif ($line =~ m/^< (.*)$/) { + $localDelete{$fromStart++} = $1; + } else { + die("unexpected line '$line'") + unless $line =~ m/^---$/; + } + } + close(PIPE) or die("unable to close p4 diff pipe: $!\n"); + if (0 != $?) { + $? & 0x7F & + die("p4 pipe died from signal ", ($? & 0x7F), "\n"); + die("p4 pipe exited with error ", ($? >> 8), "\n"); + } + } else { + die("unable to open pipe to p4 diff $pathname"); + } + } + # -i: follow history across branches + # -I: follow history across integrations + # (seem to be able to use -i or -I - but not both, together) + # -u: print user name + # -c: print changelist rather than file version ID + # -q: quiet - suppress the 1-line header for each line + my $annotateLineNo = 1; + my $emitLineNo = 1; + if (open(HANDLE, "-|", "p4 annotate -Iucq $pathname$version")) { + while (my $line = ) { + + if (exists $localDelete{$annotateLineNo++}) { + next; # line was deleted .. 
skip it + } + while (exists $localAdd{$emitLineNo}) { + my $l = $localAdd{$emitLineNo}; + push(@lines, [$l, $owner, undef, $now, $localChangeList]); + push(@annotated, $l) if ($self->[VERIFY]); + delete $localAdd{$emitLineNo}; + ++$emitLineNo; + } + + chomp $line; + # Also remove CR from line-end + s/\015$//; + + if ($line =~ m/([0-9]+):\s+(\S+)\s+([0-9\/]+)\s(.*)/) { + my $changelist = $1; + my $owner = $2; + my $when = $3; + my $text = $4; + $owner =~ s/^.*.*$//; + $when =~ s:/:-:g; + $when =~ s/$/T00:00:00-05:00/; + push(@lines, [$text, $owner, undef, $when, $changelist]); + push(@annotated, $text) if ($self->[VERIFY]); + } else { + push(@lines, [$line, 'NONE', undef, 'NONE', 'NONE']); + push(@annotated, $line) if ($self->[VERIFY]); + } + ++$emitLineNo; + } # while (HANDLE) + + # now handle lines added at end of file + die("lost track of lines") + unless (0 == scalar(%localAdd) || + exists($localAdd{$emitLineNo})); + + while (exists $localAdd{$emitLineNo}) { + my $l = $localAdd{$emitLineNo}; + push(@lines, [$l, $owner, undef, $now, $localChangeList]); + delete $localAdd{$emitLineNo}; + push(@annotated, $l) if ($self->[VERIFY]); + ++$emitLineNo; + die("lost track of lines") + unless (0 == scalar(%localAdd) || + exists($localAdd{$emitLineNo})); + } + if ($self->[VERIFY]) { + if (open(DEBUG, "<", $pathname)) { + my $lineNo = 0; + while (my $line = ) { + chomp($line); + my $a = $annotated[$lineNo]; + die("mismatched annotation at $pathname:$lineNo: '$line' -> '$a'" + ) unless $line eq $a; + ++$lineNo; + } + } + } + close(HANDLE) or die("unable to close p4 annotate pipe: $!\n"); + $status = $?; + } + if ($self->[CACHE] && + 0 == $status) { + store_in_cache($cache_path, $pathname, $version, \@lines); + } + } else { + $self->printlog(" $pathname not in P4\n"); + not_in_repo($pathname, \@lines); + } + return ($status, \@lines); +} + +unless (caller) { + call_annotate("p4annotate", @ARGV); +} + +1; + diff --git a/scripts/p4udiff b/scripts/p4udiff new file mode 100755 index 00000000..cdee98db --- /dev/null +++ b/scripts/p4udiff @@ -0,0 +1,403 @@ +#!/usr/bin/env perl + +# Copyright (c) MediaTek USA Inc., 2020 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# +# p4udiff +# +# This script extracts a unified-diff between two Perforce changelists. 
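A hedged invocation sketch follows; the depot path and changelist numbers are invented, the (directory, base, current) argument order is assumed to match the companion gitdiff script, and 'sandbox' selects the locally synced/edited state as handled below. The resulting diff would typically feed genhtml's differential-coverage options:

    p4udiff --exclude '\.xml$' //depot/project 1234 sandbox > changes.diff
    genhtml --diff-file changes.diff --baseline-file baseline.info current.info -o report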
+ +use strict; +use DateTime; +use Getopt::Long; + +package P4File; + +sub new +{ + my $class = shift; + my $description = shift; + if ($description =~ + m/([^#]+)#([0-9]+)\s+-\s+(\S+)\s+change\s+([0-9]+)\s+\(([^)]+)\).*$/) { + my $self = {}; + bless $self, $class; + + $self->{path} = $1; + $self->{rev} = $2; + $self->{action} = $3; + $self->{changelist} = $4; + $self->{type} = $5; + + return $self; + } + return undef; +} + +sub path +{ + my $self = shift; + return $self->{path}; +} + +sub rev +{ + my $self = shift; + return $self->{rev}; +} + +sub action +{ + my $self = shift; + return $self->{action}; +} + +sub changelist +{ + my $self = shift; + return $self->{changelist}; +} + +sub type +{ + my $self = shift; + return $self->{type}; +} + +package P4FileList; + +sub new +{ + my ($class, $include, $exclude) = @_; + my $self = [{}, $include, $exclude]; + bless $self, $class; + + return $self; +} + +sub include_me +{ + my ($path, $include, $exclude) = @_; + + if (defined($exclude)) { + foreach my $pat (@$exclude) { + return 0 + if $path =~ /$pat/; + } + } + return 1 + if (!defined($include) || 0 == scalar(@$include)); + + foreach my $pat (@$include) { + return 1 + if ($path =~ /$pat/); + } + return 0; +} + +sub append +{ + my $self = shift; + my $entry = shift; + + return $self if (!defined($entry)); + my ($hash, $include, $exclude) = @$self; + my $key = $entry->path(); + if (defined($hash->{$key})) { + warn("WARNING: skipping duplicated path $key\n"); + return $self; + } + $hash->{$key} = $entry + if include_me($key, $include, $exclude); + return $self; +} + +sub files +{ + my $self = shift; + return sort keys %{$self->[0]}; +} + +sub get +{ + my $self = shift; + my $key = shift; + + if (!defined($self->[0]->{$key})) { + return undef; + } + return $self->[0]->{$key}; +} + +sub remove +{ + my $self = shift; + my $key = shift; + delete $self->[0]->{$key}; + return $self; +} + +package main; + +my @exclude_patterns; +my @include_patterns; +my $suppress_unchanged; +my $ignore_whitespace; + +if (!GetOptions("exclude=s" => \@exclude_patterns, + "include=s" => \@include_patterns, + 'b|blank' => \$ignore_whitespace, + 'no-unchanged' => \$suppress_unchanged) || + 3 != scalar(@ARGV) +) { + print(STDERR<new(\@include_patterns, \@exclude_patterns); +my $curr_files = P4FileList->new(\@include_patterns, \@exclude_patterns); + +# need "/..." on the pathname if this is a directory. +# - depot_path may not be the same as the client workspace path. +# - If the name is local, then append if it is a directory - otherwise, ask P4 +# Don't go to P4 unless necessary - as the interaction is pretty slow. + +# Ask the Perforce server to lookup the path and append a directory recursion +# pattern if the depot_path is not a file_type. +my $p4_path = $top_directory; +if (-e $p4_path) { + $p4_path .= "/..." + if (-d $p4_path); +} else { + system("p4 fstat $top_directory|grep depotFile >/dev/null 2>&1"); + if (($? >> 8) != 0) { + $p4_path .= "/..."; + } +} + +if (open(HANDLE, "-|", "p4 files ${p4_path}\@$base_changelist")) { + DIFF_FILE: + while (my $line = ) { + chomp $line; + s/\015$//; + + $base_files->append(P4File->new($line)); + } + close(HANDLE) or die("unable to close p4 files (baseline) pipe: $!\n"); + if (0 != $?) { + $? & 0x7F & + die("p4 files (baseline) died from signal ", ($? & 0x7F), "\n"); + die("p4 files (baseline) exited with error ", ($? >> 8), "\n"); + } +} + +my $curr = $curr_changelist eq "sandbox" ? 
"" : "\@$curr_changelist"; +if (open(HANDLE, "-|", "p4 files ${p4_path}$curr")) { + DIFF_FILE: + while (my $line = ) { + chomp $line; + s/\015$//; + $curr_files->append(P4File->new($line)); + } + close(HANDLE) or die("unable to close p4 files (current) pipe: $!\n"); + if (0 != $?) { + $? & 0x7F & + die("p4 files (current) died from signal ", ($? & 0x7F), "\n"); + die("p4 files (current) exited with error ", ($? >> 8), "\n"); + } +} + +my @unchanged; + +# prune files at the same rev; no difference to report +foreach my $f ($base_files->files()) { + my $b = $base_files->get($f); + my $c = $curr_files->get($f); + + if (defined($c) && + $b->rev() eq $c->rev() && + $b->action() eq $c->action() && + $b->changelist() eq $c->changelist()) { + $curr_files->remove($f); + $base_files->remove($f); + push(@unchanged, $c) + unless ($c->action() eq 'delete' || + $c->type() eq 'binary'); + } +} + +# prune files already deleted in base list +foreach my $f (grep { + $base_files->get($_)->action() eq "delete" || + $base_files->get($_)->action() eq "move/delete" + } $base_files->files() +) { + my $b = $base_files->get($f); + my $c = $curr_files->get($f); + + if (defined($c) && $b->action() eq $c->action()) { + # deleted again in curr with a different rev + $curr_files->remove($f); + } + + $base_files->remove($f); +} + +# prune files deleted in curr list +foreach my $f (grep { + $curr_files->get($_)->action() eq "delete" || + $curr_files->get($_)->action() eq "move/delete" + } $curr_files->files() +) { + my $c = $curr_files->get($f); + + $curr_files->remove($f); +} + +my %union; + +foreach my $k ($base_files->files()) { + #my $b = $base_files->get($k); + #printf("base: %s#%d %s change %d\n", $k, $b->rev(), $b->action(), $b->changelist()); + $union{$k} = 1; +} +foreach my $k ($curr_files->files()) { + #my $b = $curr_files->get($k); + #printf("curr: %s#%d %s change %d\n", $k, $b->rev(), $b->action(), $b->changelist()); + $union{$k} = 1; +} +#exit; + +#my $workspace = `p4 -F \%clientRoot\% -ztag info`; +my $where = + `p4 where $p4_path`; # need the "..." in the path or p4 gets confused +$where =~ s/\/\.\.\.//g; +my ($depot_path, $workspace_path, $sandbox_path) = split(' ', $where); + +sub reloc +{ + my $path = shift; + $path =~ s/$depot_path/$sandbox_path/; + return $path; +} + +foreach my $f (sort keys %union) { + my $b = $base_files->get($f); + my $c = $curr_files->get($f); + + if (defined($b) && !defined($c)) { + # deleted + next if ($b->type() eq "binary"); + printf("p4 diff $f#%d $f\n", $b->rev()); + printf("index %d..0\n", $base_changelist); + printf("--- %s\n", reloc($f)); + printf("+++ /dev/null\n"); + # p4 print -q $b->path() . '#' . $b->rev() |sed -e 's/^/-/' + my @lines; + open(HANDLE, "-|", "p4", "print", "-q", $b->path() . '#' . $b->rev()) + or + die("p4 print failed: $!\n"); + while (my $line = ) { + chomp $line; + $line =~ s/^/-/; + push @lines, $line; + } + close(HANDLE) or die("unable to close p4 print pipe: $!\n"); + if (0 != $?) { + $? & 0x7F & die("p4 print died from signal ", ($? & 0x7F), "\n"); + die("p4 print exited with error ", ($? >> 8), "\n"); + } + printf("@@ 1,%d 0,0 @@\n", scalar(@lines)); + printf("%s\n", join("\n", @lines)); + } elsif (!defined($b) && defined($c)) { + # added + next if ($c->type() eq "binary"); + printf("p4 diff $f $f#%d\n", $c->rev()); + printf("new file mode\n"); + printf("index 0..%d\n", $curr_changelist); + printf("--- /dev/null\n"); + printf("+++ %s\n", reloc($f)); + my @lines; + open(HANDLE, "-|", "p4", "print", "-q", $c->path() . '#' . 
$c->rev()) + or + die("p4 print failed: $!\n"); + + while (my $line = ) { + chomp $line; + $line =~ s/^/+/; + push @lines, $line; + } + close(HANDLE) or die("unable to close p4 print pipe: $!\n"); + if (0 != $?) { + $? & 0x7F & die("p4 print died from signal ", ($? & 0x7F), "\n"); + die("p4 print exited with error ", ($? >> 8), "\n"); + } + printf("@@ 0,0 1,%d @@\n", scalar(@lines)); + printf("%s\n", join("\n", @lines)); + } elsif (defined($b) && defined($c)) { + # check diff + next if ($b->type() eq "binary" || $c->type() eq "binary"); + # "p4 diff $ignore_whitespace -du ". $c->path() . '#' . $c->rev() . " " . $b->path() . '#' . $b->rev() + printf("p4 diff $f#%d $f#%d\n", $b->rev(), $c->rev()); + printf("index %d..%d\n", $base_changelist, $curr_changelist); + my @lines; + my @cmd = ("p4", "diff", "-du", + $b->path() . '#' . $b->rev(), + $c->path() . '#' . $c->rev()); + splice(@cmd, 2, 0, '-db') if $ignore_whitespace; + open(HANDLE, "-|", @cmd) or + die("p4 diff failed: $!\n"); + while (my $line = ) { + chomp $line; + if ($line =~ m/^(---|\+\+\+)/) { + $line =~ s/^(---\s+\S+).*$/$1/; + $line =~ s/^(\+\+\+\s+\S+).*$/$1/; + $line = reloc($line); + } + printf("%s\n", $line); + } + close(HANDLE) or die("unable to close p4 diff pipe: $!\n"); + if (0 != $?) { + $? & 0x7F & die("p4 diff died from signal ", ($? & 0x7F), "\n"); + die("p4 diff exited with error ", ($? >> 8), "\n"); + } + } else { + warn("WARNING: not in base or current for $f\n"); + } +} + +exit 0 if defined($suppress_unchanged); + +foreach my $f (@unchanged) { + my $name = $f->path(); + next + unless P4FileList::include_me($name, \@include_patterns, + \@exclude_patterns); + printf("p4 diff $name#%d $name\n", $f->rev()); + printf("=== %s\n", reloc($name)); +} diff --git a/scripts/select.pm b/scripts/select.pm new file mode 100644 index 00000000..a91f51b8 --- /dev/null +++ b/scripts/select.pm @@ -0,0 +1,171 @@ +#!/usr/bin/env perl + +# Copyright (c) MediaTek USA Inc., 2023 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# +# select.pm [--tla tla[,tla]*]* [--range min_days:mex_days] \ +# [--owner regexp]* [--(sha|cl) id]* line_data annotate_data +# +# This is a sample 'genhtml --select-script' callback - used to decide +# whether a particular line information is interesting - and thus should +# be included in the coverage report or not. +# +# --tla: is a (possibly comma-separated list of) differential categories +# which should be retained: +# select.pm --tla LBC,UNC,UIC ... +# +# --sha/--cl: is a (possibly comma-separated list of) git SHAs or +# perforce changelists which should be retained. +# Match checks that the provided string matches the leading characters +# of the full SHA or changelist. +# +# --range: is a time period such that only code written or changed +# within the specified period is retained. +# One or more ranges may be specified either by using the argument +# multiple times or by passing a comma-separted list of ranges. 
+# select.pm --range 5:10,12:15 ... +# +# --owner: is a regular expression. A coverpoint is retained if its +# "full name" field matches the regexp. +# +# When multiple selection criteria are applied (e.g., both age and owner), +# then The coverpoint is retained if any of criteria match. +# +# Note that you --owner and --age require that source data is annotated - +# see the --annotate-script section of the genhtml man page. +# + +package select; +use strict; +use File::Basename qw(dirname basename); +use File::Spec; +use Getopt::Long qw(GetOptionsFromArray); +use Scalar::Util qw(looks_like_number); +use lcovutil; + +our @ISA = qw(Exporter); +our @EXPORT_OK = qw(new); + +use constant { + AGE => 0, + TLA => 1, + OWNER => 2, + SHA => 3, +}; + +sub new +{ + my $class = shift; + my $script = shift; + + my (@range, @tla, @owner, @sha); + my @args = @_; + my $exe = basename($script ? $script : $0); + my $standalone = $script eq $0; + my $help; + if (!GetOptionsFromArray(\@_, + ("range:s" => \@range, + 'tla:s' => \@tla, + 'owner:s' => \@owner, + 'sha|cl:s' => \@sha, + 'help' => \$help)) || + $help || + (!$standalone && 0 != scalar(@_)) || + 0 == scalar(@args) # expect at least one selection criteria + ) { + print(STDERR <tla(); + return 1 if grep({ $tla eq $_ } @{$self->[TLA]}); + } + + if (defined($annotateData)) { + my $age = $annotateData->age(); + return 1 + if grep({ $age >= $_->[0] && $age <= $_->[1] } @{$self->[AGE]}); + + my $commit = $annotateData->commit(); + # match at head of commit ID string + return 1 + if (defined($commit) && + '' ne $commit && + grep({ $commit =~ /^$_/ } @{$self->[SHA]})); + + my $fullname = $annotateData->full_name(); + return 1 if grep({ $fullname =~ $_ } @{$self->[OWNER]}); + } + lcovutil::info(1, + "drop " + . + (defined($lineData) ? + $lineData->type() . ' ' . $lineData->tla() : + "$filename:$lineNo") . + "\n"); + # no match - not interesting + return 0; +} + +1; diff --git a/scripts/simplify.pm b/scripts/simplify.pm new file mode 100644 index 00000000..3108cea5 --- /dev/null +++ b/scripts/simplify.pm @@ -0,0 +1,114 @@ +#!/usr/bin/end perl + +# Copyright (c) MediaTek USA Inc., 2025 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# +# simplify.pm [--file pattern_file] [--re regexp] [--separator sep_char] +# +# This is a sample 'genhtml --simplify-script' callback - used to decide +# whether shorten the (possibly demangled) function names displayed in the +# 'function detail' tables. +# Note that the simplified names are ONLY used in the table - the +# coverage DB is not affected - so, for example '--erase-function' +# regexps must match the actual (possibly demangled) name of the function. +# +# --file: is the name of a file containing Perl regexpe, one per line +# +# --re: is a perl regexp or 'sep_char' separated list of regexps. +# +# --separator: is the character used to separate the list of regexpe. +# (',' is probably a poor choice as perl regexps often contain +# comma. 
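To make the intent concrete, here is a minimal sketch of what a single --re pattern does to one demangled name. The name and pattern are invented; the pattern is applied through eval, exactly as simplify() does:

    my $name    = 'foo::Container<unsigned long>::size(int) const';
    my $pattern = 's/<[^<>]*>//g';    # hypothetical pattern: drop template arguments
    eval "\$name =~ $pattern ;";      # same eval-based application as simplify()
    print "$name\n";                  # foo::Container::size(int) const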
+ +package simplify; + +use strict; +use Getopt::Long qw(GetOptionsFromArray); +use File::Basename qw(dirname basename); +use lcovutil; + +our @ISA = qw(Exporter); +our @EXPORT_OK = qw(new); + +sub new +{ + my $class = shift; + my $script = shift; + my (@patterns, $file, $sep); + my @args = @_; + my $exe = basename($script ? $script : $0); + my $standalone = $script eq $0; + my $help; + if (!GetOptionsFromArray(\@_, + ("file:s" => \$file, + 'separator:s' => \$sep, + 're:s' => \@patterns, + 'help' => \$help)) || + $help || + (!$standalone && 0 != scalar(@_)) || + 0 == scalar(@args) || # expect at least one pattern + (@patterns && $file) || !(@patterns || $file) + ) { + print(STDERR <) { + chomp; + next if /^#/ || !$_; # skip comment and blank + push(@patterns, $_); + } + close(HANDLE) or die("unable to close pattern file handle: $!\n"); + } elsif (defined($sep)) { + @patterns = split($sep, join($sep, @patterns)); + } + + # verify that the patterns are valid... + lcovutil::verify_regexp_patterns($script, \@patterns); + + return bless \@patterns, $class; +} + +sub simplify +{ + my ($self, $name) = @_; + + foreach my $p (@$self) { + # sadly, no support for pre-compiled patterns + eval "\$name =~ $p ;"; # apply pattern that user provided... + # $@ should never match: we already checked pattern validity during + # initialization - above. Still: belt and braces. + die("invalid 'simplify' regexp '$p->[0]': $@") + if ($@); + } + return $name; +} + +1; diff --git a/scripts/spreadsheet.py b/scripts/spreadsheet.py new file mode 100755 index 00000000..ebb37813 --- /dev/null +++ b/scripts/spreadsheet.py @@ -0,0 +1,753 @@ +#!/usr/bin/env python3 + +import xlsxwriter +import argparse +import json +import pdb +import datetime +import os.path +import os +import sys + +from xlsxwriter.utility import xl_rowcol_to_cell +from functools import cmp_to_key + +devMinThreshold = 1.5 +devMaxThreshold = 2.0 +thresholdPercent = 0.15 + +class GenerateSpreadsheet(object): + + def __init__(self, excelFile, files, args): + + s = xlsxwriter.Workbook(excelFile) + + # keep a list of sheets so we can insert a summary.. 
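+        # Workbook layout: one worksheet is written per input JSON file; the
+        # extra 'geninfo_summary' sheet is only created when more than one
+        # file is given, and is hidden again at the end if fewer than two
+        # geninfo sheets were actually populated.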
+        geninfoSheets = []
+        summarySheet = s.add_worksheet("geninfo_summary") if 1 < len(files) else None
+
+        # order: order of processing
+        # file: time to process one GCDA file
+        # parse: time to generate and read gcov data
+        # exec: time to execute gcov
+        # append: to merge file info into parent
+        # (a list rather than a tuple so that --verbose can extend it below)
+        geninfoKeys = ['order', 'file', 'parse', 'exec', 'append']
+
+        # work: productive time: process_one_chunk + merge chunk
+        # chunk: everything from fork() to end of filesystem cleanup after child merge
+        # child: time from entering child process to immediately before serialize
+        # process: time to call process_one_chunk
+        # undump: time to deserialize chunk data into master
+        # queue: time between child finish and start of merge in parent
+        # merge: time to merge returned chunk info
+        geninfoChunkKeys = ('work', 'chunk', 'queue', 'child', 'process', 'undump', 'merge')
+        geninfoSpecialKeys = ('total', 'parallel', 'filter', 'write')
+
+        # keys related to filtering
+        filterKeys = ('filt_chunk', 'filt_queue', 'filt_child', 'filt_proc', 'filt_undump', 'filt_merge', 'derive_end')
+        if args.verbose:
+            geninfoKeys.extend(['read', 'translate'])
+
+        self.formats = {
+            'twoDecimal': s.add_format({'num_format': '0.00'}),
+            'intFormat': s.add_format({'num_format': '0'}),
+            'title': s.add_format({'bold': True,
+                                   'align': 'center',
+                                   'valign': 'vcenter',
+                                   'text_wrap': True}),
+            'italic': s.add_format({'italic': True,
+                                    'align': 'center',
+                                    'valign': 'vcenter'}),
+            'highlight': s.add_format({'bg_color': 'yellow'}),
+            'danger': s.add_format({'bg_color': 'red'}),
+            'good': s.add_format({'bg_color': 'green'}),
+        }
+        intFormat = self.formats['intFormat']
+        twoDecimal = self.formats['twoDecimal']
+
+        def insertConditional(sheet, avgRow, devRow,
+                              beginRow, beginCol, endRow, endCol):
+            # absolute row, relative column
+            avgCell = xl_rowcol_to_cell(avgRow, beginCol, True, False)
+            devCell = xl_rowcol_to_cell(devRow, beginCol, True, False)
+            # relative row, relative column
+            dataCell = xl_rowcol_to_cell(beginRow, beginCol, False, False)
+            # absolute value of difference from the average
+            diff = 'ABS(%(cell)s - %(avg)s)' % {
+                'cell' : dataCell,
+                'avg' : avgCell,
+            }
+
+            # min difference is difference > 15% of average
+            # only look at positive difference: taking MORE than average time
+            threshold = '(%(cell)s - %(avg)s) > (%(percent)f * %(avg)s)' % {
+                'cell' : dataCell,
+                'avg' : avgCell,
+                'percent': thresholdPercent,
+            }
+
+            # cell not blank and difference > 2X std.dev and > 15% of average
+            dev2 = '=AND(NOT(OR(ISBLANK(%(cell)s),ISBLANK(%(dev)s))), %(diff)s > (%(devMaxThresh)f * %(dev)s), %(threshold)s)' % {
+                'diff' : diff,
+                'threshold' : threshold,
+                'cell' : dataCell,
+                'avg' : avgCell,
+                'dev' : devCell,
+                'devMaxThresh': devMaxThreshold,
+            }
+            # yellow if between 1.5 and 2 standard deviations away
+            dev1 = '=AND(NOT(OR(ISBLANK(%(cell)s),ISBLANK(%(dev)s))), %(diff)s > (%(devMinThresh)f * %(dev)s), %(diff)s <= (%(devMaxThresh)f * %(dev)s), %(threshold)s) ' % {
+                'diff' : diff,
+                'threshold' : threshold,
+                'cell' : dataCell,
+                'avg' : avgCell,
+                'dev' : devCell,
+                'devMaxThresh': devMaxThreshold,
+                'devMinThresh': devMinThreshold,
+            }
+            sheet.conditional_format(beginRow, beginCol, endRow, endCol,
+                                     { 'type': 'formula',
+                                       'criteria': dev1,
+                                       'format' : self.formats['highlight'],
+                                     })
+            # red if more than 2 standard deviations away
+            sheet.conditional_format(beginRow, beginCol, endRow, endCol,
+                                     { 'type': 'formula',
+                                       'criteria': dev2,
+                                       'format' :
self.formats['danger'], + }) + # green if more than 1.5 standard deviations better + good = '=AND(NOT(OR(ISBLANK(%(cell)s),ISBLANK(%(dev)s))), (%(cell)s - %(avg)s) < (%(devMaxThresh)f * -%(dev)s), %(threshold)s)' % { + 'cell' : dataCell, + 'threshold' : threshold, + 'cell' : dataCell, + 'avg' : avgCell, + 'dev' : devCell, + 'devMaxThresh': devMaxThreshold, + } + sheet.conditional_format(beginRow, beginCol, endRow, endCol, + { 'type': 'formula', + 'criteria': good, + 'format' : self.formats['good'], + }) + + def insertStats(keys, sawData, sumRow, avgRow, devRow, beginRow, endRow, col): + firstCol = col + col -= 1 + for key in keys: + col += 1 + if key in ('order',): + continue + if key not in sawData: + continue + + f = xl_rowcol_to_cell(beginRow, col) + t = xl_rowcol_to_cell(endRow, col) + + sum = "+SUM(%(from)s:%(to)s)" % { + "from" : f, + "to": t + } + sheet.write_formula(sumRow, col, sum, twoDecimal) + avg = "+AVERAGE(%(from)s:%(to)s)" % { + 'from': f, + 'to': t, + } + sheet.write_formula(avgRow, col, avg, twoDecimal) + if sawData[key] < 2: + continue + dev = "+STDEV(%(from)s:%(to)s)" % { + 'from': f, + 'to': t, + } + sheet.write_formula(devRow, col, dev, twoDecimal) + + insertConditional(sheet, avgRow, devRow, + beginRow, firstCol, endRow, col) + + for name in files: + try: + with open(name) as f: + data = json.load(f) + except Exception as err: + print("%s: unable to parse: %s" % (name, str(err))) + continue + + try: + cfg = data['config'] + + try: + tool = data['config']['tool'] + except: + tool = 'unknown' + print("%s: unknown tool" %(name)) + except: + print("%s: no 'config' data key - I think this is not lcov performance data - skipping" % (name)) + continue + + p, f = os.path.split(name) + if os.path.splitext(f)[0] == tool: + sheetname = os.path.split(p)[1] # the directory + else: + sheetname = f + if len(sheetname) > 30: + # take the tail of the string.. + sheetname = sheetname[-30:] + sn = sheetname + for i in range(1000): + try: + sheet = s.add_worksheet(sn[-31:]) + break + except: + sn = sheetname + "_" + str(i) + else: + print("%s in use..giving up" % (sheetname)) + sys.exit(1) + + try: + parallel = data['config']['maxParallel'] + except: + parallel = 0 + + row = 0 + sheet.write_string(row, 0, name) + row += 1 + sheet.write_string(row, 0, 'config') + for n in sorted(data['config'].keys()): + try: + sheet.write_string(row, 1, n) + if n in ("tool", 'date', ): + sheet.write_string(row, 2, data['config'][n]) + else: + sheet.write_number(row, 2, data['config'][n], intFormat) + row += 1 + except: + # old file format..skip it + pass + + if tool == 'geninfo': + for k in ('chunkSize', 'nChunks', 'nFiles', 'interval'): + try: + sheet.write_number(row, 2, data[k], intFormat); + sheet.write_string(row, 1, k) + row += 1 + except: + pass + + for k in ('total', 'overall'): + if k in data: + sheet.write_string(row, 0, 'total') + sheet.write_number(row, 1, data[k], twoDecimal) + total = xl_rowcol_to_cell(row, 1) + totalRow = row + row += 1 + + if tool == 'lcov': + # is this a parallel execution? 
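+            # (a parallel lcov run records a 'segments' count under 'config'
+            # plus per-segment timing blocks keyed by segment index; a
+            # non-segmented run raises a KeyError here and falls through to
+            # the flat layout handled in the except branch below)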
+ try: + segments = data['config']['segments'] + + effectiveParallelism = "" + sep = "+(" + for seg in range(segments): + sheet.write_string(row, 0, 'segment %d' % (seg)) + try: + d = data[seg] + except: + d = data[str(seg)] + + start = row + for k in ('total', 'merge', 'undump'): + sheet.write_string(row, 1, k) + try: + sheet.write_number(row, 2, float(d[k]), twoDecimal) + if k == 'total': + effectiveParallelism += sep + xl_rowcol_to_cell(row, 2) + sep = "+" + except: + print("%s: failed to write %s for lcov[seg %d][%s]" % ( + name, str(d[k]) if k in d else "??", seg, k)) + row += 1 + begin = row + for k in ('parse', 'append'): + try: + # don't crash on partially corrupt profile data + d2 = d[k] + sheet.write_string(row, 1, k) + for f in sorted(d2.keys()): + sheet.write_string(row, 2, f) + try: + sheet.write_number(row, 3, float(d2[f]), twoDecimal) + except: + print("%s: failed to write %s for lcov[seg %d][%s][$s]" % (name, str(d2[f]), seg, k, f)) + row += 1 + except: + print("%s: failed to write %s for lcov[seg %d]" % (name, k, seg)) + effectiveParallelism += ")/%(total)s" % { + 'total': total, + } + sheet.write_formula(totalRow, 3, effectiveParallelism, twoDecimal) + + + except Exception as err: + + # not segmented - just print everything... + for k in ('total', 'merge', 'undump'): + sheet.write_string(row, 1, k) + val = 'NA' + try: + val = data[k] + sheet.write_number(row, 2, float(val), twoDecimal) + except: + print("%s: failed to write %s for lcov[%s]" % (name, str(val), k)) + row += 1 + for k in ('parse', 'append'): + try: + d2 = data[k] + sheet.write_string(row, 1, k) + for f in sorted(d2.keys()): + sheet.write_string(row, 2, f) + try: + sheet.write_number(row, 3, float(d2[f]), twoDecimal) + except: + print("%s: failed to write %s for lcov[%s][$s]" % (name, str(d2[f]), k, f)) + row += 1 + except: + print("%s: failed to find key '%s'" %(name, k)) + + # go on to the next file + continue + + elif tool == 'geninfo': + + summaryKeys = (*geninfoSpecialKeys, *geninfoChunkKeys, *geninfoKeys) + if args.show_filter: + summaryKeys = (*geninfoSpecialKeys, *geninfoChunkKeys, *geninfoKeys, *filterKeys) + if summarySheet: + # first one - add titles, etc + title = self.formats['title'] + + if len(geninfoSheets) == 0: + summarySheet.write_string(1, 0, "average", title) + summarySheet.write_string(2, 0, "stddev", title) + titleRow = 0 + summarySheet.write_string(titleRow, 0, "case", title) + col = 1 + for k in summaryKeys: + if k in ('order',): + continue + summarySheet.write_string(titleRow, col, k, title) + col += 1 + if k in geninfoSpecialKeys: + continue + summarySheet.write_string(titleRow, col, k + ' avg', title) + col += 1 + summarySheet.write_string(3, 0, "YELLOW: Value between [%(devMinThresh)0.2f,%(devMaxThresh)0.2f) standard deviations larger than average" % { + 'devMinThresh': devMinThreshold, + 'devMaxThresh': devMaxThreshold, + }, self.formats['highlight']) + summarySheet.write_string(4, 0, "RED: Value more than %(devMaxThresh)0.2f standard deviations larger than average" % { + 'devMaxThresh': devMaxThreshold, + }, self.formats['danger']) + summarySheet.write_string(5, 0, "GREEN: Value more than %(devMaxThresh)0.2f standard deviations smaller than average" % { + 'devMaxThresh': devMaxThreshold, + }, self.formats['good']) + firstSummaryRow = 7 + + # want rows for average and variance - leave a blank row + summaryRow = firstSummaryRow + len(geninfoSheets) + + geninfoSheets.append(sheet) + # already inserted the ''total' entry + specialsStart = row - 1 + for k in geninfoSpecialKeys[1:]: + 
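+                    # 'total' was already written above; add rows for the
+                    # remaining special keys when present ('parallel' is only
+                    # filled in later, once the effective-parallelism formula
+                    # has been computed)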
try: + sheet.write_string(row, 0, k) + sheet.write_number(row, 1, data[k], twoDecimal) + except: + pass + row += 1; + + sawData = {} + sawData['total'] = 0 + sheet.write_string(row, 0, 'find') + row += 1 + for dirname in sorted(data['find'].keys()): + sheet.write_string(row, 1, dirname) + sheet.write_number(row, 2, data['find'][dirname], twoDecimal) + row += 1 + + row += 1 + def dataSection(typename, elements, keylist, dataRow, statsRow): + + row = dataRow + sheet.write_string(row, 0, typename) + col = 2 + for key in keylist: + sheet.write_string(row, col, key) + col += 1 + row += 1 + dataStart = row + + sawData = {} + for id in elements: + col = 1 + sheet.write_string(row, col, id) + col += 1 + + for key in keylist: + try: + v = data[key][id] + if key in ('order',): + sheet.write_number(row, col, v, intFormat) + else: + sheet.write_number(row, col, v, twoDecimal) + try: + sawData[key] += 1 + except: + sawData[key] = 1 + + except: + pass + col += 1 + row += 1 + + dataEnd = row - 1 + + row = statsRow + # insert link to first associated data entry + sheet.write_url(row, 0, "internal:'%s'!%s" %( + sheet.get_name(), + xl_rowcol_to_cell(dataStart, 0)), + string=typename) + col = 2 + for key in keylist: + if key not in ('order', ): + sheet.write_string(row, col, key) + col += 1 + row += 1 + sheet.write_string(row, 1, 'total') + sheet.write_string(row+1, 1, 'avg') + sheet.write_string(row+2, 1, 'stddev') + insertStats(keylist, sawData, statsRow + 1, statsRow + 2, + statsRow+3, dataStart, dataEnd, 2) + return dataEnd + 1 + + chunkStatsRow = row + fileStatsRow = chunkStatsRow + 4 + filterStatsRow = fileStatsRow + 4; + + if args.show_filter: + chunkDataRow = filterStatsRow + 4 + else: + chunkDataRow = fileStatsRow + 4 + fileStatsRow = row + parallelSumRow = row+1 + parallelSumCol = 3 + + # first the chunk data... + # process: time from immediately before fork in parent + # to immediately after 'process_one_file' in + # child (can't record 'dumper' call time + # because that also dumps the profile + # child: time from child coming to life after fork + # to immediately after 'process_one_file' + # exec: time take to by 'gcov' call + # merge: time to merge child process (undump, read + # trace data, append to summary, etc.) 
+ # undump: dumper 'eval' call + stdout/stderr recovery + # parse: time to read child tracefile.info + # append: time to merge that into parent master report + try: + chunks = sorted(data['child'].keys(), key=int, reverse=True) + row = dataSection('chunks', chunks, geninfoChunkKeys, + chunkDataRow, chunkStatsRow) + row += 1 + fileDataRow = row + 1 + parallelSumCol = 2 + except: + # no chunk data - so just insert file data + fileStatsRow = chunkStatsRow + fileDataRow = fileStatsRow + 4 + pass + + + def cmpFile(a, b): + idA = int(data['order'][a]) + idB = int(data['order'][b]) + if idA < idB: + return 1 + else: + return 0 if idA == idB else -1 + + try: + row = dataSection('files', sorted(data['file'].keys(), key=cmp_to_key(cmpFile)), + geninfoKeys, fileDataRow, fileStatsRow) + except: + # there may be no files - if dataset was empty + print("No 'file' data in %s" % (name)) + + # now the filter data - if any + if args.show_filter: + filterDataRow = row + 1; + try: + chunks = sorted(data['filt_child'].keys(), key=int, reverse=True) + row = dataSection('filter', chunks, filterKeys, + filterDataRow, filterStatsRow) + row += 1 + + except: + pass + + + effectiveParallelism = "+%(sum)s/%(total)s" % { + 'sum': xl_rowcol_to_cell(parallelSumRow, parallelSumCol), + 'total': total, + } + sheet.write_formula(specialsStart + geninfoSpecialKeys.index('parallel'), + 1, effectiveParallelism, twoDecimal) + + if summarySheet: + summarySheet.write_string(summaryRow, 0, name) + # href to the corresponding page.. + summarySheet.write_url(summaryRow, 0, "internal:'%s'!A1" % ( + sheet.get_name())) + summaryCol = 1; + + sheetRef = "='" + sheet.get_name() + "'!" + + # insert total time and observed parallelism for this + # geninfo call + specialsRow = specialsStart + for k in geninfoSpecialKeys: + cell = xl_rowcol_to_cell(specialsRow, 1) + summarySheet.write_formula(summaryRow, summaryCol, + sheetRef + cell, twoDecimal) + summaryCol += 1 + specialsRow += 1 + + # now label this sheet's columns + # and also insert reference to total time and average time + # for each step into the summary sheet. 
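+                # (for every non-'order' key this copies two cells per sheet
+                # into the summary: the per-sheet total and the per-item
+                # average, matching the 'k' and 'k avg' column titles written
+                # when the summary sheet was first laid out)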
+ statsTotalRow = chunkStatsRow + 1 + statsAvgRow = chunkStatsRow + 2 + sections = [(geninfoChunkKeys, chunkStatsRow + 1, chunkStatsRow + 2), + (geninfoKeys, fileStatsRow + 1, fileStatsRow + 2),] + if args.show_filter: + sections.append((filterKeys, filterStatsRow +1, + filterStatsRow+2)) + for d in (sections): + totRow = d[1] + avgRow = d[2] + col = 2 + for k in d[0]: + if k not in ('order',): + sum = xl_rowcol_to_cell(totRow, col) + summarySheet.write_formula(summaryRow, summaryCol, + sheetRef + sum, twoDecimal) + summaryCol +=1 + avg = xl_rowcol_to_cell(avgRow, col) + summarySheet.write_formula(summaryRow, summaryCol, + sheetRef + avg, twoDecimal) + summaryCol +=1 + col += 1 + continue + + elif tool == 'genhtml': + + for k in ('parse_source', 'parse_diff', + 'parse_current', 'parse_baseline'): + if k in data: + sheet.write_string(row, 0, k) + sheet.write_number(row, 1, data[k], twoDecimal) + row += 1 + + # total: time from start to end of the particular unit - + # child: time from start to end of child process + # annotate: annotate callback time (if called) + # load: load source file (if no annotation) + # synth: generate file content (no annotation and no no file found) + # categorize: compute owner/date bins, differenntial categories + # process: time to generate data and write HTML for file + # synth: generate file content (no file found) + # source: + genhtmlKeys = [' '] # placeholder key + # these keys are computed for segments + genhtml_chunkyKeys = ['child', 'startDelay', 'mergeDelay', + 'merge_segment', 'segment'] + filter_keys = ['filt_undump', 'filt_merge', 'filt_queue', 'filt_chunk'] + + perObj_keys = ['file', 'source', 'categorize', 'annotate', 'check_version', + 'html', 'load', 'criteria', 'synth'] + + for k in perObj_keys: + if k in data: + genhtmlKeys.append(k) + + col = 3 + for k in genhtmlKeys: + sheet.write_string(row, col, k) + col += 1 + row += 1 + sumRow = row + sheet.write_string(row, 2, "total") + row += 1 + avgRow = row + sheet.write_string(row, 2, "average") + row += 1 + devRow = row + sheet.write_string(row, 2, "stddev") + row += 1 + + #print(" ".join(data.keys())) + try: + if 'file' in data: + scopeList = data['file'].keys() + else: + scopeList = data['html'].keys() + except: + print("%s: incomplete data - skipping" % (name)) + continue + begin = row + sawData = {} + #sawData['total'] = 0 + def printDataRow(name): + col = 4 + nonlocal row + for k in genhtmlKeys[1:]: + if (k in data and + name in data[k]): + try: + sheet.write_number(row, col, float(data[k][name]), twoDecimal) + if k in sawData: + sawData[k] += 1 + else: + sawData[k] = 1 + except: + print("%s: failed to write %s" %(name, data[k][name])) + col += 1 + + def visitScope(f): + nonlocal row + if '' == f: + sheet.write_string(row, 1, 'top') + else: + pth, name = os.path.split(f) + if name == '': + # this is a directory.. 
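+                    # (assumption: genhtml emits per-directory keys with a
+                    # trailing path separator, which is why os.path.split()
+                    # returns an empty basename here)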
+ sheet.write_string(row, 0, 'directory') + sheet.write_string(row, 1, pth) + else: + sheet.write_string(row, 3, name) + # there really is no 'total' data for any file or directory + printDataRow(f) + row += 1 + return 1 + + for f in sorted(scopeList): + visitScope(f) + + insertStats(genhtmlKeys, sawData, sumRow, avgRow, devRow, begin, + row-1, 3) + + overallParallelism = "+%(from)s/%(total)s" % { + 'from': xl_rowcol_to_cell(sumRow, 4), + 'total': total, + } + sheet.write_formula(totalRow, 2, overallParallelism, twoDecimal); + continue + + for k in data: + if k in ('parse_source', 'parse_diff', + 'emit', 'parse_current', 'parse_baseline'): + sheet.write_string(row, 0, k) + sheet.write_number(row, 1, data[k], twoDecimal) + row += 1 + elif k in ('file', 'dir', 'load', 'synth', 'check_version', + 'annotate', 'parse', 'append', 'segment', 'undump', + 'merge', 'gen_info', 'data', 'graph', 'find'): + sheet.write_string(row, 0, k) + d = data[k] + for n in sorted(d.keys()): + sheet.write_string(row, 1, n) + try: + sheet.write_number(row, 2, float(d[n]), twoDecimal) + except: + print("%s: failed to write %s for [%s][%s]" %(name, str(d[n]), k, n)) + row += 1; + continue + elif k in ('config', 'overall', 'total'): + continue + else: + print("not sure what to do with %s" % (k)) + + if summarySheet: + if len(geninfoSheets) < 2: + summarySheet.hide() + + # insert the average and variance data... + # (there will not be any such data if we didn't run geninfo) + try: + col = 1 + lastSummaryRow = firstSummaryRow + len(geninfoSheets) - 1 + avgRow = 1 + devRow = 2 + firstCol = col + for k in (*geninfoChunkKeys, *geninfoKeys): + if k in ('order',): + continue + for j in ('sum', 'avg'): + f = xl_rowcol_to_cell(firstSummaryRow, col) + t = xl_rowcol_to_cell(lastSummaryRow, col) + avg = "+AVERAGE(%(from)s:%(to)s)" % { + 'from': f, + 'to': t, + } + summarySheet.write_formula(avgRow, col, avg, twoDecimal) + avgCell = xl_rowcol_to_cell(avgRow, col) + dev = "+STDEV(%(from)s:%(to)s)" % { + 'from': f, + 'to': t, + } + summarySheet.write_formula(devRow, col, dev, twoDecimal) + col += 1 + insertConditional(summarySheet, avgRow, devRow, + firstSummaryRow, firstCol, lastSummaryRow, col -1) + except: + pass + s.close() + +if __name__ == "__main__": + + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, epilog=""" +Simple utility to turn genhtml/geninfo/lcov "profile" JSON output files into a somewhat readable spreadsheet for easier analysis. + +Example usage: + $ spreadsheet.py -o foo.xlsx data.json data2.json data3.json ... +""") + + parser.add_argument("-o", dest='out', action='store', + default='stats.xlsx', + help='save excel to file') + parser.add_argument("--threshold", dest='thresholdPercent', type=float, + help="difference from average smaller than this percentage is ignored (not colorized). Default %0.2f" % (thresholdPercent)) + parser.add_argument("--low", dest='devMinThreshold', type=float, + help="difference from average larger than this * stddev colored yellow. Default: %0.2f" %(devMinThreshold)) + parser.add_argument("--high", dest='devMinThreshold', type=float, + help="difference from average larger than this * stddev colored red. 
Default: %0.2f" %(devMaxThreshold)) + parser.add_argument('-v', '--verbose', dest='verbose', default=0, + action='count', help='verbosity of report: more data'); + parser.add_argument('--show-filter', dest='show_filter', default=False, + action='store_true', help='include filter keys in table'); + + parser.add_argument('files', nargs=argparse.REMAINDER) + + try: + args = parser.parse_args() + except IOError as err: + print(str(err)) + sys.exit(2) + + GenerateSpreadsheet(args.out, args.files, args) diff --git a/scripts/threshold.pm b/scripts/threshold.pm new file mode 100644 index 00000000..793f4031 --- /dev/null +++ b/scripts/threshold.pm @@ -0,0 +1,125 @@ +#!/usr/bin/env perl + +# Copyright (c) MediaTek USA Inc., 2024 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# threshold +# +# This is a simple example of a '--criteria-script' to be used by +# lcov/geninfo/genhtml. +# It can be called at any level of hierarchy - but is really expected to be +# called at the 'file' or 'top' level, in lcov or geninfo. +# It simply checks that the 'type' coverage (line, branch, function) exceeds +# the threshold. +# +# Format of the JSON input is: +# {"line":{"found":10,"hit:2,..},"function":{...},"branch":{}" +# Only non-zero elements are included. +# See the 'criteria-script' section in "man genhtml" for details. +# +# If passed the "--suppress" flag, this script will exit with status 0, +# even if the coverage criteria is not met. +# +# Example usage: +# +# - minimum acceptable line coverage = 85%, branch coveage = 70%, +# function coverage (of unique functions) = 100% +# "--rc criteria_callback_levels=top" parameter causes genhtml to execute +# the callback only at the top level (i.e., not also at every file) +# + +# genhtml --criteria-script $LCOV_HOME/share/lcov/support-scripts/threshold.pm,--line,85,--branch,70,--function,100 --rc criteria_callback_levels=top ... +# +# It is not hard to envision much more complicated coverage criteria. + +package threshold; + +use strict; +use Getopt::Long qw(GetOptionsFromArray); +use Scalar::Util qw/looks_like_number/; + +our @ISA = qw(Exporter); +our @EXPORT_OK = qw(new); + +use constant {SIGNOFF => 0,}; + +sub new +{ + my $class = shift; + my $signoff = 0; + my $script = shift; + my $standalone = $script eq $0; + my @options = @_; + my ($line, $function, $branch, $mcdc); + + if (!GetOptionsFromArray(\@_, + ('signoff' => \$signoff, + 'line=s' => \$line, + 'branch=s' => \$branch, + 'mcdc=s' => \$mcdc, + 'function=s' => \$function,)) || + (!$standalone && @_) + ) { + print(STDERR "Error: unexpected option:\n " . + join(' ', @options) . 
+ "\nusage: name type json-string [--signoff] [--line l_threshold] [--branch b_threshold] [--function f_threshold] [--mcdc -m_threshold]\n" + ); + exit(1) if $standalone; + return undef; + } + my %thresh; + $thresh{line} = $line if defined($line); + $thresh{branch} = $branch if defined($branch); + $thresh{function} = $function if defined($function); + $thresh{mcdc} = $mcdc if defined($mcdc); + die("$script: must specify at least of of --line, --branch, --function, --mcdc" + ) unless (%thresh); + foreach my $key (keys %thresh) { + my $v = $thresh{$key}; + die("unexpected $key threshold '$v'") + unless looks_like_number($v) && 0 < $v && $v <= 100; + } + my $self = [$signoff, \%thresh]; + + return bless $self, $class; +} + +sub check_criteria +{ + my ($self, $name, $type, $db) = @_; + + my $fail = 0; + my @messages; + + foreach my $key (sort keys %{$self->[1]}) { + next unless exists($db->{$key}); + + my $map = $db->{$key}; + my $found = $map->{found}; + next if $found == 0; + my $hit = $map->{hit}; + my $v = 100.0 * $hit / $found; + my $thresh = $self->[1]->{$key}; + + if ($v < $thresh) { + $fail = 1; + push(@messages, sprintf("$key: %0.2f < %0.2f", $v, $thresh)); + } + } + return ($fail && !$self->[SIGNOFF], \@messages); +} + +1; diff --git a/tests/Makefile b/tests/Makefile index 21b2ba21..173cc040 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -1,6 +1,51 @@ +ifeq ($(COVERAGE), 1) +ifeq ($(COVER_DB),) +export COVER_DB := $(shell echo `pwd`/cover_db) +export PYCOV_DB := $(shell echo `pwd`/pycov.dat) +export HTML_RPT := $(shell echo `pwd`/lcov_coverage) +endif +endif + +.PHONY: report + +all: check report + include common.mak -TESTS := genhtml/ lcov/ +TESTS := genhtml lcov gendiffcov llvm2lcov py2lcov perl2lcov xml2lcov + +# there may or may not be some .info files generated for exported +# tools - py2lcov, perl2lcov, etc. We want them included in the +# report - but they might not have been generated, so we need to +# ignore the potential 'empty glob pattern' error message and a +# potential remote repo timestamp issue +report: + $(SPREADSHEET_TOOL) -o report.xlsx `find . -name "*.json"` + if [ "x$(COVERAGE)" != 'x' ] ; then \ + cover $(COVER_DB) ; \ + $(BINDIR)/perl2lcov -o perlcov.info $(COVER_DB) \ + --version-script $(VERSION_SCRIPT) \ + --exclude genError.pm --exclude filter.pl \ + --exclude brokenCallback.pm --exclude MsgContext.pm \ + --omit-lines 'ERROR_INTERNAL' --omit-lines '\bdie\b' \ + --ignore unsupported,unused,inconsistent \ + --filter region ; \ + if [ -f $(PYCOV_DB) ] ; then \ + $(BINDIR)/py2lcov -o pycov.info $(PYCOV_DB) \ + --version-script $(VERSION_SCRIPT) ; \ + fi ; \ + $(BINDIR)/genhtml --parallel -o $(HTML_RPT) \ + perlcov.info pycov.info \ + --save --title 'LCOV regression tests' \ + --show-navigation --flat --branch --show-proportion \ + --version-script $(VERSION_SCRIPT) \ + --annotate-script $(ANNOTATE_SCRIPT) \ + --filter region \ + --ignore empty,inconsistent ; \ + cp p*cov.info $(HTML_RPT) ; \ + echo "Wrote HTML report to ${HTML_RPT}" ; \ + fi clean: - rm -rf *.info *.counts test.log src/ + rm -rf *.info *.counts test.log src report.xlsx \ + $(COVER_DB) $(HTML_RPT) $(PYCOV_DB) diff --git a/tests/README.md b/tests/README.md index a9b5f3c3..f5a1cd70 100644 --- a/tests/README.md +++ b/tests/README.md @@ -1,16 +1,64 @@ LCOV test suite =============== -This directory contains a number of regression tests for LCOV. To start it, -simply run `make check`. The resulting output is written to the terminal and -stored in a log file. 
+This directory contains a number of regression tests for LCOV. To start it: + + - if you are running in a build/development directory: + + - run `make check` + + - The resulting output is written to the terminal and + stored in a log file. + + - if you are running in or from an installed 'release' (e.g., from + $LCOV_HOME/share/lcov/tests): + + - (optional): cp -r $LCOV_HOME/share/lcov/tests myTestDir ; cd myTestDir + + - make [COVERAGE=1] + + - to generate coverage data for the lcov module, the 'Devel::Cover' + perl package must be available. + + - the Devel::Cover result is written to the terminal and stored in + 'test.log' + + - Results: + + The Devel::Cover 'raw' coverage data can be viewed by pointing your + browser to .../cover_db/coverage.html. + The coverage data can be redirected to a different location via the + COVER_DB variable: + $ make [COVER_DB=path/to/wherever] COVERAGE=1 ... test + + The data is translated to LCOV format and stored in + .../cover_db/perlcov.info. + The data can be redirected to a different location via the PERLCOV + variable: + $ make [PERLCOV=path/to/my/file.info] COVERAGE=1 ... test + + The corresponding genhtml-generated HTMLreport can be viewed by + pointing your browser to .../perlcov/index.html. + The report can be redirected to a different location via the HTML_RPT + variable: + $ make [HTML_RPT=path/to/my/html] COVERAGE=1 ... test + + - environment variables: + + - LCOV_SHOW_LOCATION: + if set, show location on die() or warn() + + - LCOV_FORCE_PARALLEL: + if set, force parallel processing, regardless of number of tasks - + even if only one. This is useful for regression testing - to make + sure that we cover both serial and parallel execution. You can modify some aspects of testing by specifying additional parameters on `make` invocation: - SIZE - Select the size of the artifical coverage files used for testing. + Select the size of the artificial coverage files used for testing. Supported values are small, medium, and large. The default value is small. diff --git a/tests/bin/check_counts b/tests/bin/check_counts index 05d61670..8a3f8a1a 100755 --- a/tests/bin/check_counts +++ b/tests/bin/check_counts @@ -16,15 +16,15 @@ use warnings; sub do_cmp($$$) { - my ($title, $a, $b) = @_; + my ($title, $a, $b) = @_; - if ($a == $b) { - print("$title: $a == $b\n"); - return 0; - } else { - print("$title: $a != $b => mismatch!\n"); - return 1; - } + if ($a == $b) { + print("$title: $a == $b\n"); + return 0; + } else { + print("$title: $a != $b => mismatch!\n"); + return 1; + } } my $lcov = $ENV{"LCOV"}; @@ -36,14 +36,14 @@ my ($lnhit2, $lnfound2, $fnhit2, $fnfound2, $brhit2, $brfound2); my $rc = 0; if (!defined($counts) || !defined($output)) { - die("Usage: $0 \n"); + die("Usage: $0 \n"); } open($fd, "<", $output) or die("$0: Could not read $output: $!\n"); while (<$fd>) { - ($lnhit, $lnfound) = ($1, $2) if (/(\d+) of (\d+) lines/); - ($fnhit, $fnfound) = ($1, $2) if (/(\d+) of (\d+) functions/); - ($brhit, $brfound) = ($1, $2) if (/(\d+) of (\d+) branches/); + ($lnhit, $lnfound) = ($1, $2) if (/(\d+) of (\d+) lines/); + ($fnhit, $fnfound) = ($1, $2) if (/(\d+) of (\d+) functions/); + ($brhit, $brfound) = ($1, $2) if (/(\d+) of (\d+) branches/); } close($fd); @@ -51,10 +51,10 @@ die("$0: Non-zero result code ($?) of command: $cmdline\n") if ($? 
!= 0); open($fd, "<", $counts) or die("$0: Could not open $counts: $!\n"); if (<$fd> !~ /^(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)/) { - die("$0: Invalid count file: $counts\n"); + die("$0: Invalid count file: $counts\n"); } ($lnhit2, $lnfound2, $fnhit2, $fnfound2, $brhit2, $brfound2) = - ($1, $2, $3, $4, $5, $6); + ($1, $2, $3, $4, $5, $6); close($fd); print("Comparing output for $output and $counts:\n"); diff --git a/tests/bin/checkdeps b/tests/bin/checkdeps index 775a4a7c..951f8a6e 100755 --- a/tests/bin/checkdeps +++ b/tests/bin/checkdeps @@ -19,45 +19,52 @@ my $verbose = 0; sub check_file($) { - my ($file) = @_; - my $fd; - my $line; - my $rc = 0; - - open($fd, "<", $file) or die("Could not open $file: $!\n"); - $line = <$fd>; - - if ($line =~ /^#.*perl/) { - while ($line = <$fd>) { - my $module; - - # Look for ...use...module...; - next if ($line !~ /^\s*use\s+(\S+).*;\s*$/); - - $module = $1; - print("Checking for $module\n") if ($verbose); - if (!eval("require $module")) { - warn("Error: Missing Perl module '$module' ". - "required by $file\n"); - $rc = 1; - } - } - } - - close($fd); - - return $rc; + my ($file) = @_; + my $fd; + my $line; + my $rc = 0; + + open($fd, "<", $file) or die("Could not open $file: $!\n"); + $line = <$fd>; + + if (defined($line) && + $line =~ /^#.*perl/) { + while ($line = <$fd>) { + my $module; + + # Look for ...use...module...; + next if ($line !~ /^\s*use\s+(\S+).*;\s*$/); + + $module = $1; + # skip modules we define... + next + if grep(/^$module$/, + ('lcovutil', 'annotateutil', + 'gitversion', 'gitblame', + 'getp4version', 'p4annotate')); + print("Checking for $module\n") if ($verbose); + if (!eval("require $module")) { + warn("Error: Missing Perl module '$module' " . + "required by $file\n"); + $rc = 1; + } + } + } + + close($fd); + + return $rc; } sub main() { - my $rc = 0; + my $rc = 0; - for my $file (@ARGV) { - $rc = 1 if (check_file($file)); - } + for my $file (@ARGV) { + $rc = 1 if (check_file($file)); + } - return $rc; + return $rc; } exit(main()); diff --git a/tests/bin/cleantests b/tests/bin/cleantests index 5d05e5bf..1df6ef57 100755 --- a/tests/bin/cleantests +++ b/tests/bin/cleantests @@ -3,16 +3,21 @@ # Copyright IBM Corp. 2020 # # Usage: cleantests -# +# MAKE="$1" shift TESTS="$*" +declare -a MAKE_OPTS +if [[ "${V:-0}" -lt 1 ]] ; then + MAKE_OPTS+=(-s) +fi + for TEST in ${TESTS} ; do if [[ -d "${TEST}" ]] ; then # Enter sub-directory - ${MAKE} -C "${TEST}" clean + ${MAKE} -C "${TEST}" clean "${MAKE_OPTS[@]}" fi done diff --git a/tests/bin/common b/tests/bin/common index 6bfe9e6b..8d718946 100644 --- a/tests/bin/common +++ b/tests/bin/common @@ -37,6 +37,12 @@ function t_detail() function t_announce() { local TESTNAME="$1" + local len=`echo "$1" | wc -c` + local start=`expr $len - 31` + if [[ $len > 32 ]] ; then + # test name too long - trim from left + TESTNAME="...`echo $TESTNAME | cut -c ${start}-`" + fi printf "$BOLD%-.35s$RESET " "$TESTNAME .............................." t_marker >> "$LOGFILE" diff --git a/tests/bin/mkinfo b/tests/bin/mkinfo index 5231aeac..0e194706 100755 --- a/tests/bin/mkinfo +++ b/tests/bin/mkinfo @@ -4,13 +4,13 @@ # # Usage: mkinfo [-o ] [--seed ] # [=...] -# +# # Create a fake lcov code coverage data file and optionally the corresponding # source tree. DATA_FILE contains all specifications for creating the data # file. Directives can be overridden using KEY=VALUE specifications with KEY # being in the form SECTION.KEY. SEED specifies the number used to initialize # the pseudo random number generator. 
-# +# # Example: # mkinfo profiles/small -o src files.numfiles=12 # @@ -24,14 +24,14 @@ use File::Path qw(make_path); use File::Basename; use Data::Dumper; -my $MAX_TAKEN = 1000; -my $use_colors = -t STDIN; -my $BOLD = $use_colors ? "\033[1m" : ""; -my $RESET = $use_colors ? "\033[0m" : ""; +my $MAX_TAKEN = 1000; +my $use_colors = -t STDIN; +my $BOLD = $use_colors ? "\033[1m" : ""; +my $RESET = $use_colors ? "\033[0m" : ""; sub usage() { - print(< [-o ] [--seed ] [=...] Create a fake lcov code coverage data file and optionally the corresponding @@ -39,7 +39,7 @@ source tree. DATA_FILE contains all specifications for creating the data file. Directives can be overridden using KEY=VALUE specifications with KEY being in the form SECTION.KEY. SEED specifies the number used to initialize the pseudo random number generator. - + Example: $0 profiles/small -o src files.numfiles=12 EOF @@ -47,241 +47,239 @@ EOF sub read_config($) { - my ($filename) = @_; - my $fd; - my %config; - my $section; - - open($fd, "<", $filename) or die("Could not open $filename: $!\n"); - while (my $line = <$fd>) { - my ($key, $value); - - $line =~ s/(^\s*|\s*$)//g; - next if ($line eq "" || $line =~ /^#/); - if ($line =~ /^\[\s*(\S+)\s*]$/) { - $section = $1; - next; - } - if ($line !~ /^(\S+)\s*=\s*(.*)$/) { - die("$filename:$.: Unknown line format: $line\n"); - } - ($key, $value) = ($1, $2); - if (!defined($section)) { - die("$filename:$.: Directive outside of section\n"); - } - $config{$section}->{$1} = $2; - } - close($fd); - - return \%config; + my ($filename) = @_; + my $fd; + my %config; + my $section; + + open($fd, "<", $filename) or die("Could not open $filename: $!\n"); + while (my $line = <$fd>) { + my ($key, $value); + + $line =~ s/(^\s*|\s*$)//g; + next if ($line eq "" || $line =~ /^#/); + if ($line =~ /^\[\s*(\S+)\s*]$/) { + $section = $1; + next; + } + if ($line !~ /^(\S+)\s*=\s*(.*)$/) { + die("$filename:$.: Unknown line format: $line\n"); + } + ($key, $value) = ($1, $2); + if (!defined($section)) { + die("$filename:$.: Directive outside of section\n"); + } + $config{$section}->{$1} = $2; + } + close($fd); + + return \%config; } sub apply_config($$) { - my ($config, $directive) = @_; + my ($config, $directive) = @_; - for my $dir (@$directive) { - if ($dir !~ /^([^\.]+)\.([^=]+)=(.*)$/) { - die("Unknown directive format: $dir\n"); - } - $config->{$1}->{$2} = $3; - } + for my $dir (@$directive) { + if ($dir !~ /^([^\.]+)\.([^=]+)=(.*)$/) { + die("Unknown directive format: $dir\n"); + } + $config->{$1}->{$2} = $3; + } } sub get_value($$;$) { - my ($config, $dir, $default) = @_; - my ($section, $key, $value); + my ($config, $dir, $default) = @_; + my ($section, $key, $value); - if ($dir !~ /^([^\.]+)\.([^=]+)$/) { - die("$0: Internal error: Unknown key format: $key\n"); - } - ($section, $key) = ($1, $2); + if ($dir !~ /^([^\.]+)\.([^=]+)$/) { + die("$0: Internal error: Unknown key format: $key\n"); + } + ($section, $key) = ($1, $2); - $value = $config->{$section}->{$key}; + $value = $config->{$section}->{$key}; - if (!defined($value)) { - if (!defined($default)) { - die("$0: Missing config value for $dir\n"); - } - $value = $default; - } + if (!defined($value)) { + if (!defined($default)) { + die("$0: Missing config value for $dir\n"); + } + $value = $default; + } - return $value; + return $value; } sub get_int($$;$$$) { - my ($config, $dir, $default, $min, $max) = @_; - my $value = get_value($config, $dir, $default); + my ($config, $dir, $default, $min, $max) = @_; + my $value = get_value($config, $dir, 
$default); - if ($value !~ /^\d+$/) { - die("$0: Config value $dir must be an integer: $value\n"); - } - $value = int($value); - if (defined($min) && $value < $min) { - die("$0: Config value $dir is too low (min $min): $value\n"); - } - if (defined($max) && $value > $max) { - die("$0: Config value $dir is too high (max $max): $value\n"); - } + if ($value !~ /^\d+$/) { + die("$0: Config value $dir must be an integer: $value\n"); + } + $value = int($value); + if (defined($min) && $value < $min) { + die("$0: Config value $dir is too low (min $min): $value\n"); + } + if (defined($max) && $value > $max) { + die("$0: Config value $dir is too high (max $max): $value\n"); + } - return int($value); + return int($value); } sub get_list($$;$) { - my ($config, $dir, $default) = @_; - my $value = get_value($config, $dir, $default); - my @list = split(/\s+/, $value); + my ($config, $dir, $default) = @_; + my $value = get_value($config, $dir, $default); + my @list = split(/\s+/, $value); - return \@list; + return \@list; } sub randlist($) { - my ($list) = @_; + my ($list) = @_; - return "" if (!@$list); - return $list->[int(rand(scalar(@$list)))]; + return "" if (!@$list); + return $list->[int(rand(scalar(@$list)))]; } sub randbool() { - return int(rand(2)); + return int(rand(2)); } # Reduce LIST to PERCENTAGE of its former size. sub reduce_list_per($$) { - my ($list, $percentage) = @_; - my $remove; + my ($list, $percentage) = @_; + my $remove; - $remove = int((100 - $percentage) * scalar(@$list) / 100); + $remove = int((100 - $percentage) * scalar(@$list) / 100); - for (my $i = 0; $i < $remove; $i++) { - splice(@$list, int(rand(scalar(@$list))), 1); - } + for (my $i = 0; $i < $remove; $i++) { + splice(@$list, int(rand(scalar(@$list))), 1); + } } # Reduce LIST to NUM items. 
sub reduce_list_num($$) { - my ($list, $num) = @_; - my $remove; + my ($list, $num) = @_; + my $remove; - $remove = scalar(@$list) - $num; + $remove = scalar(@$list) - $num; - for (my $i = 0; $i < $remove; $i++) { - splice(@$list, int(rand(scalar(@$list))), 1); - } + for (my $i = 0; $i < $remove; $i++) { + splice(@$list, int(rand(scalar(@$list))), 1); + } } sub _gen_filename($$) { - my ($c, $root) = @_; - my $ltop = get_list($c, "files.top", ""); - my $lsub = get_list($c, "files.sub", ""); - my $lsubsub = get_list($c, "files.subsub", ""); - my $lprefix = get_list($c, "files.prefix"); - my $lsuffix = get_list($c, "files.suffix", ""); - my $lext = get_list($c, "files.ext"); - my ($top, $sub, $subsub, $prefix, $suffix, $ext) = - ("", "", "", "", "", ""); - my $filename = ""; - - $top = randlist($ltop) if (randbool()); - $sub = randlist($lsub) if (randbool()); - $subsub = randlist($lsubsub) if (randbool()); - $prefix = randlist($lprefix); - $suffix = randlist($lsuffix) if (randbool()); - $ext = randlist($lext); - - $filename = $root; - $filename .= "/".$top if ($top ne ""); - $filename .= "/".$sub if ($sub ne ""); - $filename .= "/".$subsub if ($subsub ne ""); - $filename .= "/".$prefix; - $filename .= "_".$suffix if ($suffix ne ""); - $filename .= $ext; - $filename =~ s#^//#/#; - - return $filename; + my ($c, $root) = @_; + my $ltop = get_list($c, "files.top", ""); + my $lsub = get_list($c, "files.sub", ""); + my $lsubsub = get_list($c, "files.subsub", ""); + my $lprefix = get_list($c, "files.prefix"); + my $lsuffix = get_list($c, "files.suffix", ""); + my $lext = get_list($c, "files.ext"); + my ($top, $sub, $subsub, $prefix, $suffix, $ext) = ("", "", "", "", "", ""); + my $filename = ""; + + $top = randlist($ltop) if (randbool()); + $sub = randlist($lsub) if (randbool()); + $subsub = randlist($lsubsub) if (randbool()); + $prefix = randlist($lprefix); + $suffix = randlist($lsuffix) if (randbool()); + $ext = randlist($lext); + + $filename = $root; + $filename .= "/" . $top if ($top ne ""); + $filename .= "/" . $sub if ($sub ne ""); + $filename .= "/" . $subsub if ($subsub ne ""); + $filename .= "/" . $prefix; + $filename .= "_" . $suffix if ($suffix ne ""); + $filename .= $ext; + $filename =~ s#^//#/#; + + return $filename; } sub gen_filename($$$) { - my ($c, $root, $filenames) = @_; - my $filename; + my ($c, $root, $filenames) = @_; + my $filename; - do { - $filename = _gen_filename($c, $root); - } while ($filenames->{$filename}); - $filenames->{$filename} = 1; + do { + $filename = _gen_filename($c, $root); + } while ($filenames->{$filename}); + $filenames->{$filename} = 1; - return $filename; + return $filename; } sub gen_lines($$) { - my ($c, $length) = @_; - my @lines = 1 .. $length; - my $percent = get_int($c, "lines.instrumented", undef, 0, 100); + my ($c, $length) = @_; + my @lines = 1 .. 
$length; + my $percent = get_int($c, "lines.instrumented", undef, 0, 100); - reduce_list_per(\@lines, $percent); + reduce_list_per(\@lines, $percent); - return \@lines; + return \@lines; } sub gen_fnname($$) { - my ($c, $hash) = @_; - my $lverb = get_list($c, "functions.verb"); - my $ladj = get_list($c, "functions.adj", ""); - my $lnoun = get_list($c, "functions.noun", ""); - my ($verb, $adj, $noun) = ("", "", ""); - my $fnname; + my ($c, $hash) = @_; + my $lverb = get_list($c, "functions.verb"); + my $ladj = get_list($c, "functions.adj", ""); + my $lnoun = get_list($c, "functions.noun", ""); + my ($verb, $adj, $noun) = ("", "", ""); + my $fnname; - $verb = randlist($lverb); - $adj = randlist($ladj) if (randbool()); - $noun = randlist($lnoun) if (randbool()); + $verb = randlist($lverb); + $adj = randlist($ladj) if (randbool()); + $noun = randlist($lnoun) if (randbool()); - $fnname = $verb; - $fnname .= "_".$adj if ($adj ne ""); - $fnname .= "_".$noun if ($noun ne ""); + $fnname = $verb; + $fnname .= "_" . $adj if ($adj ne ""); + $fnname .= "_" . $noun if ($noun ne ""); - if (exists($hash->{$fnname})) { - my $i = 2; + if (exists($hash->{$fnname})) { + my $i = 2; - while (exists($hash->{$fnname.$i})) { - $i++; - } - $fnname .= $i; - } - $hash->{$fnname} = 1; + while (exists($hash->{$fnname . $i})) { + $i++; + } + $fnname .= $i; + } + $hash->{$fnname} = 1; - return $fnname; + return $fnname; } sub gen_functions($$) { - my ($c, $lines) = @_; - my @fnlines; - my @functions; - my %names; - my $percent = get_int($c, "functions.perinstrumented", undef, 0, 100); + my ($c, $lines) = @_; + my @fnlines; + my @functions; + my %names; + my $percent = get_int($c, "functions.perinstrumented", undef, 0, 100); - @fnlines = @$lines; - reduce_list_per(\@fnlines, $percent); + @fnlines = @$lines; + reduce_list_per(\@fnlines, $percent); - foreach my $fnline (@fnlines) { - push(@functions, [ $fnline, gen_fnname($c, \%names) ]); - } + foreach my $fnline (@fnlines) { + push(@functions, [$fnline, gen_fnname($c, \%names)]); + } - return \@functions; + return \@functions; } - # Returns a value distribution object. This object can be used to randomly # choose one element from a list of elements with a given relative distribution. # @@ -292,88 +290,88 @@ sub gen_functions($$) # num: Value sub get_dist($$;$) { - my ($c, $dir, $default) = @_; - my $list = get_list($c, $dir, $default); - my $sumprob = 0; - my @probs; + my ($c, $dir, $default) = @_; + my $list = get_list($c, $dir, $default); + my $sumprob = 0; + my @probs; - foreach my $spec (@$list) { - my ($n, $p); + foreach my $spec (@$list) { + my ($n, $p); - if ($spec =~ /^(\d+):(\d+)$/) { - ($n, $p) = ($1, $2); - } elsif ($spec =~ /^(\d+)$/) { - $n = $1; - $p = 1; - } else { - die("$0: Config value $dir must be a distribution ". - "list (a:p1 b:p2 ...)\n"); - } - $sumprob += $p; - push(@probs, [ $n, $sumprob ]); - } + if ($spec =~ /^(\d+):(\d+)$/) { + ($n, $p) = ($1, $2); + } elsif ($spec =~ /^(\d+)$/) { + $n = $1; + $p = 1; + } else { + die("$0: Config value $dir must be a distribution " . 
+ "list (a:p1 b:p2 ...)\n"); + } + $sumprob += $p; + push(@probs, [$n, $sumprob]); + } - return [ $sumprob, \@probs ]; + return [$sumprob, \@probs]; } sub rand_dist($) { - my ($dist) = @_; - my ($sumprob, $probs) = @$dist; - my $r = int(rand($sumprob)); + my ($dist) = @_; + my ($sumprob, $probs) = @$dist; + my $r = int(rand($sumprob)); - foreach my $prob (@$probs) { - my ($num, $x) = @$prob; - return $num if ($r < $x); - } + foreach my $prob (@$probs) { + my ($num, $x) = @$prob; + return $num if ($r < $x); + } - die("Internal error: Incomplete distribution list\n"); + die("Internal error: Incomplete distribution list\n"); } sub gen_branches($$) { - my ($c, $lines) = @_; - my $percent = get_int($c, "branches.perinstrumented", undef, 0, 100); - my @allblocks = @{get_list($c, "branches.blocks", "0")}; - my $branchdist = get_dist($c, "branches.branchdist", "2"); - my @brlines; - my @branches; + my ($c, $lines) = @_; + my $percent = get_int($c, "branches.perinstrumented", undef, 0, 100); + my @allblocks = @{get_list($c, "branches.blocks", "0")}; + my $branchdist = get_dist($c, "branches.branchdist", "2"); + my @brlines; + my @branches; - @brlines = @$lines; - reduce_list_per(\@brlines, $percent); + @brlines = @$lines; + reduce_list_per(\@brlines, $percent); - foreach my $brline (@brlines) { - my @blocks = @allblocks; - my $numblocks = int(rand(scalar(@blocks))) + 1; + foreach my $brline (@brlines) { + my @blocks = @allblocks; + my $numblocks = int(rand(scalar(@blocks))) + 1; - reduce_list_num(\@blocks, $numblocks); + reduce_list_num(\@blocks, $numblocks); - foreach my $block (@blocks) { - my $numbranch = rand_dist($branchdist); + foreach my $block (@blocks) { + my $numbranch = rand_dist($branchdist); - for (my $branch = 0; $branch < $numbranch; $branch++) { - push(@branches, [ $brline, $block, $branch]); - } - } - } + for (my $branch = 0; $branch < $numbranch; $branch++) { + push(@branches, [$brline, $block, $branch]); + } + } + } - return \@branches; + return \@branches; } sub gen_filesrc($) { - my ($c) = @_; - my ($length, $lines, $functions, $branches); - my $do_ln = get_int($c, "lines.enabled"); - my $do_fn = get_int($c, "functions.enabled"); - my $do_br = get_int($c, "branches.enabled"); + my ($c) = @_; + my ($length, $lines, $functions, $branches); + my $do_ln = get_int($c, "lines.enabled"); + my $do_fn = get_int($c, "functions.enabled"); + my $do_br = get_int($c, "branches.enabled"); - $length = 1 + int(rand(get_int($c, "lines.maxlines"))); - $lines = gen_lines($c, $length); - $functions = gen_functions($c, $lines) if ($do_fn); - $branches = gen_branches($c, $lines) if ($do_br); + $length = 1 + int(rand(get_int($c, "lines.maxlines"))); + $lines = gen_lines($c, $length); + $functions = gen_functions($c, $lines) if ($do_fn); + $branches = gen_branches($c, $lines) if ($do_br); - return [ $length, $lines, $functions, $branches ]; + return [$length, $lines, $functions, $branches]; } # Generate fake source tree. 
@@ -398,252 +396,254 @@ sub gen_filesrc($) # sub gen_src($$) { - my ($c, $root) = @_; - my %files; - my $numfiles = get_int($c, "files.numfiles"); - my %filenames; - my ($numlns, $numfns, $numbrs) = (0, 0, 0); + my ($c, $root) = @_; + my %files; + my $numfiles = get_int($c, "files.numfiles"); + my %filenames; + my ($numlns, $numfns, $numbrs) = (0, 0, 0); - for (my $i = 0; $i < $numfiles; $i++) { - my $filename = gen_filename($c, $root, \%filenames); - my $filesrc = gen_filesrc($c); + for (my $i = 0; $i < $numfiles; $i++) { + my $filename = gen_filename($c, $root, \%filenames); + my $filesrc = gen_filesrc($c); - $files{$filename} = $filesrc; - $numlns += scalar(@{$filesrc->[1]}) if (defined($filesrc->[1])); - $numfns += scalar(@{$filesrc->[2]}) if (defined($filesrc->[2])); - $numbrs += scalar(@{$filesrc->[3]}) if (defined($filesrc->[3])); - } + $files{$filename} = $filesrc; + $numlns += scalar(@{$filesrc->[1]}) if (defined($filesrc->[1])); + $numfns += scalar(@{$filesrc->[2]}) if (defined($filesrc->[2])); + $numbrs += scalar(@{$filesrc->[3]}) if (defined($filesrc->[3])); + } - return [ \%files, $numlns, $numfns, $numbrs ]; + return [\%files, $numlns, $numfns, $numbrs]; } sub write_src($) { - my ($src) = @_; - my ($files, $numlns, $numfns, $numbrs) = @$src; + my ($src) = @_; + my ($files, $numlns, $numfns, $numbrs) = @$src; - foreach my $filename (sort(keys(%{$files}))) { - my $filesrc = $files->{$filename}; - my $length = $filesrc->[0]; - my $dir = dirname($filename); - my $fd; + foreach my $filename (sort(keys(%{$files}))) { + my $filesrc = $files->{$filename}; + my $length = $filesrc->[0]; + my $dir = dirname($filename); + my $fd; - if (!-d $dir) { - make_path($dir) or - die("Could not create directory $dir\n"); - } + if (!-d $dir) { + make_path($dir) or + die("Could not create directory $dir\n"); + } - open($fd, ">", $filename) or - die("Could not create file $filename: $!\n"); - for (my $i = 0; $i < $length; $i++) { - print($fd "\n"); - } - close($fd); - } + open($fd, ">", $filename) or + die("Could not create file $filename: $!\n"); + for (my $i = 0; $i < $length; $i++) { + print($fd "\n"); + } + close($fd); + } } sub write_branches($$$$) { - my ($fd, $branches, $brhits, $iref) = @_; - my ($found, $hit) = (0, 0); + my ($fd, $branches, $brhits, $iref) = @_; + my ($found, $hit) = (0, 0); - # Line coverage data - foreach my $brdata (@$branches) { - my $brhit = $brhits->[$$iref++]; - my ($brline, $block, $branch) = @$brdata; + # Line coverage data + foreach my $brdata (@$branches) { + my $brhit = $brhits->[$$iref++]; + my ($brline, $block, $branch) = @$brdata; - $found++; - $hit++ if ($brhit ne "-" && $brhit > 0); - print($fd "BRDA:$brline,$block,$branch,$brhit\n"); - } - if ($found > 0) { - print($fd "BRF:$found\n"); - print($fd "BRH:$hit\n"); - } + $found++; + $hit++ if ($brhit ne "-" && $brhit > 0); + print($fd "BRDA:$brline,$block,$branch,$brhit\n"); + } + if ($found > 0) { + print($fd "BRF:$found\n"); + print($fd "BRH:$hit\n"); + } } sub write_lines($$$$) { - my ($fd, $lines, $lnhist, $iref) = @_; - my ($found, $hit) = (0, 0); + my ($fd, $lines, $lnhist, $iref) = @_; + my ($found, $hit) = (0, 0); - # Line coverage data - foreach my $line (@$lines) { - my $lnhit = $lnhist->[$$iref++]; + # Line coverage data + foreach my $line (@$lines) { + my $lnhit = $lnhist->[$$iref++]; - $found++; - $hit++ if ($lnhit > 0); - print($fd "DA:$line,$lnhit\n"); - } - print($fd "LF:$found\n"); - print($fd "LH:$hit\n"); + $found++; + $hit++ if ($lnhit > 0); + print($fd "DA:$line,$lnhit\n"); + } + print($fd 
"LF:$found\n"); + print($fd "LH:$hit\n"); } sub write_functions($$$$) { - my ($fd, $functions, $fnhits, $iref) = @_; - my ($found, $hit) = (0, 0); + my ($fd, $functions, $fnhits, $iref) = @_; + my ($found, $hit) = (0, 0); - # Function coverage data - foreach my $fn (@$functions) { - my ($fnline, $fnname) = @$fn; + # Function coverage data + foreach my $fn (@$functions) { + my ($fnline, $fnname) = @$fn; - print($fd "FN:$fnline,$fnname\n"); - } - foreach my $fn (@$functions) { - my ($fnline, $fnname) = @$fn; - my $fnhit = $fnhits->[$$iref++]; + print($fd "FN:$fnline,$fnname\n"); + } + foreach my $fn (@$functions) { + my ($fnline, $fnname) = @$fn; + my $fnhit = $fnhits->[$$iref++]; - $found++; - $hit++ if ($fnhit > 0); - print($fd "FNDA:$fnhit,$fnname\n"); - } - print($fd "FNF:$found\n"); - print($fd "FNH:$hit\n"); + $found++; + $hit++ if ($fnhit > 0); + print($fd "FNDA:$fnhit,$fnname\n"); + } + print($fd "FNF:$found\n"); + print($fd "FNH:$hit\n"); } sub write_filesrc($$$$$) { - my ($c, $fd, $filesrc, $hits, $iter) = @_; - my ($length, $lines, $functions, $branches) = @$filesrc; - my $do_ln = get_int($c, "lines.enabled"); - my $do_fn = get_int($c, "functions.enabled"); - my $do_br = get_int($c, "branches.enabled"); + my ($c, $fd, $filesrc, $hits, $iter) = @_; + my ($length, $lines, $functions, $branches) = @$filesrc; + my $do_ln = get_int($c, "lines.enabled"); + my $do_fn = get_int($c, "functions.enabled"); + my $do_br = get_int($c, "branches.enabled"); - write_functions($fd, $functions, $hits->[1], \$iter->[1]) if ($do_fn); - write_branches($fd, $branches, $hits->[2], \$iter->[2]) if ($do_br); - write_lines($fd, $lines, $hits->[0], \$iter->[0]) if ($do_ln); + write_functions($fd, $functions, $hits->[1], \$iter->[1]) if ($do_fn); + write_branches($fd, $branches, $hits->[2], \$iter->[2]) if ($do_br); + write_lines($fd, $lines, $hits->[0], \$iter->[0]) if ($do_ln); } sub write_info($$$$) { - my ($c, $filename, $src, $hits) = @_; - my $files = $src->[0]; - my $fd; - my %iters; + my ($c, $filename, $src, $hits) = @_; + my $files = $src->[0]; + my $fd; + my %iters; - foreach my $testname (keys(%{$hits})) { - $iters{$testname} = [ 0, 0, 0 ]; - } + foreach my $testname (keys(%{$hits})) { + $iters{$testname} = [0, 0, 0]; + } - open($fd, ">", $filename) or die("Could not create $filename: $!\n"); + open($fd, ">", $filename) or die("Could not create $filename: $!\n"); - foreach my $filename (sort(keys(%{$files}))) { - my $filesrc = $files->{$filename}; + foreach my $filename (sort(keys(%{$files}))) { + my $filesrc = $files->{$filename}; - foreach my $testname (sort(keys(%{$hits}))) { - my $testhits = $hits->{$testname}; - my $iter = $iters{$testname}; + foreach my $testname (sort(keys(%{$hits}))) { + my $testhits = $hits->{$testname}; + my $iter = $iters{$testname}; - print($fd "TN:$testname\n"); - print($fd "SF:$filename\n"); + print($fd "TN:$testname\n"); + print($fd "SF:$filename\n"); - write_filesrc($c, $fd, $filesrc, $testhits, $iter); + write_filesrc($c, $fd, $filesrc, $testhits, $iter); - print($fd "end_of_record\n"); - } - } + print($fd "end_of_record\n"); + } + } - close($fd); + close($fd); } sub get_hit_found($) { - my ($list) = @_; - my ($hit, $found) = (0, 0); + my ($list) = @_; + my ($hit, $found) = (0, 0); - foreach my $e (@$list) { - $hit++ if ($e ne "-" && $e > 0); - $found++; - } - return ($hit, $found); + foreach my $e (@$list) { + $hit++ if ($e ne "-" && $e > 0); + $found++; + } + return ($hit, $found); } sub write_counts($$) { - my ($filename, $hits) = @_; - my $fd; - my (@tlnhits, 
@tfnhits, @tbrhits); + my ($filename, $hits) = @_; + my $fd; + my (@tlnhits, @tfnhits, @tbrhits); - foreach my $testname (keys(%{$hits})) { - my $testhits = $hits->{$testname}; - my ($lnhits, $fnhits, $brhits) = @$testhits; + foreach my $testname (keys(%{$hits})) { + my $testhits = $hits->{$testname}; + my ($lnhits, $fnhits, $brhits) = @$testhits; - for (my $i = 0; $i < scalar(@$lnhits); $i++) { - $tlnhits[$i] += $lnhits->[$i]; - } - for (my $i = 0; $i < scalar(@$fnhits); $i++) { - $tfnhits[$i] += $fnhits->[$i]; - } - for (my $i = 0; $i < scalar(@$brhits); $i++) { - my $h = $brhits->[$i]; + for (my $i = 0; $i < scalar(@$lnhits); $i++) { + $tlnhits[$i] += $lnhits->[$i]; + } + for (my $i = 0; $i < scalar(@$fnhits); $i++) { + $tfnhits[$i] += $fnhits->[$i]; + } + for (my $i = 0; $i < scalar(@$brhits); $i++) { + my $h = $brhits->[$i]; - $h = 0 if ($h eq "-"); - $tbrhits[$i] += $h; - } - } + $h = 0 if ($h eq "-"); + $tbrhits[$i] += $h; + } + } - open($fd, ">", $filename) or die("Could not create $filename: $!\n"); - print($fd join(" ", get_hit_found(\@tlnhits), get_hit_found(\@tfnhits), - get_hit_found(\@tbrhits))."\n"); - close($fd); + open($fd, ">", $filename) or die("Could not create $filename: $!\n"); + print($fd join(" ", + get_hit_found(\@tlnhits), get_hit_found(\@tfnhits), + get_hit_found(\@tbrhits)) . + "\n"); + close($fd); } # A branch hit value for a block that was not hit must be "-". A branch hit # value for a block that was hit cannot be "-", but must be "0" if not hit. sub sanitize_brhits($) { - my ($brhits) = @_; - my $block_hit = 0; + my ($brhits) = @_; + my $block_hit = 0; - foreach my $brhit_ref (@$brhits) { - if ($$brhit_ref ne "-" && $$brhit_ref > 0) { - $block_hit = 1; - last; - } - } - foreach my $brhit_ref (@$brhits) { - if (!$block_hit) { - $$brhit_ref = "-"; - } elsif ($$brhit_ref eq "-") { - $$brhit_ref = 0; - } - } + foreach my $brhit_ref (@$brhits) { + if ($$brhit_ref ne "-" && $$brhit_ref > 0) { + $block_hit = 1; + last; + } + } + foreach my $brhit_ref (@$brhits) { + if (!$block_hit) { + $$brhit_ref = "-"; + } elsif ($$brhit_ref eq "-") { + $$brhit_ref = 0; + } + } } # Ensure coverage rate interdependencies are met sub sanitize_hits($$) { - my ($src, $hits) = @_; - my $files = $src->[0]; - - foreach my $hits (values(%{$hits})) { - my $brhits = $hits->[2]; - my $i = 0; - - foreach my $filename (sort(keys(%{$files}))) { - my $filesrc = $files->{$filename}; - my $branches = $filesrc->[3]; - my $lastblock; - my $lastline; - my @blist; - - foreach my $brdata (@$branches) { - my ($brline, $block, $branch) = @$brdata; - - if (!defined($lastblock) || - $block != $lastblock || - $brline != $lastline) { - sanitize_brhits(\@blist); - @blist = (); - $lastblock = $block; - $lastline = $brline; - } - push(@blist, \$brhits->[$i++]); - } - sanitize_brhits(\@blist); - } - } + my ($src, $hits) = @_; + my $files = $src->[0]; + + foreach my $hits (values(%{$hits})) { + my $brhits = $hits->[2]; + my $i = 0; + + foreach my $filename (sort(keys(%{$files}))) { + my $filesrc = $files->{$filename}; + my $branches = $filesrc->[3]; + my $lastblock; + my $lastline; + my @blist; + + foreach my $brdata (@$branches) { + my ($brline, $block, $branch) = @$brdata; + + if (!defined($lastblock) || + $block != $lastblock || + $brline != $lastline) { + sanitize_brhits(\@blist); + @blist = (); + $lastblock = $block; + $lastline = $brline; + } + push(@blist, \$brhits->[$i++]); + } + sanitize_brhits(\@blist); + } + } } # Generate random coverage data @@ -658,294 +658,294 @@ sub sanitize_hits($$) # brhit: 
Number of times a branch was hit by a specific test sub gen_hits($$) { - my ($c, $src) = @_; - my (@lnhits, @fnhits, @brhits); - my ($files, $numlns, $numfns, $numbrs) = @$src; - my $testnames = get_list($c, "tests.names", ""); - my %hits; + my ($c, $src) = @_; + my (@lnhits, @fnhits, @brhits); + my ($files, $numlns, $numfns, $numbrs) = @$src; + my $testnames = get_list($c, "tests.names", ""); + my %hits; - $testnames = [ "" ] if (!@$testnames); + $testnames = [""] if (!@$testnames); - foreach my $testname (@$testnames) { - my (@lnhits, @fnhits, @brhits); + foreach my $testname (@$testnames) { + my (@lnhits, @fnhits, @brhits); - for (my $i = 0; $i < $numlns; $i++) { - push(@lnhits, 1 + int(rand($MAX_TAKEN))); - } + for (my $i = 0; $i < $numlns; $i++) { + push(@lnhits, 1 + int(rand($MAX_TAKEN))); + } - for (my $i = 0; $i < $numfns; $i++) { - push(@fnhits, 1 + int(rand($MAX_TAKEN))); - } + for (my $i = 0; $i < $numfns; $i++) { + push(@fnhits, 1 + int(rand($MAX_TAKEN))); + } - for (my $i = 0; $i < $numbrs; $i++) { - push(@brhits, 1 + int(rand($MAX_TAKEN))); - } + for (my $i = 0; $i < $numbrs; $i++) { + push(@brhits, 1 + int(rand($MAX_TAKEN))); + } - $hits{$testname} = [ \@lnhits, \@fnhits, \@brhits ]; - } + $hits{$testname} = [\@lnhits, \@fnhits, \@brhits]; + } - sanitize_hits($src, \%hits); + sanitize_hits($src, \%hits); - return \%hits; + return \%hits; } # Return a hash containing RATE percent of indices [0..NUM-1]. sub gen_filter($$) { - my ($num, $rate) = @_; - my @list = (0 .. ($num - 1)); - my %hash; + my ($num, $rate) = @_; + my @list = (0 .. ($num - 1)); + my %hash; - reduce_list_per(\@list, $rate); - foreach my $i (@list) { - $hash{$i} = 1; - } + reduce_list_per(\@list, $rate); + foreach my $i (@list) { + $hash{$i} = 1; + } - return \%hash; + return \%hash; } # Zero all entries in LIST identified by the indices in FILTER. sub zero_by_filter($$) { - my ($list, $filter) = @_; + my ($list, $filter) = @_; - foreach my $i (keys(%{$filter})) { - $list->[$i] = 0; - } + foreach my $i (keys(%{$filter})) { + $list->[$i] = 0; + } } # Add a random number of indices between [0..NUM-1] to FILTER. sub widen_filter($$) { - my ($filter, $num) = @_; - my @list; + my ($filter, $num) = @_; + my @list; - for (my $i = 0; $i < $num; $i++) { - push(@list, $i) if (!exists($filter->{$i})); - } - reduce_list_per(\@list, int(rand(101))); + for (my $i = 0; $i < $num; $i++) { + push(@list, $i) if (!exists($filter->{$i})); + } + reduce_list_per(\@list, int(rand(101))); - foreach my $i (@list) { - $filter->{$i} = 1; - } + foreach my $i (@list) { + $filter->{$i} = 1; + } } # Zero coverage data in HITS until the combined coverage rates reach the # specified RATEs. 
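# A minimal sketch of how the rate filters are meant to be used, with
# made-up numbers (gen_filter() and zero_by_filter() are defined above):
#
#   my @hits   = (3, 1, 4, 1, 5, 9, 2, 6);
#   my $filter = gen_filter(scalar(@hits), 100 - 75);   # ~25% of the indices
#   zero_by_filter(\@hits, $filter);                     # selected entries become 0
#
# reduce_hits() below applies this per coverage type and per test, and widens
# each filter between tests so that different tests cover different subsets.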
sub reduce_hits($$$$$) { - my ($src, $hits, $lnrate, $fnrate, $brrate) = @_; - my ($files, $numlns, $numfns, $numbrs) = @$src; - my ($lnfilter, $fnfilter, $brfilter); + my ($src, $hits, $lnrate, $fnrate, $brrate) = @_; + my ($files, $numlns, $numfns, $numbrs) = @$src; + my ($lnfilter, $fnfilter, $brfilter); - $lnfilter = gen_filter($numlns, 100 - $lnrate); - $fnfilter = gen_filter($numfns, 100 - $fnrate); - $brfilter = gen_filter($numbrs, 100 - $brrate); + $lnfilter = gen_filter($numlns, 100 - $lnrate); + $fnfilter = gen_filter($numfns, 100 - $fnrate); + $brfilter = gen_filter($numbrs, 100 - $brrate); - foreach my $testhits (values(%{$hits})) { - my ($lnhits, $fnhits, $brhits) = @$testhits; + foreach my $testhits (values(%{$hits})) { + my ($lnhits, $fnhits, $brhits) = @$testhits; - zero_by_filter($lnhits, $lnfilter); - zero_by_filter($fnhits, $fnfilter); - zero_by_filter($brhits, $brfilter); + zero_by_filter($lnhits, $lnfilter); + zero_by_filter($fnhits, $fnfilter); + zero_by_filter($brhits, $brfilter); - # Provide some variation between tests - widen_filter($lnfilter, $numlns); - widen_filter($fnfilter, $numfns); - widen_filter($brfilter, $numbrs); - } + # Provide some variation between tests + widen_filter($lnfilter, $numlns); + widen_filter($fnfilter, $numfns); + widen_filter($brfilter, $numbrs); + } - sanitize_hits($src, $hits); + sanitize_hits($src, $hits); } sub zero_list($) { - my ($list) = @_; + my ($list) = @_; - foreach my $i (@$list) { - $i = 0; - } + foreach my $i (@$list) { + $i = 0; + } } # Zero all coverage in HITS. sub zero_hits($$) { - my ($src, $hits) = @_; + my ($src, $hits) = @_; - foreach my $testhits (values(%{$hits})) { - my ($lnhits, $fnhits, $brhits) = @$testhits; + foreach my $testhits (values(%{$hits})) { + my ($lnhits, $fnhits, $brhits) = @$testhits; - zero_list($lnhits); - zero_list($fnhits); - zero_list($brhits); - } + zero_list($lnhits); + zero_list($fnhits); + zero_list($brhits); + } - sanitize_hits($src, $hits); + sanitize_hits($src, $hits); } # Distribute items from LIST to A and B depending on whether the index for # an item is found in FILTER. 
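# For example (values chosen purely for illustration), splitting a list of
# four counts with a filter that selects indices 0 and 2:
#
#   my (@a, @b);
#   split_by_filter([ 5, 0, 7, 2 ], { 0 => 1, 2 => 1 }, \@a, \@b);
#   # @a is now (5, 0, 7, 0) and @b is (0, 0, 0, 2); adding the two lists
#   # element-wise restores the original counts.
#
# split_hits() below uses this to build the part1/part2 data sets that can be
# merged back into the combined coverage.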
sub split_by_filter($$$$) { - my ($list, $filter, $a, $b) = @_; + my ($list, $filter, $a, $b) = @_; - for (my $i = 0; $i < scalar(@$list); $i++) { - if (exists($filter->{$i})) { - push(@$a, $list->[$i]); - push(@$b, 0); - } else { - push(@$a, 0); - push(@$b, $list->[$i]); - } - } + for (my $i = 0; $i < scalar(@$list); $i++) { + if (exists($filter->{$i})) { + push(@$a, $list->[$i]); + push(@$b, 0); + } else { + push(@$a, 0); + push(@$b, $list->[$i]); + } + } } sub split_hits($$$) { - my ($c, $src, $hits) = @_; - my ($files, $numlns, $numfns, $numbrs) = @$src; - my ($lnsplit, $fnsplit, $brsplit); - my (%a, %b); + my ($c, $src, $hits) = @_; + my ($files, $numlns, $numfns, $numbrs) = @$src; + my ($lnsplit, $fnsplit, $brsplit); + my (%a, %b); - $lnsplit = gen_filter($numlns, int(rand(101))); - $fnsplit = gen_filter($numfns, int(rand(101))); - $brsplit = gen_filter($numbrs, int(rand(101))); + $lnsplit = gen_filter($numlns, int(rand(101))); + $fnsplit = gen_filter($numfns, int(rand(101))); + $brsplit = gen_filter($numbrs, int(rand(101))); - foreach my $testname (keys(%{$hits})) { - my $testhits = $hits->{$testname}; - my ($lnhits, $fnhits, $brhits) = @$testhits; - my (@lnhitsa, @fnhitsa, @brhitsa); - my (@lnhitsb, @fnhitsb, @brhitsb); + foreach my $testname (keys(%{$hits})) { + my $testhits = $hits->{$testname}; + my ($lnhits, $fnhits, $brhits) = @$testhits; + my (@lnhitsa, @fnhitsa, @brhitsa); + my (@lnhitsb, @fnhitsb, @brhitsb); - split_by_filter($lnhits, $lnsplit, \@lnhitsa, \@lnhitsb); - split_by_filter($fnhits, $fnsplit, \@fnhitsa, \@fnhitsb); - split_by_filter($brhits, $brsplit, \@brhitsa, \@brhitsb); + split_by_filter($lnhits, $lnsplit, \@lnhitsa, \@lnhitsb); + split_by_filter($fnhits, $fnsplit, \@fnhitsa, \@fnhitsb); + split_by_filter($brhits, $brsplit, \@brhitsa, \@brhitsb); - $a{$testname} = [ \@lnhitsa, \@fnhitsa, \@brhitsa ]; - $b{$testname} = [ \@lnhitsb, \@fnhitsb, \@brhitsb ]; - } + $a{$testname} = [\@lnhitsa, \@fnhitsa, \@brhitsa]; + $b{$testname} = [\@lnhitsb, \@fnhitsb, \@brhitsb]; + } - sanitize_hits($src, \%a); - sanitize_hits($src, \%b); + sanitize_hits($src, \%a); + sanitize_hits($src, \%b); - return (\%a, \%b); + return (\%a, \%b); } sub plural($$$) { - my ($num, $sing, $plur) = @_; + my ($num, $sing, $plur) = @_; - return $num <= 1 ? $sing : $plur; + return $num <= 1 ? $sing : $plur; } sub print_intro($) { - my ($c) = @_; - my $numtests = scalar(@{get_list($c, "tests.names")}); - my $numfiles = get_int($c, "files.numfiles"); + my ($c) = @_; + my $numtests = scalar(@{get_list($c, "tests.names")}); + my $numfiles = get_int($c, "files.numfiles"); - $numtests = 1 if ($numtests < 1); + $numtests = 1 if ($numtests < 1); - print($BOLD."Creating coverage files ($numtests ". - plural($numtests, "test", "tests").", $numfiles ". - plural($numfiles, "source file", "source files").")\n".$RESET); + print($BOLD. "Creating coverage files ($numtests " . + plural($numtests, "test", "tests") . ", $numfiles " . + plural($numfiles, "source file", "source files") . ")\n" . 
$RESET); } sub main() { - my $opt_help; - my $opt_output; - my $opt_configfile; - my $opt_seed = 0; - my $c; - my $src; - my $hits; - my $root; - my $enum; - my ($a, $b); - - # Parse options - if (!GetOptions("output|o=s" => \$opt_output, - "seed=s" => \$opt_seed, - "help|h" => \$opt_help, - )) { - print(STDERR "Use $0 --help to get usage information\n"); - exit(2); - } - - if ($opt_help) { - usage(); - exit(0); - } - - $opt_configfile = shift(@ARGV); - if (!defined($opt_configfile)) { - print(STDERR "Please specify a config file\n"); - exit(2); - } - - if (defined($opt_output)) { - if (! -d $opt_output) { - mkdir($opt_output) or - die("$0: Could not create directory ". - "$opt_output: $!\n"); - } - $root = abs_path($opt_output) - } else { - $root = "/"; - } - - srand($opt_seed); - - # Get config - $c = read_config($opt_configfile); - apply_config($c, \@ARGV) if (@ARGV); - - print_intro($c); - # Show lines on STDOUT without newline - $| = 1; - - # Create source tree - print(" Source tree ......... "); - $src = gen_src($c, $root); - # Write out source code if requested - write_src($src) if (defined($opt_output)); - print("done ("); - print($src->[1]." lines, "); - print($src->[2]." functions, "); - print($src->[3]." branches)\n"); - - # Write out full-coverage data files - print(" Full coverage ....... "); - $hits = gen_hits($c, $src); - write_info($c, "full.info", $src, $hits); - write_counts("full.counts", $hits); - print("done\n"); - - # Write out data files with target coverage rates - print(" Target coverage ..... "); - reduce_hits($src, $hits, get_int($c, "lines.covered"), - get_int($c, "functions.covered"), - get_int($c, "branches.covered")); - write_info($c, "target.info", $src, $hits); - write_counts("target.counts", $hits); - print("done\n"); - - # Write out partial data files - print(" Partial coverage .... "); - ($a, $b) = split_hits($c, $src, $hits); - write_info($c, "part1.info", $src, $a); - write_counts("part1.counts", $a); - write_info($c, "part2.info", $src, $b); - write_counts("part2.counts", $b); - print("done\n"); - - # Write out zero-coverage data files - print(" Zero coverage ....... "); - zero_hits($src, $hits); - write_info($c, "zero.info", $src, $hits); - write_counts("zero.counts", $hits); - print("done\n"); + my $opt_help; + my $opt_output; + my $opt_configfile; + my $opt_seed = 0; + my $c; + my $src; + my $hits; + my $root; + my $enum; + my ($a, $b); + + # Parse options + if (!GetOptions("output|o=s" => \$opt_output, + "seed=s" => \$opt_seed, + "help|h" => \$opt_help, + )) { + print(STDERR "Use $0 --help to get usage information\n"); + exit(2); + } + + if ($opt_help) { + usage(); + exit(0); + } + + $opt_configfile = shift(@ARGV); + if (!defined($opt_configfile)) { + print(STDERR "Please specify a config file\n"); + exit(2); + } + + if (defined($opt_output)) { + if (!-d $opt_output) { + mkdir($opt_output) or + die("$0: Could not create directory " . "$opt_output: $!\n"); + } + $root = abs_path($opt_output); + } else { + $root = "/"; + } + + srand($opt_seed); + + # Get config + $c = read_config($opt_configfile); + apply_config($c, \@ARGV) if (@ARGV); + + print_intro($c); + # Show lines on STDOUT without newline + $| = 1; + + # Create source tree + print(" Source tree ......... "); + $src = gen_src($c, $root); + # Write out source code if requested + write_src($src) if (defined($opt_output)); + print("done ("); + print($src->[1] . " lines, "); + print($src->[2] . " functions, "); + print($src->[3] . 
" branches)\n"); + + # Write out full-coverage data files + print(" Full coverage ....... "); + $hits = gen_hits($c, $src); + write_info($c, "full.info", $src, $hits); + write_counts("full.counts", $hits); + print("done\n"); + + # Write out data files with target coverage rates + print(" Target coverage ..... "); + reduce_hits($src, $hits, + get_int($c, "lines.covered"), + get_int($c, "functions.covered"), + get_int($c, "branches.covered")); + write_info($c, "target.info", $src, $hits); + write_counts("target.counts", $hits); + print("done\n"); + + # Write out partial data files + print(" Partial coverage .... "); + ($a, $b) = split_hits($c, $src, $hits); + write_info($c, "part1.info", $src, $a); + write_counts("part1.counts", $a); + write_info($c, "part2.info", $src, $b); + write_counts("part2.counts", $b); + print("done\n"); + + # Write out zero-coverage data files + print(" Zero coverage ....... "); + zero_hits($src, $hits); + write_info($c, "zero.info", $src, $hits); + write_counts("zero.counts", $hits); + print("done\n"); } main(); diff --git a/tests/bin/norminfo b/tests/bin/norminfo index 9fe0ef2f..ad0be7d8 100755 --- a/tests/bin/norminfo +++ b/tests/bin/norminfo @@ -13,230 +13,248 @@ use warnings; sub ferr($$$) { - my ($pos, $filename, $msg) = @_; + my ($pos, $filename, $msg) = @_; - if (defined($pos)) { - $pos .= ":"; - } else { - $pos = ""; - } + if (defined($pos)) { + $pos .= ":"; + } else { + $pos = ""; + } - die("$0:$filename:$pos $msg"); + die("$0:$filename:$pos $msg"); } sub print_sorted($$$) { - my ($fd, $info, $multi) = @_; - my (%fn, %fns, %fnda, %brda, %da); - my ($fnf, $fnh, $brf, $brh, $lf, $lh); - - while (my $line = <$fd>) { - $line =~ s/(^\s*|\s*$)//g; - - if ($line =~ /^end_of_record$/) { - last; - } elsif ($line =~ /^FN:(\d+),(.*)$/) { - my ($lineno, $fnname) = ($1, $2); - - if (exists($fn{$lineno})) { - ferr($., $info, "Duplicate FN: entry\n"); - } - $fn{$lineno} = $fnname; - if (exists($fns{$fnname})) { - ferr($., $info, "Duplicate function name\n"); - } - $fns{$fnname} = $lineno; - } elsif ($line =~ /^FNDA:(\d+),(.*)$/) { - my ($count, $fnname) = ($1, $2); - - if (exists($fnda{$fnname})) { - ferr($., $info, "Duplicate FNDA: entry\n"); - } - $fnda{$fnname} = int($count * $multi); - } elsif ($line =~ /^FNF:(\d+)$/) { - if (defined($fnf)) { - ferr($., $info, "Duplicate FNF: entry\n"); - } - $fnf = $1; - } elsif ($line =~ /^FNH:(\d+)$/) { - if (defined($fnh)) { - ferr($., $info, "Duplicate FNH: entry\n"); - } - $fnh = $1; - } elsif ($line =~ /^BRDA:(\d+),(\d+),(\d+),(\d+|-)$/) { - my ($lineno, $block, $branch, $count) = ($1, $2, $3, $4); - - if (exists($brda{$lineno}->{$block}->{$branch})) { - ferr($., $info, "Duplicate BRDA: entry\n"); - } - $count = int($count * $multi) if ($count ne "-"); - $brda{$lineno}->{$block}->{$branch} = $count; - - } elsif ($line =~ /^BRF:(\d+)$/) { - if (defined($brf)) { - ferr($., $info, "Duplicate BRF: entry\n"); - } - $brf = $1; - } elsif ($line =~ /^BRH:(\d+)$/) { - if (defined($brh)) { - ferr($., $info, "Duplicate BRH: entry\n"); - } - $brh = $1; - } elsif ($line =~ /^DA:(\d+),(\d+)$/) { - my ($lineno, $count) = ($1, $2); - - if (exists($da{$lineno})) { - ferr($., $info, "Duplicate FNDA: entry\n"); - } - $da{$lineno} = int($count * $multi); - } elsif ($line =~ /^LF:(\d+)$/) { - if (defined($lf)) { - ferr($., $info, "Duplicate LF: entry\n"); - } - $lf = $1; - } elsif ($line =~ /^LH:(\d+)$/) { - if (defined($lh)) { - ferr($., $info, "Duplicate LH: entry\n"); - } - $lh = $1; - } else { - ferr($., $info, "Unknown line: $line\n"); - } - 
} - - # FN:, - foreach my $lineno (sort({ $a <=> $b } keys(%fn))) { - my $fnname = $fn{$lineno}; - print("FN:$lineno,$fnname\n"); - } - - # FNDA:, - foreach my $fnname (keys(%fnda)) { - if (!exists($fns{$fnname})) { - ferr(undef, $info, "FNDA entry without FN: $fnname\n"); - } - } - foreach my $fnname (sort({ $fns{$a} <=> $fns{$b} } keys(%fnda))) { - my $count = $fnda{$fnname}; - print("FNDA:$count,$fnname\n"); - } - # FNF: - print("FNF:$fnf\n") if (defined($fnf)); - # FNH: - if (defined($fnh)) { - $fnh = 0 if ($multi == 0); - print("FNH:$fnh\n"); - } - # BRDA:,,, - foreach my $lineno (sort({ $a <=> $b } keys(%brda))) { - my $blocks = $brda{$lineno}; - - foreach my $block (sort({ $a <=> $b } keys(%{$blocks}))) { - my $branches = $blocks->{$block}; - - foreach my $branch (sort({ $a <=> $b } - keys(%{$branches}))) { - my $count = $branches->{$branch}; - - $count = "-" if ($multi == 0); - print("BRDA:$lineno,$block,$branch,$count\n"); - } - } - - } - # BRF: - print("BRF:$brf\n") if (defined($brf)); - # BRH: - if (defined($brh)) { - $brh = 0 if ($multi == 0); - print("BRH:$brh\n"); - } - # DA:, - foreach my $lineno (sort({ $a <=> $b } keys(%da))) { - my $count = $da{$lineno}; - - print("DA:$lineno,$count\n"); - } - # LF: - print("LF:$lf\n") if (defined($lf)); - # LH: - if (defined($lh)) { - $lh = 0 if ($multi == 0); - print("LH:$lh\n"); - } + my ($fd, $info, $multi) = @_; + my (%fn, %fns, %fnda, %brda, %da); + my ($fnf, $fnh, $brf, $brh, $lf, $lh); + my ($idx, $lineNo, $endLine); + + while (my $line = <$fd>) { + $line =~ s/(^\s*|\s*$)//g; + + if ($line =~ /^end_of_record$/) { + last; + } elsif ($line =~ /^FNL:(\d+),(\d+)(,(\d+))?$/) { + # generate backward-compatible data for the moment + $idx = $1; + $lineNo = $2; + $endLine = $4; + } elsif ($line =~ /^FNA:(\d+),([^,]+),(.+)$/) { + die("unexpected index $1 != $idx") unless $1 == $idx; + $idx = undef; + my $hit = $2; + my $fnname = $3; + $fn{$lineNo} = [$fnname, $endLine]; + $fns{$fnname} = $lineNo; + $fnda{$fnname} = int($hit * $multi); + } elsif ($line =~ /^FN:(\d+),((\d+),)?(.*)$/) { + my ($lineno, $fnname) = ($1, $4); + + if (exists($fn{$lineno})) { + ferr($., $info, "Duplicate FN: entry\n"); + } + $fn{$lineno} = [$fnname, $3]; + if (exists($fns{$fnname})) { + ferr($., $info, "Duplicate function name\n"); + } + $fns{$fnname} = $lineno; + } elsif ($line =~ /^FNDA:(\d+),(.*)$/) { + my ($count, $fnname) = ($1, $2); + + if (exists($fnda{$fnname})) { + ferr($., $info, "Duplicate FNDA: entry\n"); + } + $fnda{$fnname} = int($count * $multi); + } elsif ($line =~ /^FNF:(\d+)$/) { + if (defined($fnf)) { + ferr($., $info, "Duplicate FNF: entry\n"); + } + $fnf = $1; + } elsif ($line =~ /^FNH:(\d+)$/) { + if (defined($fnh)) { + ferr($., $info, "Duplicate FNH: entry\n"); + } + $fnh = $1; + } elsif ($line =~ /^BRDA:(\d+),(e)?(\d+),(\d+),(\d+|-)$/) { + my ($lineno, $is_exception, $block, $branch, $count) = + ($1, defined($2) && $2 eq 'e', $3, $4, $5); + + if (exists($brda{$lineno}->{$block}->{$branch})) { + ferr($., $info, "Duplicate BRDA: entry\n"); + } + $count = int($count * $multi) if ($count ne "-"); + $brda{$lineno}->{$block}->{$branch} = [$count, $is_exception]; + + } elsif ($line =~ /^BRF:(\d+)$/) { + if (defined($brf)) { + ferr($., $info, "Duplicate BRF: entry\n"); + } + $brf = $1; + } elsif ($line =~ /^BRH:(\d+)$/) { + if (defined($brh)) { + ferr($., $info, "Duplicate BRH: entry\n"); + } + $brh = $1; + } elsif ($line =~ /^DA:(\d+),(\d+)$/) { + my ($lineno, $count) = ($1, $2); + + if (exists($da{$lineno})) { + ferr($., $info, "Duplicate FNDA: 
entry\n"); + } + $da{$lineno} = int($count * $multi); + } elsif ($line =~ /^LF:(\d+)$/) { + if (defined($lf)) { + ferr($., $info, "Duplicate LF: entry\n"); + } + $lf = $1; + } elsif ($line =~ /^LH:(\d+)$/) { + if (defined($lh)) { + ferr($., $info, "Duplicate LH: entry\n"); + } + $lh = $1; + } else { + ferr($., $info, "Unknown line: $line\n"); + } + } + + # FN:, + foreach my $lineno (sort({ $a <=> $b } keys(%fn))) { + my ($fnname, $endLine) = @{$fn{$lineno}}; + $endLine = $endLine ? '.' . $endLine : ''; + print("FN:$lineno$endLine,$fnname\n"); + } + + # FNDA:, + foreach my $fnname (keys(%fnda)) { + if (!exists($fns{$fnname})) { + ferr(undef, $info, "FNDA entry without FN: $fnname\n"); + } + } + foreach my $fnname (sort({ $fns{$a} <=> $fns{$b} } keys(%fnda))) { + my $count = $fnda{$fnname}; + print("FNDA:$count,$fnname\n"); + } + # FNF: + print("FNF:$fnf\n") if (defined($fnf)); + # FNH: + if (defined($fnh)) { + $fnh = 0 if ($multi == 0); + print("FNH:$fnh\n"); + } + # BRDA:,,, + foreach my $lineno (sort({ $a <=> $b } keys(%brda))) { + my $blocks = $brda{$lineno}; + + foreach my $block (sort({ $a <=> $b } keys(%{$blocks}))) { + my $branches = $blocks->{$block}; + + foreach my $branch (sort({ $a <=> $b } + keys(%{$branches})) + ) { + my ($count, $is_exception) = @{$branches->{$branch}}; + + $count = "-" if ($multi == 0); + print("BRDA:$lineno," . + (defined($is_exception) && $is_exception ? 'e' : '') . + "$block,$branch,$count\n"); + } + } + + } + # BRF: + print("BRF:$brf\n") if (defined($brf)); + # BRH: + if (defined($brh)) { + $brh = 0 if ($multi == 0); + print("BRH:$brh\n"); + } + # DA:, + foreach my $lineno (sort({ $a <=> $b } keys(%da))) { + my $count = $da{$lineno}; + + print("DA:$lineno,$count\n"); + } + # LF: + print("LF:$lf\n") if (defined($lf)); + # LH: + if (defined($lh)) { + $lh = 0 if ($multi == 0); + print("LH:$lh\n"); + } } sub main() { - my $infofile = $ARGV[0]; - my $multi = $ARGV[1]; - # info: testname -> files - # files: infofile -> data - # data: [ starting offset, starting line ] - my %info; - my $fd; - my $tn = ""; - my %allfiles; - - $multi = 1 if (!defined($multi)); - if (!defined($infofile)) { - $infofile = "standard input"; - warn("$0: Reading data from standard input\n"); - open($fd, "<&STDIN") or - die("$0: Could not duplicated stdin: $!\n"); - } else { - open($fd, "<", $infofile) or - die("$0: Could not open $infofile: $!\n"); - } - - # Register starting positions of data sets - while (my $line = <$fd>) { - if ($line =~ /^TN:(.*)$/) { - $tn = $1; - } elsif ($line =~ /^SF:(.*)$/) { - my $sf = $1; - my $pos = tell($fd); - - die("$0: Could not get file position: $!\n") - if ($pos == -1); - if (exists($info{$tn}->{$sf})) { - ferr($., $infofile, - "Duplicate entry for $tn:$sf\n"); - } - $info{$tn}->{$sf} = [ $pos, $. ]; - $allfiles{$sf} = 1; - } - } - - # Print data sets in normalized order - foreach my $filename (sort(keys(%allfiles))) { - foreach my $testname (sort(keys(%info))) { - my $pos = $info{$testname}->{$filename}; - my ($cpos, $lpos) = @$pos; - - next if (!defined($pos)); - - if (seek($fd, $cpos, 0) != 1) { - die("$0: Could not seek in $infofile: $!\n"); - } - printf("TN:$testname\n"); - printf("SF:$filename\n"); - - $. 
= $lpos; - print_sorted($fd, $infofile, $multi); - - printf("end_of_record\n"); - - } - } - foreach my $testname (sort(keys(%info))) { - my $files = $info{$testname}; - - foreach my $filename (sort(keys(%{$files}))) { - } - } - - close($fd); + my $infofile = $ARGV[0]; + my $multi = $ARGV[1]; + # info: testname -> files + # files: infofile -> data + # data: [ starting offset, starting line ] + my %info; + my $fd; + my $tn = ""; + my %allfiles; + + $multi = 1 if (!defined($multi)); + if (!defined($infofile)) { + $infofile = "standard input"; + warn("$0: Reading data from standard input\n"); + open($fd, "<&STDIN") or + die("$0: Could not duplicated stdin: $!\n"); + } else { + open($fd, "<", $infofile) or + die("$0: Could not open $infofile: $!\n"); + } + + # Register starting positions of data sets + while (my $line = <$fd>) { + if ($line =~ /^TN:(.*)$/) { + $tn = $1; + } elsif ($line =~ /^SF:(.*)$/) { + my $sf = $1; + my $pos = tell($fd); + + die("$0: Could not get file position: $!\n") + if ($pos == -1); + if (exists($info{$tn}->{$sf})) { + ferr($., $infofile, "Duplicate entry for $tn:$sf\n"); + } + $info{$tn}->{$sf} = [$pos, $.]; + $allfiles{$sf} = 1; + } + } + + # Print data sets in normalized order + foreach my $filename (sort(keys(%allfiles))) { + foreach my $testname (sort(keys(%info))) { + my $pos = $info{$testname}->{$filename}; + my ($cpos, $lpos) = @$pos; + + next if (!defined($pos)); + + if (seek($fd, $cpos, 0) != 1) { + die("$0: Could not seek in $infofile: $!\n"); + } + printf("TN:$testname\n"); + printf("SF:$filename\n"); + + $. = $lpos; + print_sorted($fd, $infofile, $multi); + + printf("end_of_record\n"); + + } + } + foreach my $testname (sort(keys(%info))) { + my $files = $info{$testname}; + + foreach my $filename (sort(keys(%{$files}))) { + } + } + + close($fd); } main(); diff --git a/tests/bin/runtests b/tests/bin/runtests index f2b6ad59..a8d4a70c 100755 --- a/tests/bin/runtests +++ b/tests/bin/runtests @@ -3,32 +3,64 @@ # Copyright IBM Corp. 2020 # # Usage: runtests -# +# MAKE="$1" shift -TESTS="$*" +TESTS="" +OPTS='' +MAKE_OPTS='' +while [ $# -gt 0 ] ; do + + OPT=$1 + shift + case $OPT in + + --script-args ) + OPTS="$OPTS $1" + MAKE_OPTS="$MAKE_OPTS TESTCASE_ARGS=$1" + shift + ;; + + --coverage ) + COVER_DB=$1 + shift + + OPTS="$OPTS $OPT $COVER_DB" + MAKE_OPTS="$MAKE_OPTS COVER_DB=../$COVER_DB" + ;; + + * ) + TESTS="$TESTS $OPT" + ;; + esac +done + +if [[ "${V:-0}" -lt 1 ]] ; then + MAKE_OPTS="$MAKE_OPTS -s" +fi if [[ -z "${_TESTS_RUNNING}" ]] ; then - # Do this only once at start of test run - export _TESTS_RUNNING=1 + # Do this only once at start of test run + export _TESTS_RUNNING=1 - testsuite_init - trap testsuite_exit exit - # Suppress test results on keyboard interrupt - trap "trap exit ; exit 1" SIGINT + testsuite_init + trap testsuite_exit exit + # Suppress test results on keyboard interrupt + trap "trap exit ; exit 1" SIGINT fi for TEST in ${TESTS} ; do - if [[ -d "${TEST}" ]] ; then - # Enter sub-directory - ${MAKE} -C "${TEST}" check || exit 1 - else - # Enter test - ABS_TEST="$PWD/$TEST" - REL_TEST="${ABS_TEST##$TOPDIR}" - test_run "${REL_TEST}" "${ABS_TEST}" /dev/null) if [ ! -z "$TIME" ] ; then - TIME="$TIME -v -o $TIMEFILE" - if ! $TIME true 2>/dev/null ; then - TIME="" - fi + TIME="$TIME -v -o $TIMEFILE" + if ! 
$TIME true 2>/dev/null ; then + TIME="" + fi fi t_announce "$TESTNAME" -let POS=$(stat -c %s "$LOGFILE")+1 +case "$OSTYPE" in +linux*) + let POS=$(stat -c %s "$LOGFILE")+1 + ;; +*) + let POS=$(stat -f %z "$LOGFILE")+1 + ;; +esac -t_detail "COMMAND" "\"$*\"" >>"$LOGFILE" +t_detail "COMMAND" "\"$SCRIPT $OPTS*\"" >>"$LOGFILE" t_detail "OUTPUT" "" >>"$LOGFILE" # Run command -$TIME bash -c "$*" 2>&1 | t_indent >>"$LOGFILE" -RC=$? +$TIME bash -c "$INVOKE_COVER $SCRIPT $OPTS" 2>&1 | t_indent >>"$LOGFILE" +RC=${PIPESTATUS[0]} # Evaluate output of time command ELAPSED= RESIDENT= SIGNAL= if [ ! -z "$TIME" ] ; then - while read LINE ; do - case "$LINE" in - "Command terminated by signal"*) SIGNAL=${LINE##* } ;; - "Elapsed"*) ELAPSED=$(elapsed_to_ms ${LINE##* }) ;; - "Maximum resident"*) RESIDENT=${LINE##* } ;; - "Exit status"*) RC=${LINE##* } ;; - esac - done < "$TIMEFILE" - rm -f "$TIMEFILE" + while read LINE ; do + case "$LINE" in + "Command terminated by signal"*) SIGNAL=${LINE##* } ;; + "Elapsed"*) ELAPSED=$(elapsed_to_ms ${LINE##* }) ;; + "Maximum resident"*) RESIDENT=${LINE##* } ;; + "Exit status"*) RC=${LINE##* } ;; + esac + done < "$TIMEFILE" + rm -f "$TIMEFILE" +fi + +if [ 0 == $RC ] ; then + for str in uninitialized ; do + grep $str $LOGFILE + if [ 0 == $? ] ; then + echo "unexpected '$str' in '$LOGFILE' for $TESTNAME" + RC=1 + fi + done fi # Save last output line as reason in case of skip result @@ -60,59 +111,61 @@ t_detail "EXITCODE" "$RC" >>"$LOGFILE" # Show result if [ $RC -eq 0 -a -z "$SIGNAL" ] ; then - RESULT="pass" - t_pass "$TESTNAME" + RESULT="pass" + t_pass "$TESTNAME" else - if [ $RC -eq 2 ] ; then - RESULT="skip" - t_skip "$TESTNAME" - else - if [ -z "$SIGNAL" ] ; then - RESULT="fail" - t_fail "$TESTNAME" - else - RESULT="kill" - t_kill "$TESTNAME" - fi - fi + if [ $RC -eq 2 ] ; then + RESULT="skip" + t_skip "$TESTNAME" + else + if [ -z "$SIGNAL" ] ; then + RESULT="fail" + t_fail "$TESTNAME" + else + RESULT="kill" + t_kill "$TESTNAME" + fi + fi fi if [ ! -z "$SIGNAL" ] ; then - t_detail "SIGNAL" "$SIGNAL" >>"$LOGFILE" + t_detail "SIGNAL" "$SIGNAL" >>"$LOGFILE" fi if [ ! -z "$ELAPSED" ] ; then - echo -n " (time $(($ELAPSED/1000)).$(($ELAPSED%1000/100))s, " - echo "elapsed $TESTNAME $ELAPSED" >> "$COUNTFILE" + echo -n " (time $(($ELAPSED/1000)).$(($ELAPSED%1000/100))s, " + echo "elapsed $TESTNAME $ELAPSED" >> "$COUNTFILE" fi if [ ! -z "$RESIDENT" ] ; then - echo -n "mem $(($RESIDENT/1024)).$((($RESIDENT%1024)/100))MB)" - echo "resident $TESTNAME $RESIDENT" >> "$COUNTFILE" + echo -n "mem $(($RESIDENT/1024)).$((($RESIDENT%1024)/100))MB)" + echo "resident $TESTNAME $RESIDENT" >> "$COUNTFILE" fi echo # Show skip reason if [ $RC -eq 2 ] ; then - t_detail "REASON" "$LAST" | t_indent - t_detail "REASON" "$LAST" >>"$LOGFILE" + t_detail "REASON" "$LAST" | t_indent + t_detail "REASON" "$LAST" >>"$LOGFILE" fi # Show log excerpt on failure or if requested if [ $RC -ne 0 -a $RC -ne 2 -o "$V" == "1" ] ; then - LEN=$(tail -c "+$POS" "$LOGFILE" | wc -l) - if [ "$LEN" -gt "$EXCERPTLEN" -a "$V" != "1" ] ; then - echo " Skipping $LEN previous lines (see $LOGFILE)" - echo " ..." - tail -c "+$POS" "$LOGFILE" | tail -n $EXCERPTLEN | t_indent - let LEN=$LEN-$EXCERPTLEN - else - tail -c "+$POS" "$LOGFILE" | t_indent - fi + LEN=$(tail -c "+$POS" "$LOGFILE" | wc -l) + if [ "$LEN" -gt "$EXCERPTLEN" -a "$V" != "1" ] ; then + echo " Skipping $LEN previous lines (see $LOGFILE)" + echo " ..." 
+ tail -c "+$POS" "$LOGFILE" | tail -n $EXCERPTLEN | t_indent + let LEN=$LEN-$EXCERPTLEN + else + tail -c "+$POS" "$LOGFILE" | t_indent + fi fi # Log more details [ ! -z "$ELAPSED" ] && t_detail "TIME" "${ELAPSED}ms" >>"$LOGFILE" [ ! -z "$RESIDENT" ] && t_detail "MEM" "${RESIDENT}kB" >>"$LOGFILE" t_detail "RESULT" "$RESULT" >> "$LOGFILE" + +exit $RC diff --git a/tests/bin/test_skip b/tests/bin/test_skip index 202606f4..de336d59 100755 --- a/tests/bin/test_skip +++ b/tests/bin/test_skip @@ -8,7 +8,8 @@ # optional reason text. Must be run after testsuite_init. # -TOPDIR=$(realpath $(dirname $0)/..) && source "$TOPDIR/bin/common" +TOPDIR=$(readlink -f $(dirname $0)/..) && test -d "$TOPDIR" \ + && source "$TOPDIR/bin/common" TESTNAME="$1" REASON="${*:2}" ; [ -z "$REASON" ] && REASON="" diff --git a/tests/bin/testsuite_exit b/tests/bin/testsuite_exit index 6720df99..acea8f26 100755 --- a/tests/bin/testsuite_exit +++ b/tests/bin/testsuite_exit @@ -7,7 +7,8 @@ # Announce end of test suite and show aggregate results. # -TOPDIR=$(realpath $(dirname $0)/..) && source "$TOPDIR/bin/common" +TOPDIR=$(readlink -f $(dirname $0)/..) && test -d "$TOPDIR" \ + && source "$TOPDIR/bin/common" echo "end_time $(date +%s.%N)" >>"$COUNTFILE" @@ -59,7 +60,7 @@ MEM="mem ${TOTAL_MEM}MB" [ "$FAILED" -gt 0 ] && FAIL="$RED$FAIL$DEFAULT" [ "$SKIPPED" -gt 0 ] && SKIP="$BLUE$SKIP$DEFAULT" -echo -en "$TOTAL, $PASS, $FAIL, $SKIP$RESET" >&3 +printf "$TOTAL, $PASS, $FAIL, $SKIP$RESET" >&3 [ $HAVE_EXT -eq 1 ] && echo -n " ($TIME, $MEM)" >&3 echo >&3 echo "Result log stored in $LOGFILE" >&3 diff --git a/tests/bin/testsuite_init b/tests/bin/testsuite_init index f901e35f..60ac137f 100755 --- a/tests/bin/testsuite_init +++ b/tests/bin/testsuite_init @@ -7,9 +7,10 @@ # Announce start of test suite and prepare log files. # -TOPDIR=$(realpath $(dirname $0)/..) && source "$TOPDIR/bin/common" +TOPDIR=$(readlink -f $(dirname $0)/..) && test -d "$TOPDIR"\ + && source "$TOPDIR/bin/common" -echo -e $BOLD"Starting tests"$RESET +printf "${BOLD}Starting tests${RESET}\n" echo "start_time $(date +%s.%N)" >"$COUNTFILE" exec >"$LOGFILE" 2>&1 @@ -21,8 +22,26 @@ lcov --version 2>&1 | t_indent t_detail "GCOV" "" gcov --version 2>&1 | t_indent -t_detail "CPUINFO" "" -t_indent < /proc/cpuinfo +case "$OSTYPE" in + linux*) + t_detail "CPUINFO" "" + t_indent < /proc/cpuinfo -t_detail "MEMINFO" "" -t_indent < /proc/meminfo + t_detail "MEMINFO" "" + t_indent < /proc/meminfo + ;; + darwin*) + t_detail "CPUINFO" "" + sysctl machdep.cpu | t_indent + + t_detail "MEMINFO" "" + vm_stat | t_indent + ;; + *) + t_detail "CPUINFO" "" + echo "unknown cpu info" | t_indent + + t_detail "MEMINFO" "" + echo "unknown mem info" | t_indent + ;; +esac diff --git a/tests/common.mak b/tests/common.mak index 7f6917d8..d0cc9fe1 100644 --- a/tests/common.mak +++ b/tests/common.mak @@ -1,5 +1,51 @@ -export TOPDIR := $(dir $(realpath $(lastword $(MAKEFILE_LIST)))) -export TESTDIR := $(dir $(realpath $(firstword $(MAKEFILE_LIST)))) + +# TOPDIR == root of test directory - either build dir or copied from share/lcov +TOPDIR := $(dir $(realpath $(lastword $(MAKEFILE_LIST)))) +# TESTDIR == path to this particular testcase +TESTDIR := $(dir $(realpath $(firstword $(MAKEFILE_LIST)))) + +ifeq ($(LCOV_HOME),) +ROOT_DIR = $(realpath $(TOPDIR)/..) 
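+# If LCOV_HOME is set (for example via something like
+# "make LCOV_HOME=/opt/lcov check" - path shown for illustration only),
+# the else branch below points the tests at that installed tree instead.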
+else +ROOT_DIR := $(LCOV_HOME) +endif +BINDIR = $(ROOT_DIR)/bin + +ifneq (,$(wildcard $(ROOT_DIR)/scripts)) +SCRIPTDIR := $(ROOT_DIR)/scripts +else +SCRIPTDIR := $(ROOT_DIR)/share/lcov/support-scripts +endif + +ifeq ($(DEBUG),1) +$(warning TOPDIR = $(TOPDIR)) +$(warning TESTDIR = $(TESTDIR)) +$(warning BINDIR = $(BINDIR)) +$(warning SCRIPTDIR = $(SCRIPTDIR)) +endif + +TESTBINDIR := $(TOPDIR)bin + +IS_GIT := $(shell git -C $(TOPDIR) rev-parse 2>&1 > /dev/null ; if [ 0 -eq $$? ]; then echo 1 ; else echo 0 ; fi) +IS_P4 = $(shell p4 have ... 2>&1 > /dev/null ; if [ 0 -eq $$? ]; then echo 1 ; else echo 0 ; fi) + +ifeq (1,$(IS_GIT)) +ANNOTATE_SCRIPT=$(SCRIPTDIR)/gitblame.pm +VERSION_SCRIPT=$(SCRIPTDIR)/gitversion.pm +else +ANNOTATE_SCRIPT=$(SCRIPTDIR)/p4annotate.pm +VERSION_SCRIPT=$(SCRIPTDIR)/P4version.pm,--local-edit,$(ROOT_DIR) +endif + +ifneq ($(COVER_DB),) +export PERL_COVER_ARGS := -MDevel::Cover=-db,$(COVER_DB),-coverage,statement,branch,condition,subroutine,-silent,1 +EXEC_COVER := perl ${PERL_COVER_ARGS} +export COVERAGE_COMMAND = $(shell which coverage 2>&1 > /dev/null ; if [ 0 -eq $$? ] ; then echo coverage ; else echo python3-coverage ; fi ) +PYCOVER = COVERAGE_FILE=$(PYCOV_DB) ${COVERAGE_COMMAND} run --branch --append +#$(warning assigned PYCOVER='$(PYCOVER)') +endif + +export TOPDIR TESTDIR export PARENTDIR := $(dir $(patsubst %/,%,$(TOPDIR))) export RELDIR := $(TESTDIR:$(PARENTDIR)%=%) @@ -15,37 +61,59 @@ export PART1COUNTS := $(TOPDIR)part1.counts export PART2INFO := $(TOPDIR)part2.info export PART2COUNTS := $(TOPDIR)part2.counts export INFOFILES := $(ZEROINFO) $(FULLINFO) $(TARGETINFO) $(PART1INFO) \ - $(PART2INFO) + $(PART2INFO) export COUNTFILES := $(ZEROCOUNTS) $(FULLCOUNTS) $(TARGETCOUNTS) \ - $(PART1COUNTS) $(PART2COUNTS) + $(PART1COUNTS) $(PART2COUNTS) # Use pre-defined lcovrc file LCOVRC := $(TOPDIR)lcovrc # Specify size for artificial info files (small, medium, large) SIZE := small -CC := gcc +export CC ?= gcc +export CXX ?= g++ + +export LCOV_TOOL := $(EXEC_COVER) $(BINDIR)/lcov +export GENHTML_TOOL := $(EXEC_COVER) $(BINDIR)/genhtml +export GENINFO_TOOL := $(EXEC_COVER) $(BINDIR)/geninfo +export PERL2LCOV_TOOL := $(EXEC_COVER) $(BINDIR)/perl2lcov +export LLVM2LCOV_TOOL := $(EXEC_COVER) $(BINDIR)/llvm2lcov +export PY2LCOV_TOOL := $(PYCOVER) $(BINDIR)/py2lcov +export XML2LCOV_TOOL := $(PYCOVER) $(BINDIR)/xml2lcov +export SPREADSHEET_TOOL := $(PYCOVER) $(SCRIPTDIR)/spreadsheet.py # Specify programs under test -export PATH := $(TOPDIR)/../bin:$(TOPDIR)/bin:$(PATH) -export LCOV := lcov --config-file $(LCOVRC) $(LCOVFLAGS) -export GENHTML := genhtml --config-file $(LCOVRC) $(GENHTMLFLAGS) +export PATH := $(BINDIR):$(TESTBINDIR):$(PATH) +export LCOV := $(LCOV_TOOL) --config-file $(LCOVRC) $(LCOVFLAGS) +export GENHTML := $(GENHTML_TOOL) --config-file $(LCOVRC) $(GENHTMLFLAGS) # Ensure stable output export LANG := C # Suppress output in non-verbose mode -ifneq ($(V),2) +export V +ifeq ("${V}","1") + echocmd= +else + echocmd=echo $1 ; .SILENT: endif +ifneq ($(COVER_DB),) +#OPTS += --coverage $(COVER_DB) +endif +ifneq ($(TESTCASE_ARGS),) +OPTS += --script-args "$(TESTCASE_ARGS)" +endif + # Do not pass TESTS= specified on command line to subdirectories to allow # make TESTS=subdir MAKEOVERRIDES := $(filter-out TESTS=%,$(MAKEOVERRIDES)) # Default target check: - runtests "$(MAKE)" $(TESTS) + #echo "found tests '$(TESTS)'" + runtests "$(MAKE)" $(TESTS) $(OPTS) ifeq ($(_ONCE),) @@ -55,20 +123,20 @@ export _ONCE := 1 check: checkdeps prepare checkdeps: - checkdeps $(TOPDIR)/../bin/* 
$(TOPDIR)/bin/* + checkdeps $(BINDIR)/* $(TESTBINDIR)/* prepare: $(INFOFILES) $(COUNTFILES) # Create artificial info files as test data $(INFOFILES) $(COUNTFILES): - cd $(TOPDIR) && mkinfo profiles/$(SIZE) -o src/ + cd $(TOPDIR) && $(TOPDIR)/bin/mkinfo profiles/$(SIZE) -o src/ endif clean: clean_echo clean_subdirs clean_echo: - echo " CLEAN $(patsubst %/,%,$(RELDIR))" + $(call echocmd," CLEAN lcov/$(patsubst %/,%,$(RELDIR))") clean_subdirs: cleantests "$(MAKE)" $(TESTS) diff --git a/tests/common.tst b/tests/common.tst new file mode 100644 index 00000000..84373766 --- /dev/null +++ b/tests/common.tst @@ -0,0 +1,167 @@ +# common utility for testing - mainly argument parsing + +CLEAN_ONLY=0 +COVER= + +PARALLEL='--parallel 0' +PROFILE="--profile" +LOCAL_COVERAGE=1 +KEEP_GOING=0 + +#echo "CMD: $0 $@" + +while [ $# -gt 0 ] ; do + + OPT=$1 + shift + case $OPT in + + --clean | clean ) + CLEAN_ONLY=1 + ;; + + -v | --verbose | verbose ) + set -x + ;; + + --keep-going | -k ) + KEEP_GOING=1 + ;; + + --coverage ) + if [[ "$1"x != 'x' && $1 != "-"* ]] ; then + COVER_DB=$1 + LOCAL_COVERAGE=0 + shift + else + COVER_DB='cover_db.dat' + fi + export PYCOV_DB="${COVER_DB}_py" + COVER="perl -MDevel::Cover=-db,${COVER_DB},-coverage,statement,branch,condition,subroutine,-silent,1 " + + if [ '' != "${COVERAGE_COMMAND}" ] ; then + CMD=${COVERAGE_COMMAND} + else + CMD='coverage' + which $CMD + if [ 0 != $? ] ; then + CMD='python3-coverage' # ubuntu? + fi + fi + which $CMD + if [ 0 != $? ] ; then + echo "cannot find 'coverage' or 'python3-coverage'" + echo "unable to run py2lcov - please install python Coverage.py package" + exit 1 + fi + + PYCOVER="COVERAGE_FILE=$PYCOV_DB $CMD run --branch --append" + ;; + + --home | -home ) + LCOV_HOME=$1 + shift + if [ ! -f $LCOV_HOME/bin/lcov ] ; then + echo "LCOV_HOME '$LCOV_HOME' does not exist" + exit 1 + fi + ;; + + --no-parallel ) + PARALLEL='' + ;; + + --no-profile ) + PROFILE='' + ;; + + --llvm ) + LLVM=1 + module load como/tools/llvm-gnu/11.0.0-1 + # seems to have been using same gcov version as gcc/4.8.3 + module load gcc/4.8.3 + #EXTRA_GCOV_OPTS="--gcov-tool '\"llvm-cov gcov\"'" + CXX="clang++" + ;; + + * ) + echo "Error: unexpected option '$OPT'" + exit 1 + ;; + esac +done + +if [[ "x" == ${LCOV_HOME}x ]] ; then + if [ -f ../../../bin/lcov ] ; then + LCOV_HOME=../../.. + else + LCOV_HOME=../../../../releng/coverage/lcov + fi +fi +LCOV_HOME=`(cd ${LCOV_HOME} ; pwd)` + +if [[ ! ( -d $LCOV_HOME/bin && -d $LCOV_HOME/lib && -x $LCOV_HOME/bin/genhtml && ( -f $LCOV_HOME/lib/lcovutil.pm || -f $LCOV_HOME/lib/lcov/lcovutil.pm ) ) ]] ; then + echo "LCOV_HOME '$LCOV_HOME' seems not to be valid" + exit 1 +fi + +export PATH=${LCOV_HOME}/bin:${LCOV_HOME}/share:${PATH} +export MANPATH=${MANPATH}:${LCOV_HOME}/man + +ROOT=`pwd` +PARENT=`(cd .. ; pwd)` +if [ -f $LCOV_HOME/scripts/getp4version ] ; then + SCRIPT_DIR=$LCOV_HOME/scripts +else + # running test from lcov install + SCRIPT_DIR=$LCOV_HOME/share/lcov/support-scripts + MD5_OPT='--version-script --md5' +fi +if [ 'x' == "x$GENHTML_TOOL" ] ; then + GENHTML_TOOL=${LCOV_HOME}/bin/genhtml + LCOV_TOOL=${LCOV_HOME}/bin/lcov + GENINFO_TOOL=${LCOV_HOME}/bin/geninfo + SPREADSHEET_TOOL=${SCRIPT_DIR}/spreadsheet.py + LLVM2LCOV_TOOL=${LCOV_HOME}/bin/llvm2lcov + PERL2LCOV_TOOL=${LCOV_HOME}/bin/perl2lcov + PY2LCOV_TOOL=${LCOV_HOME}/bin/py2lcov + XML2LCOV_TOOL=${LCOV_HOME}/bin/xml2lcov +fi + +# is this git or P4? +IS_GIT=0 +IS_P4=0 +git -C . rev-parse > /dev/null 2>&1 +if [ 0 == $? ] ; then + # this is git + IS_GIT=1 +else + p4 have ... 
> /dev/null 2>&1 + if [ 0 == $? ] ; then + IS_P4=1 + fi +fi + +if [ "$IS_GIT" == 1 ] || [ "$IS_P4" == 0 ] ; then + USE_GIT=1 + GET_VERSION=${SCRIPT_DIR}/gitversion.pm + GET_VERSION_EXE=${SCRIPT_DIR}/gitversion + ANNOTATE=${SCRIPT_DIR}/gitblame.pm +else + USE_P4=1 + GET_VERSION=${SCRIPT_DIR}/getp4version + GET_VERSION_EXE=${SCRIPT_DIR}/getp4version + ANNOTATE=${SCRIPT_DIR}/p4annotate.pm +fi +CRITERIA=${SCRIPT_DIR}/criteria +SELECT=${SCRIPT_DIR}/select.pm + +function clean_cover() +{ + if [ "x$COVER" != 'x' ] && [ 0 != $LOCAL_COVERAGE ] ; then + if [ -d $COVER_DB ] ; then + cover -delete -db $COVER_DB + fi + rm -rf $PYCOV_DB + fi +} diff --git a/tests/gendiffcov/Makefile b/tests/gendiffcov/Makefile new file mode 100644 index 00000000..80a9de20 --- /dev/null +++ b/tests/gendiffcov/Makefile @@ -0,0 +1,3 @@ +include ../common.mak + +TESTS := simple filter function insensitive synthesize errs diff --git a/tests/gendiffcov/errs/Makefile b/tests/gendiffcov/errs/Makefile new file mode 100644 index 00000000..143d91de --- /dev/null +++ b/tests/gendiffcov/errs/Makefile @@ -0,0 +1,8 @@ +include ../../common.mak + +TESTS := msgtest.sh + +clean: + for i in $(TESTS) ; do \ + ./$$i --clean ; \ + done diff --git a/tests/gendiffcov/errs/MsgContext.pm b/tests/gendiffcov/errs/MsgContext.pm new file mode 100644 index 00000000..d3544180 --- /dev/null +++ b/tests/gendiffcov/errs/MsgContext.pm @@ -0,0 +1,73 @@ +#!/usr/bin/env perl + +# Copyright (c) MediaTek USA Inc., 2024 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or (at +# your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see +# . +# +# hacky callback to generate an error - to test error handling of +# --expect-message-count callback. + +package MsgContext; + +use strict; +use Getopt::Long qw(GetOptionsFromArray); +use lcovutil; + +our @ISA = qw(Exporter); +our @EXPORT_OK = qw(new); + +our $call_count = 0; + +sub new +{ + my $class = shift; + my $script = shift; + my $standalone = $script eq $0; + my @options = @_; + my $comment; + + my $self = [$script]; + + return bless $self, $class; +} + +sub test +{ + my $v = shift; + ++$call_count; + + die("trying to die in callback") if $call_count > 1; + return 1; # otherwise OK +} + +sub context +{ + my $self = shift; + + my %data; + $data{user} = `whoami`; + $data{perl_version} = $^V->{original}; + $data{perl} = `which perl`; + $data{PERL5LIB} = $ENV{PERL5LIB} + if exists($ENV{PERL5LIB}); + + foreach my $k (keys %data) { + chomp($data{$k}); + } + + return \%data; +} + +1; diff --git a/tests/gendiffcov/errs/genError.pm b/tests/gendiffcov/errs/genError.pm new file mode 100644 index 00000000..fa176691 --- /dev/null +++ b/tests/gendiffcov/errs/genError.pm @@ -0,0 +1,55 @@ +#!/usr/bin/env perl + +# die() when callback is called - to enable error message testing + +package genError; + +sub new +{ + my $class = shift; + my $self = [@_]; # don't die if callback name is in list... 
+ return bless $self, $class; +} + +sub select +{ + die("die in select"); +} + +my $count; + +sub extract_version +{ + my $self = shift; + if (grep(/extract/, @$self)) { + return $count++; # different each time.. + } + die("die in extract_version"); +} + +sub compare_version +{ + die("die in compare_version"); +} + +sub annotate +{ + die("die in annotate"); +} + +sub resolve +{ + die("die in resolve"); +} + +sub check_criteria +{ + die("die in check_criteria"); +} + +sub simplify +{ + die("die in simplify"); +} + +1; diff --git a/tests/gendiffcov/errs/mcdc_errs.dat b/tests/gendiffcov/errs/mcdc_errs.dat new file mode 100644 index 00000000..6c9056fd --- /dev/null +++ b/tests/gendiffcov/errs/mcdc_errs.dat @@ -0,0 +1,51 @@ +TN: +SF:a.cpp +DA:1,1 +DA:2,0 +DA:3,0 +DA:4,1 +DA:10,0 +DA:11,0 +DA:12,0 +LF:7 +LH:2 +FN:1,2,fcn +FN:1,2,alias +FN:3,3,noCommonAlias +FN:11,11,onlyA +FNF:4 +FNH:3 +FNDA:0,fcn +FNDA:2,alias +FNDA:0,onlyA +FNDA:1,noCommonAlias + +BRDA:1,1,0,1 +BRDA:1,1,1,1 +BRDA:1,1,2,- +BRDA:1,2,0,1 +# common branch expr count zero in my, nonzero in you +BRDA:1,2,1,0 + +# branch in A only +BRDA:11,0,0,0 +BRDA:11,0,1,0 + +BRF:5 +BRH:4 + +MCDC:1,2,t,0,1,0 +MCDC:1,2,f,0,1,0 +# wrong order +MCDC:1,2,t,0,0,0 +MCDC:1,2,f,0,0,0 + +MCDC:11,1,t,0,0,0 +MCDC:11,1,f,0,0,different expression + +MCDC:0,1,t,1,0,0 + +MCF:6 +MCH:0 + +end_of_record diff --git a/tests/gendiffcov/errs/msgtest.sh b/tests/gendiffcov/errs/msgtest.sh new file mode 100755 index 00000000..a0a294dc --- /dev/null +++ b/tests/gendiffcov/errs/msgtest.sh @@ -0,0 +1,917 @@ +#!/bin/bash +set +x + +source ../../common.tst + +rm -f test.cpp *.gcno *.gcda a.out *.info *.log *.json diff.txt loop*.rc markers.err* readThis.rc testing.rc +rm -rf select criteria annotate empty unused_src scriptErr scriptFixed epoch inconsistent highlight etc mycache cacheFail expect subset context labels sortTables + +clean_cover + +if [[ 1 == $CLEAN_ONLY ]] ; then + exit 0 +fi + +if ! type "${CXX}" >/dev/null 2>&1 ; then + echo "Missing tool: $CXX" >&2 + exit 2 +fi + +SELECT_SCRIPT=$SCRIPT_DIR/select.pm +CRITERIA_SCRIPT=$SCRIPT_DIR/criteria.pm +GITBLAME_SCRIPT=$SCRIPT_DIR/gitblame.pm +GITVERSION_SCRIPT=$SCRIPT_DIR/gitversion.pm +P4VERSION_SCRIPT=$SCRIPT_DIR/P4version.pm +SIMPLIFY_SCRIPT=$SCRIPT_DIR/simplify.pm + +if [ 1 == "$USE_GIT" ] ; then + # this is git + VERSION_SCRIPT=${SCRIPT_DIR}/gitversion.pm + ANNOTATE_SCRIPT=${SCRIPT_DIR}/gitblame.pm +else + VERSION_SCRIPT=${SCRIPT_DIR}/getp4version + ANNOTATE_SCRIPT=${SCRIPT_DIR}/p4annotate.pm +fi + + +# filter out the compiler-generated _GLOBAL__sub_... symbol +LCOV_BASE="$EXTRA_GCOV_OPTS --branch-coverage $PARALLEL $PROFILE --no-external --ignore unused,unsupported --erase-function .*GLOBAL.*" +LCOV_OPTS="$LCOV_BASE" +DIFFCOV_OPTS="--filter line,branch,function --function-coverage --branch-coverage --demangle-cpp --prefix $PARENT_VERSION $PROFILE " + + +# old version of gcc has inconsistent line/function data +IFS='.' 
read -r -a VER <<< `${CC} -dumpversion` +if [ "${VER[0]}" -lt 5 ] ; then + # can't get branch coverpoints in 'initial' mode, with ancient GCC + IGNORE="--ignore usage" +elif [ "${VER[0]}" -ge 14 ] ; then + ENABLE_MCDC=1 + BASE_OPTS="$BASE_OPTS --mcdc" + # enable MCDC + COVERAGE_OPTS="-fcondition-coverage" +fi + +NO_INITIAL_CAPTURE=0 +if [[ "${VER[0]}" -gt 4 && "${VER[0]}" -lt 7 ]] ; then + # no data generated by initial capture + IGNORE_EMPTY="--ignore empty" + NO_INITIAL_CAPTURE=1 +fi + +if [ 1 == $NO_INITIAL_CAPTURE ] ; then + # all test test use --initial + echo 'all tests skipped' + exit 0 +fi + +echo `which gcov` +echo `which lcov` + +ln -s ../simple/simple.cpp test.cpp +${CXX} --coverage test.cpp +./a.out + +# some warnings.. +echo lcov $LCOV_OPTS --capture --directory . --initial --all --output-file initial.info --test-name myTest $IGNORE +$COVER $LCOV_TOOL $LCOV_OPTS --capture --directory . --initial --all --output-file initial.info --test-name myTest $IGNORE 2>&1 | tee initial_all.log +if [ 0 != $? ] ; then + echo "ERROR: lcov --capture failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep -- "'--all' ignored" initial_all.log +if [ 0 != $? ] ; then + echo "ERROR: missing ignore message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# need data for version error message checking as well +echo lcov $LCOV_OPTS --capture --directory . --output-file version.info --test-name myTest --version-script $SCRIPT_DIR/getp4version +$COVER $LCOV_TOOL $LCOV_OPTS --capture --directory . --output-file version.info --test-name myTest --version-script $SCRIPT_DIR/getp4version | tee version.log +if [ 0 != $? ] ; then + echo "ERROR: lcov --capture failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# help message +for T in "$GENHTML_TOOL" "$LCOV_TOOL" "$GENINFO_TOOL" ; do + echo "'$T' --help" + $COVER $T --help + if [ 0 != $? ] ; then + echo "unsuccessful $T help" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + echo "'$T' --noSuchOppt" + $COVER $T --noSuchOpt + if [ 0 == $? ] ; then + echo "didn't catch missing opt" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + +# generate some usage errors.. +echo lcov $LCOV_OPTS --list initial.info --initial +$COVER $LCOV_TOOL $LCOV_OPTS --list initial.info --initial 2>&1 | tee initial_warn.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "ERROR: lcov --list failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep "'--initial' is ignored" initial_warn.log +if [ 0 != $? ] ; then + echo "ERROR: missing ignore message 2" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# invalid syntax test: +cp initial.info badRecord.info +echo "MDCD:0,1,t,1,1,abc" >> badRecord.info +echo lcov $LCOV_OPTS --summary badRecord.info --msg-log badRecord.log +$COVER $LCOV_TOOL $LCOV_OPTS --summary badRecord.info --msg-log badRecord.log +if [ 0 == $? ] ; then + echo "ERROR: missing format message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep 'unexpected .info file record' badRecord.log +if [ 0 != $? ] ; then + echo "ERROR: failed to find format message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +echo lcov $LCOV_OPTS --summary initial.info --prune +$COVER $LCOV_TOOL $LCOV_OPTS --summary initial.info --prune 2>&1 | tee prune_err.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: lcov --summary 3 failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep 'prune-tests has effect' prune_err.log +if [ 0 != $? 
] ; then
+    echo "ERROR: missing ignore message 2"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+echo lcov $LCOV_OPTS --summary initial.info --prune --ignore usage
+$COVER $LCOV_TOOL $LCOV_OPTS --summary initial.info --prune --ignore usage 2>&1 | tee prune_warn.log
+if [ 0 != ${PIPESTATUS[0]} ] ; then
+    echo "ERROR: lcov prune failed"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+
+echo lcov $LCOV_OPTS --capture -d . -o build.info --build-dir x/y
+$COVER $LCOV_TOOL $LCOV_OPTS --capture -d . -o build.info --build-dir x/y 2>&1 | tee build_dir_err.log
+if [ 0 == ${PIPESTATUS[0]} ] ; then
+    echo "ERROR: lcov --capture with invalid --build-dir did not fail"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+grep "'x/y' is not a directory" build_dir_err.log
+if [ 0 != $? ] ; then
+    echo "ERROR: missing build dir message"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+
+echo geninfo $LCOV_OPTS --no-markers --filter branch . -o usage1.info --msg-log markers.err
+$GENINFO_TOOL $LCOV_OPTS --no-markers --filter branch . -o usage1.info --msg-log markers.err
+if [ 0 == $? ] ; then
+    echo "ERROR: expected usage error"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+grep "use new '--filter' option or old" markers.err
+if [ 0 != $? ] ; then
+    echo "ERROR: didn't find usage error"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+
+
+echo lcov $LCOV_OPTS --summary initial.info --config-file noSuchFile
+$COVER $LCOV_TOOL $LCOV_OPTS --summary initial.info --config-file noSuchFile 2>&1 | tee err_missing.log
+if [ 0 == ${PIPESTATUS[0]} ] ; then
+    echo "ERROR: didn't exit after missing config file error"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+grep "cannot read configuration file 'noSuchFile'" err_missing.log
+if [ 0 != $? ] ; then
+    echo "ERROR: missing config file message"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+
+# read a config file which is there...
+echo "message_log = message_file.log" > testing.rc
+echo "config_file = testing.rc" > readThis.rc
+echo lcov $LCOV_OPTS --summary initial.info --config-file readThis.rc
+$COVER $LCOV_TOOL $LCOV_OPTS --summary initial.info --config-file readThis.rc
+if [ 0 != ${PIPESTATUS[0]} ] ; then
+    echo "ERROR: didn't read config file"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+if [ ! -f message_file.log ] ; then
+    echo "ERROR: didn't honor message_log"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+
+# loop in config file inclusion
+echo "config_file = loop1.rc" > loop1.rc
+echo lcov $LCOV_OPTS --summary initial.info --config-file loop1.rc --ignore usage
+$COVER $LCOV_TOOL $LCOV_OPTS --summary initial.info --config-file loop1.rc --ignore usage 2>&1 | tee err_selfloop.log
+if [ 0 == ${PIPESTATUS[0]} ] ; then
+    echo "ERROR: skipped self loop error - which isn't supposed to be possible right now"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+grep "config file inclusion loop" err_selfloop.log
+if [ 0 != $? ] ; then
+    echo "ERROR: missing config file message"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+
+echo "config_file = loop3.rc" > loop2.rc
+echo 'config_file = $ENV{PWD}/loop2.rc' > loop3.rc
+echo lcov $LCOV_OPTS --summary initial.info --config-file loop2.rc --ignore usage
+$COVER $LCOV_TOOL $LCOV_OPTS --summary initial.info --config-file loop2.rc --ignore usage 2>&1 | tee err_loop.log
+if [ 0 == ${PIPESTATUS[0]} ] ; then
+    echo "ERROR: skipped self loop error2 - which isn't supposed to be possible"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+grep "config file inclusion loop" err_loop.log
+if [ 0 != $?
] ; then + echo "ERROR: missing config file message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +echo lcov $LCOV_OPTS --capture -d . -o build.info --build-dir $LCOV_HOME +$COVER $LCOV_TOOL $LCOV_OPTS --capture -d . -o build.info --build-dir $LCOV_HOME 2>&1 | tee build_dir_unused.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "ERROR: lcov --list failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep "\"--build-directory .* is unused" build_dir_unused.log +if [ 0 != $? ] ; then + echo "ERROR: missing build dir unused message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +echo lcov $LCOV_OPTS --summary initial.info --rc memory_percentage=-10 +$COVER $LCOV_TOOL $LCOV_OPTS --summary initial.info --rc memory_percentage=-10 2>&1 | tee mem_err.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: lcov --summary 4 failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep "memory_percentage '-10' " mem_err.log +if [ 0 != $? ] ; then + echo "ERROR: missing percent message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +echo lcov $LCOV_OPTS --summary initial.info --rc memory_percentage=-10 --ignore usage +$COVER $LCOV_TOOL $LCOV_OPTS --summary initial.info --rc memory_percentage=-10 --ignore usage 2>&1 | tee mem_warn.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "ERROR: lcov memory usage failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +echo genhtml $DIFCOV_OPTS initial.info -o select --select-script $SELECT_SCRIPT --select-script -x +$COVER $GENHTML_TOOL $DIFFCOV_OPTS initial.info -o select --select-script $SELECT_SCRIPT --select-script -x 2>&1 | tee script_err.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: genhtml select passed by accident" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep "unable to create callback from" script_err.log +if [ 0 != $? ] ; then + echo "ERROR: missing script message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +for arg in "--select-script $SELECT_SCRIPT,--range,0:10" \ + "--criteria-script $CRITERIA_SCRIPT,--signoff" \ + "--annotate-script $ANNOTATE_SCRIPT" \ + "--annotate-script $GITBLAME_SCRIPT,mediatek.com,--p4" \ + "--annotate-script $GITBLAME_SCRIPT,--p4" \ + "--annotate-script $GITBLAME_SCRIPT" \ + " --ignore version --version-script $GITVERSION_SCRIPT,--md5,--p4" \ + ; do + echo genhtml $DIFCOV_OPTS initial.info -o scriptErr ${arg},-x + $COVER $GENHTML_TOOL $DIFFCOV_OPTS initial.info -o scriptErr ${arg},-x 2>&1 | tee script_err.log + if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: genhtml scriptErr passed by accident" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + grep "unable to create callback from" script_err.log + if [ 0 != $? ] ; then + echo "ERROR: missing script message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + # run again without error + echo genhtml $DIFCOV_OPTS initial.info -o scriptFixed ${arg} + $COVER $GENHTML_TOOL $DIFFCOV_OPTS initial.info -o scriptFixed ${arg} --ignore annotate 2>&1 | tee script_err.log + if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "ERROR: genhtml scriptFixed failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + +echo genhtml $DIFCOV_OPTS initial.info -o sortTables --sort +$COVER $GENHTML_TOOL $DIFFCOV_OPTS initial.info -o sortTables --sort 2>&1 | tee sort.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "ERROR: genhtml --sort failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep "is deprecated and will be removed" sort.log +if [ 0 != $? 
] ; then + echo "ERROR: missing --sort message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +echo genhtml $DIFCOV_OPTS initial.info -o p4err --version-script $P4VERSION_SCRIPT,-x +$COVER $GENHTML_TOOL $DIFFCOV_OPTS initial.info -o p4err --version-script $P4VERSION_SCRIPT,-x 2>&1 | tee p4err.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: genhtml select passed by accident" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep "unable to create callback from" p4err.log +if [ 0 != $? ] ; then + echo "ERROR: missing script message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +echo genhtml $DIFCOV_OPTS initial.info -o select --select-script ./select.sh --rc compute_file_version=1 +$COVER $GENHTML_TOOL $DIFFCOV_OPTS initial.info -o select --select-script ./select.sh --rc compute_file_version=1 2>&1 | tee select_scr.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "ERROR: genhtml compute_version failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +if [ 0 != $? ] ; then + echo "ERROR: trivial select failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep "'compute_file_version=1' option has no effect" select_scr.log +if [ 0 != $? ] ; then + echo "ERROR: missing script message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +if [ $IS_GIT == 0 ] && [ $IS_P4 == 0 ] ; then + IGNORE_ANNOTATE='--ignore annotate' +fi + +# and again, as a differential report with annotation +NOW=`date` +rm -rf mycache +echo genhtml $DIFCOV_OPTS initial.info -o select --select-script ./select.sh --annotate $ANNOTATE_SCRIPT,--cache,mycache --baseline-file initial.info $IGNORE_ANNOTATE +$COVER $GENHTML_TOOL $DIFFCOV_OPTS initial.info -o select --select-script ./select.sh --annotate $ANNOTATE_SCRIPT,--cache,mycache --baseline-file initial.info --title 'selectExample' --header-title 'this is the header' --date-bins 1,5,22 --baseline-date "$NOW" --prefix x --no-prefix $IGNORE_ANNOTATE 2>&1 | tee select_scr.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "ERROR: genhtml cache failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +if [ ! -d mycache ] ; then + echo "did not create 'mycache'" +fi + +#break the cached data - cause corruption error +for i in `find mycache -type f` ; do + echo $i + echo xyz > $i +done +# have to ignore version mismatch becaure p4annotate also computes version +echo genhtml $DIFCOV_OPTS initial.info -o cacheFail --select-script ./select.sh --annotate $ANNOTATE_SCRIPT,--cache,mycache --baseline-file initial.info --ignore version $IGNORE_ANNOTATE +$COVER $GENHTML_TOOL $DIFFCOV_OPTS initial.info -o cacheFail --select-script ./select.sh --annotate $ANNOTATE_SCRIPT,--cache,mycache --baseline-file initial.info --title 'selectExample' --header-title 'this is the header' --date-bins 1,5,22 --baseline-date "$NOW" --prefix x --no-prefix --ignore version $IGNORE_ANNOTATE 2>&1 | tee cacheFail.log + +if [ '' == $IGNORE_ANNOTATE ] ; then + if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: genhtml corrupt deserialize failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + + grep -E "corrupt.*unable to deserialize" cacheFail.log + if [ 0 != $? 
] && [ '' == $IGNORE_ANNOTATE ]; then + echo "ERROR: failed to find cache corruption" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +fi + +# make cache file unreadable +find mycache -type f -exec chmod ugo-r {} \; +echo genhtml $DIFCOV_OPTS initial.info -o cacheFail --select-script ./select.sh --annotate $ANNOTATE_SCRIPT,--cache,mycache --baseline-file initial.info $IGNORE_ANNOTATE +$COVER $GENHTML_TOOL $DIFFCOV_OPTS initial.info -o cacheFail --select-script ./select.sh --annotate $ANNOTATE_SCRIPT,--cache,mycache --baseline-file initial.info --title 'selectExample' --header-title 'this is the header' --date-bins 1,5,22 --baseline-date "$NOW" --prefix x $IGNORE_ANNOTATE --no-prefix 2>&1 | tee cacheFail2.log + +if [ '' == $IGNORE_ANNOTATE ] ; then + if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: genhtml unreadable cache failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + + grep -E "callback.*can't open" cacheFail2.log + if [ 0 != $? ] ; then + echo "ERROR: failed to find cache error" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +fi + +# differential report with empty diff file +touch diff.txt +echo genhtml $DIFCOV_OPTS initial.info -o empty --diff diff.txt --annotate $ANNOTATE_SCTIPT --baseline-file initial.info +$COVER $GENHTML_TOOL $DIFFCOV_OPTS initial.info -o empty --diff diff.txt --annotate $ANNOTATE_SCRIPT --baseline-file initial.info 2>&1 | tee empty_diff.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: genhtml did not fail empty diff eheck" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep "'diff' data file diff.txt contains no differences" empty_diff.log +if [ 0 != $? ] ; then + echo "ERROR: missing empty message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +# insensitive flag with case-sensitive substitute expr +# - this will trigger multiple usage messages, but we set the max count +# to 1 (one) - to also trigger a 'count exceeded' message. +echo lcov $LCOV_OPTS --summary initial.info --substitute 's#aBc#AbC#' --substitute 's@XyZ#xyz#i' --rc case_insensitive=1 --ignore source --rc max_message_count=1 +$COVER $LCOV_TOOL $LCOV_OPTS --summary initial.info --substitute 's#aBc#AbC#' --rc case_insensitive=1 --ignore source --rc max_message_count=1 2>&1 | tee insensitive.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "ERROR: lcov --summary insensitive" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep " --substitute pattern 's#aBc#AbC#' does not seem to be case insensitive" insensitive.log +if [ 0 != $? ] ; then + echo "ERROR: missing insensitive message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep " (count) max_message_count=1 reached for 'usage' messages: no more will be reported." insensitive.log +if [ 0 != $? ] ; then + echo "ERROR: missing max count message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# invalid regexp +for flag in --substitute ; do + echo genhtml $DIFFCOV_OPTS -o foo initial.info $flag 's#aBc#AbC' --ignore source --rc max_message_count=1 + $COVER $GENHTML_TOOL $DIFFCOV_OPTS -o foo initial.info $flag 's#aBc#AbC' --ignore source --rc max_message_count=1 2>&1 | tee invalid_regexp.log + if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: genhtml invalid $flag" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + grep "Invalid regexp \"$flag " invalid_regexp.log + if [ 0 != $? 
] ; then + echo "ERROR: missing regexp message for $flag" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + +echo genhtml $DIFFCOV_OPTS -o foo initial.info --simplify $SIMPLIFY_SCRIPT,--re, 's#aBc#AbC' --ignore source --rc max_message_count=1 +$COVER $GENHTML_TOOL $DIFFCOV_OPTS -o foo initial.info --simplify $SIMPLIFY_SCRIPT,--re, 's#aBc#AbC' --ignore source --rc max_message_count=1 2>&1 | tee script_err.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: genhtml invalid $flag" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +for str in 'Invalid regexp' 'unable to create callback from module ' ; do + grep "$str" script_err.log + if [ 0 != $? ] ; then + echo "ERROR: missing err message for '$str'" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + +# script errors +for args in '' ',--file,a,--re,s/a/b/' ',--file,a' ; do + echo genhtml $DIFFCOV_OPTS -o foo initial.info --simplify ${SIMPLIFY_SCRIPT}${args} --ignore source --rc max_message_count=1 + $COVER $GENHTML_TOOL $DIFFCOV_OPTS -o foo initial.info --simplify ${SIMPLIFY_SCRIPT}${args} --ignore source --rc max_message_count=1 2>&1 | tee invalid_callback.log + if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: genhtml invalid '$args'" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + grep "unable to create callback from module " invalid_callback.log + if [ 0 != $? ] ; then + echo "ERROR: missing regexp message for '$args'" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + +# callback error testing +# die() in 'extract' callback: +echo lcov $LCOV_OPTS --summary version.info --filter line --version-script ./genError.pm +$COVER $LCOV_TOOL $LCOV_OPTS --summary version.info --filter line --version-script ./genError.pm 2>&1 | tee extract_err.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: lcov extract passed by accident" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep -E "extract_version.+ failed" extract_err.log +if [ 0 != $? ] ; then + echo "ERROR: extract_version message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# pass 'extract' but die in check (need to check version in order to filter) +echo lcov $LCOV_OPTS --summary version.info --filter line --version-script ./genError.pm --version-script extract +$COVER $LCOV_TOOL $LCOV_OPTS --summary version.info --filter line --version-script ./genError.pm --version-script extract 2>&1 | tee extract_err.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: lcov extract passed by accident" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep -E "compare_version.+ failed" extract_err.log +if [ 0 != $? ] ; then + echo "ERROR: compare_version message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# resolve +# apply substitution to ensure that the file is not found so the resolve callback +# is called +echo lcov $LCOV_OPTS --summary initial.info --rc case_insensitive=1 --filter branch --resolve ./genError.pm --substitute s/test.cpp/noSuchFile.cpp/i +$COVER $LCOV_TOOL $LCOV_OPTS --summary initial.info --rc case_insensitive=1 --filter branch --resolve ./genError.pm --substitute s/test.cpp/noSuchFile.cpp/i 2>&1 | tee resolve_err.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: lcov --summary resolve" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep -E "resolve.+ failed" resolve_err.log +if [ 0 != $? 
] ; then + echo "ERROR: resolve message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +for callback in select annotate criteria simplify ; do + + echo genhtml $DIFCOV_OPTS initial.info -o $callback --${callback}-script ./genError.pm + $COVER $GENHTML_TOOL $DIFFCOV_OPTS initial.info -o $callback --${callback}-script ./genError.pm 2>&1 | tee ${callback}_err.log + if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: genhtml $callback error passed by accident" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + grep -E "${callback}.* failed" ${callback}_err.log + if [ 0 != $? ] ; then + echo "ERROR: $callback message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + +echo genhtml $DIFCOV_OPTS initial.info -o unused_src --source-dir ../.. +$COVER $GENHTML_TOOL $DIFFCOV_OPTS initial.info -o unused_src --source-dir ../.. 2>&1 | tee src_err.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: genhtml source-dir error passed by accident" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep -E -- '"--source-directory ../.." is unused' src_err.log +if [ 0 != $? ] ; then + echo "ERROR: missing srcdir message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# inconsistent setting of branch filtering without enabling branch coverage +echo genhtml --filter branch --prefix $PARENT_VERSION $PROFILE initial.info -o inconsistent --rc treat_warning_as_error=1 +$COVER $GENHTML_TOOL --filter branch --prefix $PARENT_VERSION $PROFILE initial.info -o inconsistent --rc treat_warning_as_error=1 2>&1 | tee inconsistent.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: genhtml inconsistent warning-as-error passed by accident" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep 'ERROR: (usage) branch filter enabled but neither branch or condition coverage is enabled' inconsistent.log +if [ 0 != $? ] ; then + echo "ERROR: missing inconsistency message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# when we treat warning as error, but ignore the message type +echo genhtml --filter branch --prefix $PARENT_VERSION $PROFILE initial.info -o inconsistent --rc treat_warning_as_error=1 --ignore usage +$COVER $GENHTML_TOOL --filter branch --prefix $PARENT_VERSION $PROFILE initial.info -o inconsistent --rc treat_warning_as_error=1 --ignore usage 2>&1 | tee inconsistent.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "ERROR: genhtml inconsistent warning-as-error passed by accident" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep 'WARNING: (usage) branch filter enabled but neither branch or condition coverage is enabled' inconsistent.log +if [ 0 != $? ] ; then + echo "ERROR: missing inconsistency message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +echo genhtml --filter branch --prefix $PARENT_VERSION $PROFILE initial.info -o inconsistent +$COVER $GENHTML_TOOL --filter branch --prefix $PARENT_VERSION $PROFILE initial.info -o inconsistent 2>&1 | tee inconsistent2.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "ERROR: genhtml inconsistent warning-as-error failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep 'WARNING: (usage) branch filter enabled but neither branch or condition coverage is enabled' inconsistent2.log +if [ 0 != $? 
] ; then + echo "ERROR: missing inconsistency message 2" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +# use jan1 1970 as epoch +echo SOURCE_DATE_EPOCH=0 genhtml $DIFFCOV_OPTS initial.info -o epoch +SOURCE_DATE_EPOCH=0 $COVER $GENHTML_TOOL $DIFFCOV_OPTS initial.info --annotate $ANNOTATE_SCRIPT -o epoch 2>&1 | tee epoch.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: missed epoch error" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep -E "ERROR: \(inconsistent\) .+ 'SOURCE_DATE_EPOCH=0' .+ is older than annotate time" epoch.log +if [ 0 != $? ] ; then + echo "ERROR: missing epoch" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# deprecated messages +echo genhtml $DIFFCOV_OPTS initial.info -o highlight --highlight +$COVER $GENHTML_TOOL $DIFFCOV_OPTS initial.info --annotate $ANNOTATE_SCRIPT --highlight -o highlight 2>&1 | tee highlight.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: missed decprecated error" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep -E "ERROR: \(deprecated\) .*option .+ has been removed" highlight.log +if [ 0 != $? ] ; then + echo "ERROR: missing highlight message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +mkdir -p etc +echo "genhtml_highlight = 1" > etc/lcovrc +echo genhtml $DIFFCOV_OPTS initial.info -o highlight --config-file LCOV_HOME/etc/lcovrc $IGNORE_ANNOTATE +LCOV_HOME=. $COVER $GENHTML_TOOL $DIFFCOV_OPTS initial.info --annotate $ANNOTATE_SCRIPT -o highlight $IGNORE_ANNOTATE 2>&1 | tee highlight2.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "ERROR: deprecated error was fatal" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep -E "WARNING: \(deprecated\) .+ deprecated and ignored" highlight2.log +if [ 0 != $? ] ; then + echo "ERROR: missing decrecated message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +for err in "--rc truncate_owner_table=top,x" "--rc owner_table_entries=abc" "--rc owner_table_entries=-1" ; do + echo genhtml $DIFCOV_OPTS initial.info -o subset --annotate $ANNOTATE_SCRIPT --baseline-file initial.info --show-owners $IGNORE_ANNOTATE + $COVER $GENHTML_TOOL $DIFFCOV_OPTS initial.info -o subset --annotate $ANNOTATE_SCRIPT --baseline-file initial.info --title 'subset' --header-title 'this is the header' --date-bins 1,5,22 --baseline-date "$NOW" --prefix x --no-prefix $err --show-owners $IGNORE_ANNOTATE + if [ 0 == $? ] ; then + echo "ERROR: genhtml $err unexpectedly passed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + + +# test error checks for --expect-message-count expressions +for expr in "malformed" "noSuchMsg:%C<5" "inconsistent:%c<5" 'inconsistent:%C<$x' 'inconsistent:0,inconsistent:2' ; do + + $COVER $GENHTML_TOOL $DIFFCOV_OPTS initial.info -o expect --annotate $ANNOTATE_SCRIPT --baseline-file initial.info --title 'subset' --header-title 'this is the header' --date-bins 1,5,22 --baseline-date "$NOW" --prefix x --no-prefix --expect-message $expr --show-owners $IGNORE_ANNOTATE + if [ 0 == $? ] ; then + echo "ERROR: genhtml $expr unexpectedly passed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + + $COVER $GENHTML_TOOL $DIFFCOV_OPTS initial.info -o expect --annotate $ANNOTATE_SCRIPT --baseline-file initial.info --title 'subset' --header-title 'this is the header' --date-bins 1,5,22 --baseline-date "$NOW" --prefix x --no-prefix --expect-message $expr --show-owners --ignore usage $IGNORE_ANNOTATE + if [ 0 != $? 
] ; then + echo "ERROR: genhtml $expr with ignore unexpectedly failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + +# slightly more complicated case...hack a bit so that 'expect' eval fails +# in summary callback +$COVER $GENHTML_TOOL $DIFFCOV_OPTS initial.info -o context --annotate $ANNOTATE_SCRIPT --baseline-file initial.info --title 'context' --header-title 'this is the header' --date-bins 1,5,22 --baseline-date "$NOW" --prefix x --no-prefix --context-script ./MsgContext.pm --expect-message 'usage:MsgContext::test(%C)' --show-owners --ignore callback $IGNORE_ANNOTATE 2>&1 | tee expect.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "ERROR: genhtml context with ignore unexpectedly failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep -E "WARNING: .*callback.* evaluation of .+ failed" expect.log +if [ 0 != $? ] ; then + echo "ERROR: didn't find expected callback message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# generate error for case that number of date labels doesn't match +$COVER $GENHTML_TOOL $DIFFCOV_OPTS initial.info -o labels --annotate $ANNOTATE_SCRIPT --baseline-file initial.info --title 'context' --header-title 'this is the header' --date-bins 1,5,22 --date-labels a,b,c,d,e --baseline-date "$NOW" --msg-log labels.log $IGNORE_ANNOTATE +if [ 0 == $? ] ; then + echo "ERROR: genhtml --date-labels didn't fail" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep -E "ERROR: .*usage.* expected number of 'age' labels to match" labels.log +if [ 0 != $? ] ; then + echo "ERROR: didn't find expected labels message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +if [ "$ENABLE_MCDC" != 1 ] ; then + $COVER $GENINFO_TOOL . -o mccd --mcdc-coverage $LCOV_OPTS --msg-log mcdc_errs.log + if [ 0 == $? ] ; then + echo "ERROR: no error for unsupported MC/DC" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + + grep -E "MC/DC coverage enabled .* does not support the .* option" mcdc_errs.log + if [ 0 != $? ] ; then + echo "ERROR: didn't find expected MCDC error" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + +fi + +$COVER $LCOV_TOOL -o err.info -a mcdc_errs.dat --mcdc-coverage $LCOV_OPTS --msg-log mcdc_expr.log --ignore format,inconsistent,source +if [ 0 != $? ] ; then + echo "ERROR: didn't ignore MC/DC errors" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep -E "MC/DC group .* expression .* changed from" mcdc_expr.log +if [ 0 != $? ] ; then + echo "ERROR: did not see MC/DC expression error" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep -E "MC/DC group .* non-contiguous expression .* found" mcdc_expr.log +if [ 0 != $? ] ; then + echo "ERROR: did not see MC/DC contiguous error" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep -E "unexpected line number .* in condition data record .*" mcdc_expr.log +if [ 0 != $? 
] ; then + echo "ERROR: did not see MC/DC contiguous error" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +if [ -d mycache ] ; then + find mycache -type f -exec chmod ugo+r {} \; +fi + +echo "Tests passed" + +if [ "x$COVER" != "x" ] && [ $LOCAL_COVERAGE == 1 ] ; then + cover +fi diff --git a/tests/gendiffcov/errs/select.sh b/tests/gendiffcov/errs/select.sh new file mode 100755 index 00000000..beb382f1 --- /dev/null +++ b/tests/gendiffcov/errs/select.sh @@ -0,0 +1,4 @@ +#!/usr/bin/bash + +#echo "$@_" >> x.log +exit 1 diff --git a/tests/gendiffcov/filter/Makefile b/tests/gendiffcov/filter/Makefile new file mode 100644 index 00000000..dd27e522 --- /dev/null +++ b/tests/gendiffcov/filter/Makefile @@ -0,0 +1,6 @@ +include ../../common.mak + +TESTS := filter.pl + +clean: + rm -f *.orig *.filtered *.directive diff --git a/tests/gendiffcov/filter/brace.c b/tests/gendiffcov/filter/brace.c new file mode 100644 index 00000000..f4f0beaa --- /dev/null +++ b/tests/gendiffcov/filter/brace.c @@ -0,0 +1,25 @@ +kal_uint32 set_brack_offset(kal_uint32_offset) +{ +#if defined(CONDITION) + kal_uint32 current_thread = THIS_THREAD_TYPE(); + kal_uint32 current_layer = get_current_layer(); + return VISIT_TREAD_ADDRESS(current_thread, current_layer); +#endif +} + +ConstructorExample1::ConstructorExample1() +: initializerList() +{ +} + +ConstructorExample2::ConstructorExample2() { + if (x) + { + } +} + +void braceExample2() { + just_a_block(); + { + } +} diff --git a/tests/gendiffcov/filter/brace.info b/tests/gendiffcov/filter/brace.info new file mode 100644 index 00000000..5eb4aba2 --- /dev/null +++ b/tests/gendiffcov/filter/brace.info @@ -0,0 +1,26 @@ +TN: +SF:brace.c +FN:1,set_brack_offset +FNDA:27,set_brack_offset +FN:10,ConstructorExample1::ConstructorExample1 +FNDA:1,ConstructorExample1::ConstructorExample1 +FN:15,ConstructorExample2::ConstructorExample2 +FNDA:1,ConstructorExample2::ConstructorExample2 +FN:21,braceExample2 +FNDA:1,braceExample2 +FNF:3 +FNH:3 +DA:3,27 +DA:1,27 +DA:8,27 +DA:11,1 +DA:13,0 +DA:15,1 +DA:16,1 +DA:18,0 +DA:21,1 +DA:22,1 +DA:25,0 +LF:7 +LH:6 +end_of_record diff --git a/tests/gendiffcov/filter/expr1.c b/tests/gendiffcov/filter/expr1.c new file mode 100644 index 00000000..1e0d0004 --- /dev/null +++ b/tests/gendiffcov/filter/expr1.c @@ -0,0 +1,9 @@ +nr_ddmrs_trs_fce_ar_fcm_cal_knl(&mc_trs_ar_fcm, + hch_trs_srv_addr_rx, + &mc_os_vcm_acc_rtl_buf, + total_prg_num, + log2_alpha, + ar_fcm_exp_diff, + sr_fcm_trace); + +hello_world(); diff --git a/tests/gendiffcov/filter/expr2.c b/tests/gendiffcov/filter/expr2.c new file mode 100644 index 00000000..927d4598 --- /dev/null +++ b/tests/gendiffcov/filter/expr2.c @@ -0,0 +1 @@ +kal_uint32 L1_BANK *p_nb_crit = reinterpret_cast(l1_to_icm_cast(mrh_nb_crit)); diff --git a/tests/gendiffcov/filter/expr3.c b/tests/gendiffcov/filter/expr3.c new file mode 100644 index 00000000..48e49fd9 --- /dev/null +++ b/tests/gendiffcov/filter/expr3.c @@ -0,0 +1 @@ +b_in_wf = pmr_fce_ceof_wf->modulo(1/*NT*/ * 12/*12 raw*/ * 12/*#column*/).base(12*12*sym_idx).start(12*12*sym_idx); diff --git a/tests/gendiffcov/filter/expr4.c b/tests/gendiffcov/filter/expr4.c new file mode 100644 index 00000000..62f8249a --- /dev/null +++ b/tests/gendiffcov/filter/expr4.c @@ -0,0 +1 @@ +coreToBlaze_fxp(sr_sig_nor, ((0 << 16) | ((nf_len * symb_num) & 0xFFFF)), 15); diff --git a/tests/gendiffcov/filter/filter.pl b/tests/gendiffcov/filter/filter.pl new file mode 100755 index 00000000..c5a741f7 --- /dev/null +++ b/tests/gendiffcov/filter/filter.pl @@ -0,0 +1,106 @@ +#!/usr/bin/env perl + 
+use strict; +use warnings; +use FindBin; + +use lib "$FindBin::RealBin/../../../lib"; # build dir testcase +use lib "$ENV{LCOV_HOME}/lib/lcov"; # install testcase +use lcovutil; + +lcovutil::parseOptions({}, {}); + +foreach my $example (glob('expr*.c')) { + print("checking conditional in $example\n"); + my $file = ReadCurrentSource->new($example); + + die("failed to filter bogus conditional in $example") + if $file->containsConditional(1); +} + +# check effect of configurations.. +my $length = ReadCurrentSource->new("expr1.c"); + +$lcovutil::source_filter_lookahead = 5; +print("checking conditional in expr1.c with lookahead " . + $lcovutil::source_filter_lookahead . " and bitwise " . + $lcovutil::source_filter_bitwise_are_conditional . "\n"); +die("source_filter_lookahead had no effect") + unless $length->containsConditional(1); + +$lcovutil::source_filter_lookahead = 10; +$lcovutil::source_filter_bitwise_are_conditional = 1; +print("checking conditional in expr1.c with lookahead " . + $lcovutil::source_filter_lookahead . " and bitwise " . + $lcovutil::source_filter_bitwise_are_conditional . "\n"); +die("source_filter_bitwise_are_conditional had no effect") + unless $length->containsConditional(1); + +# try some trivial functions +foreach my $example (glob('*rivial*.c')) { + print("checking trivial function in $example\n"); + my $lines = 0; + open(FILE, $example) or die("can't open $example: $!"); + $lines++ while (<FILE>); + close(FILE); + + my $file = ReadCurrentSource->new($example); + if ($file->containsTrivialFunction(1, $lines)) { + die("incorrectly found trivial function in $example") + if $example =~ /^no/; + } else { + die("failed to find trivial function in $example") + unless $example =~ /^no/; + } + my $info = $example; + $info =~ s/c$/info/g; + if (-f $info) { + lcovutil::parse_cov_filters('trivial'); + my $trace = TraceFile->load($info, $file); + $trace->write_info_file($info . '.filtered'); + lcovutil::parse_cov_filters(); + } +} + +# problematic brace filter example... +$lcovutil::verbose = 3; +$lcovutil::derive_function_end_line = 1; +our $func_coverage = 1; +foreach my $example (glob('*brace*.c')) { + print("checking brace filter for $example\n"); + my $info = $example; + $info =~ s/c$/info/g; + lcovutil::parse_cov_filters(); # turn off filtering + my $vanilla = TraceFile->load($info); + $vanilla->write_info_file($info . '.orig'); + my @v = $vanilla->count_totals(); + my ($lines, $hit) = @{$v[1]}; + print("$lines lines $hit hit\n"); + + lcovutil::parse_cov_filters('brace'); + my $reader = ReadCurrentSource->new($example); + my $trace = TraceFile->load($info, $reader); + $trace->write_info_file($info . '.filtered'); + my @counts = $trace->count_totals(); + my ($filtered, $h2) = @{$counts[1]}; + print("$filtered brace-filtered lines $h2 hit\n"); + die("failed to filter $info") + unless ($lines > $filtered && + $hit > $h2); + + #simple test for compiler directive filtering + lcovutil::parse_cov_filters(); # reset filters + lcovutil::parse_cov_filters('directive', 'brace'); + $reader = ReadCurrentSource->new('brace.c'); + my $directive = TraceFile->load('brace.info', $reader); + $directive->write_info_file($info .
'.directive'); + @counts = $directive->count_totals(); + my ($f3, $h3) = @{$counts[1]}; + print("$f3 directive-filtered lines $h3 hit\n"); + die("failed to filter $info: $lines -> $f3, $hit -> $h3") + unless ($lines > $f3 && + $hit > $h3); +} + +print("passed\n"); +exit(0); diff --git a/tests/gendiffcov/filter/multilineTrivial.c b/tests/gendiffcov/filter/multilineTrivial.c new file mode 100644 index 00000000..591d2399 --- /dev/null +++ b/tests/gendiffcov/filter/multilineTrivial.c @@ -0,0 +1,4 @@ +void x() { + // comment + /* and another comment + which goes to here */} diff --git a/tests/gendiffcov/filter/multilineTrivial2.c b/tests/gendiffcov/filter/multilineTrivial2.c new file mode 100644 index 00000000..2013306b --- /dev/null +++ b/tests/gendiffcov/filter/multilineTrivial2.c @@ -0,0 +1,4 @@ +void x() { + + ; +} diff --git a/tests/gendiffcov/filter/notTrivial1.c b/tests/gendiffcov/filter/notTrivial1.c new file mode 100644 index 00000000..75210b40 --- /dev/null +++ b/tests/gendiffcov/filter/notTrivial1.c @@ -0,0 +1 @@ +void skipMe(unsigned, unsigned) { return 1; } diff --git a/tests/gendiffcov/filter/notTrivial2.c b/tests/gendiffcov/filter/notTrivial2.c new file mode 100644 index 00000000..4ffff256 --- /dev/null +++ b/tests/gendiffcov/filter/notTrivial2.c @@ -0,0 +1,2 @@ +void containsCode() { a; +}; diff --git a/tests/gendiffcov/filter/notTrivial3.c b/tests/gendiffcov/filter/notTrivial3.c new file mode 100644 index 00000000..46639df1 --- /dev/null +++ b/tests/gendiffcov/filter/notTrivial3.c @@ -0,0 +1 @@ +;} // doesn't match regexp diff --git a/tests/gendiffcov/filter/notTrivial_init.c b/tests/gendiffcov/filter/notTrivial_init.c new file mode 100644 index 00000000..32a6da29 --- /dev/null +++ b/tests/gendiffcov/filter/notTrivial_init.c @@ -0,0 +1,3 @@ +Data::Data(int a) +: Base(a) { +} diff --git a/tests/gendiffcov/filter/notTrivial_multiline.c b/tests/gendiffcov/filter/notTrivial_multiline.c new file mode 100644 index 00000000..e4a087a7 --- /dev/null +++ b/tests/gendiffcov/filter/notTrivial_multiline.c @@ -0,0 +1,4 @@ +void y(int x, int z) { + ; // maybe this should be + /* marked trivial but two semicolons found */ ; +} diff --git a/tests/gendiffcov/filter/trivial1.c b/tests/gendiffcov/filter/trivial1.c new file mode 100644 index 00000000..930e5369 --- /dev/null +++ b/tests/gendiffcov/filter/trivial1.c @@ -0,0 +1 @@ +void trivial() { /* comment */} diff --git a/tests/gendiffcov/filter/trivial2.c b/tests/gendiffcov/filter/trivial2.c new file mode 100644 index 00000000..78869000 --- /dev/null +++ b/tests/gendiffcov/filter/trivial2.c @@ -0,0 +1 @@ +int trivial(unsigned abc) {}; diff --git a/tests/gendiffcov/filter/trivial3.c b/tests/gendiffcov/filter/trivial3.c new file mode 100644 index 00000000..9dc61a75 --- /dev/null +++ b/tests/gendiffcov/filter/trivial3.c @@ -0,0 +1 @@ +}; // simulate end of class decl diff --git a/tests/gendiffcov/filter/trivialMethod.c b/tests/gendiffcov/filter/trivialMethod.c new file mode 100644 index 00000000..a77434d8 --- /dev/null +++ b/tests/gendiffcov/filter/trivialMethod.c @@ -0,0 +1,3 @@ +Data::Data(int a) { + // nothing here +} diff --git a/tests/gendiffcov/function/Makefile b/tests/gendiffcov/function/Makefile new file mode 100644 index 00000000..aee285aa --- /dev/null +++ b/tests/gendiffcov/function/Makefile @@ -0,0 +1,6 @@ +include ../../common.mak + +TESTS := function.sh + +clean: + $(shell ./function.sh --clean) diff --git a/tests/gendiffcov/function/baseline_call_current_call.gold b/tests/gendiffcov/function/baseline_call_current_call.gold new file 
mode 100644 index 00000000..efa13bc2 --- /dev/null +++ b/tests/gendiffcov/function/baseline_call_current_call.gold @@ -0,0 +1,38 @@ +
+ +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + diff --git a/tests/gendiffcov/function/baseline_call_current_call_region.gold b/tests/gendiffcov/function/baseline_call_current_call_region.gold new file mode 100644 index 00000000..debac4ba --- /dev/null +++ b/tests/gendiffcov/function/baseline_call_current_call_region.gold @@ -0,0 +1,38 @@ + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + diff --git a/tests/gendiffcov/function/baseline_call_current_nocall.gold b/tests/gendiffcov/function/baseline_call_current_nocall.gold new file mode 100644 index 00000000..cc8ee92c --- /dev/null +++ b/tests/gendiffcov/function/baseline_call_current_nocall.gold @@ -0,0 +1,38 @@ + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + diff --git a/tests/gendiffcov/function/baseline_call_current_nocall_region.gold b/tests/gendiffcov/function/baseline_call_current_nocall_region.gold new file mode 100644 index 00000000..3926986e --- /dev/null +++ b/tests/gendiffcov/function/baseline_call_current_nocall_region.gold @@ -0,0 +1,38 @@ + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + diff --git a/tests/gendiffcov/function/baseline_nocall_current_call.gold b/tests/gendiffcov/function/baseline_nocall_current_call.gold new file mode 100644 index 00000000..c2608511 --- /dev/null +++ b/tests/gendiffcov/function/baseline_nocall_current_call.gold @@ -0,0 +1,38 @@ + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + diff --git a/tests/gendiffcov/function/baseline_nocall_current_call_region.gold b/tests/gendiffcov/function/baseline_nocall_current_call_region.gold new file mode 100644 index 00000000..553d373a --- /dev/null +++ b/tests/gendiffcov/function/baseline_nocall_current_call_region.gold @@ -0,0 +1,38 @@ + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + diff --git a/tests/gendiffcov/function/baseline_nocall_current_nocall.gold b/tests/gendiffcov/function/baseline_nocall_current_nocall.gold new file mode 100644 index 00000000..91c6601a --- /dev/null +++ b/tests/gendiffcov/function/baseline_nocall_current_nocall.gold @@ -0,0 +1,38 @@ + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + diff --git a/tests/gendiffcov/function/baseline_nocall_current_nocall_region.gold b/tests/gendiffcov/function/baseline_nocall_current_nocall_region.gold new file mode 100644 index 00000000..5cd19f10 --- /dev/null +++ b/tests/gendiffcov/function/baseline_nocall_current_nocall_region.gold @@ -0,0 +1,38 @@ + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + +-- + + diff --git a/tests/gendiffcov/function/current.cpp b/tests/gendiffcov/function/current.cpp new file mode 100644 index 00000000..b8bf28c8 --- /dev/null +++ b/tests/gendiffcov/function/current.cpp @@ -0,0 +1,102 @@ +/** + * @file current.cpp + * @brief current code for function categorization test + */ + +#include + +void +unchanged() +{ + std::cout << "called unchanged function" << std::endl; +} + +void +unchanged_notCalled() +{ + std::cout << "called unchanged_notCalled function" << std::endl; +} + +void +added() +{ + std::cout << "called added function" << std::endl; + std::cout << "added some code" << std::endl; +} + +void +added_notCalled() +{ + std::cout << "called add_notCalled function" << std::endl; + 
std::cout << "added some code" << std::endl; +} + +void +removed() +{ + std::cout << "called removed function" << std::endl; +} + +void +removed_notCalled() +{ + std::cout << "called removed_notCalled function" << std::endl; +} + +void +included() +{ + std::cout << "called included function" << std::endl; +#ifdef ADD_CODE + std::cout << "this code excluded in baseline" << std::endl; +#endif +} + +void +included_notCalled() +{ + std::cout << "called included_notCalled function" << std::endl; +#ifdef ADD_CODE + std::cout << "this code excluded in baseline" << std::endl; +#endif +} + +void +excluded() +{ + std::cout << "called excluded function" << std::endl; +#ifndef REMOVE_CODE + std::cout << "this code excluded in current" << std::endl; +#endif +} + +void +excluded_notCalled() +{ + std::cout << "called excluded_notCalled function" << std::endl; +#ifndef REMOVE_CODE + std::cout << "this code excluded in current" << std::endl; +#endif +} + +void inserted() +{ + std::cout << "function inserted gets added" << std::endl; +} + +void inserted_notCalled() +{ + std::cout << "function inserted_notCalled gets inserted" << std::endl; +} + +int main(int ac, char **av) +{ +#ifdef CALL_FUNCTIONS + unchanged(); + added(); + removed(); + included(); + excluded(); + inserted(); +#endif +} diff --git a/tests/gendiffcov/function/function.sh b/tests/gendiffcov/function/function.sh new file mode 100755 index 00000000..61413ede --- /dev/null +++ b/tests/gendiffcov/function/function.sh @@ -0,0 +1,274 @@ +#!/bin/bash +set +x + +source ../../common.tst + +rm -f test.cpp *.gcno *.gcda a.out *.info *.info.gz diff.txt diff_r.txt diff_broken.txt *.log *.err *.json dumper* results.xlsx *.diff *.txt template *gcov +rm -rf baseline_*call_current*call alias* no_alias* + +clean_cover + +if [[ 1 == $CLEAN_ONLY ]] ; then + exit 0 +fi + +if ! type "${CXX}" >/dev/null 2>&1 ; then + echo "Missing tool: $CXX" >&2 + exit 2 +fi + +if ! python3 -c "import xlsxwriter" >/dev/null 2>&1 ; then + echo "Missing python module: xlsxwriter" >&2 + exit 2 +fi + +#PARALLEL='' +#PROFILE="'' + +# filter out the compiler-generated _GLOBAL__sub_... symbol +LCOV_BASE="$EXTRA_GCOV_OPTS --branch-coverage $PARALLEL $PROFILE --no-external --ignore unused,unsupported --erase-function .*GLOBAL.*" +VERSION_OPTS="--version-script $GET_VERSION" +LCOV_OPTS="$LCOV_BASE $VERSION_OPTS" +DIFFCOV_OPTS="--filter line,branch,function --function-coverage --branch-coverage --demangle-cpp --frame --prefix $PARENT --version-script $GET_VERSION $PROFILE $PARALLEL" +#DIFFCOV_OPTS="--function-coverage --branch-coverage --demangle-cpp --frame" +#DIFFCOV_OPTS='--function-coverage --branch-coverage --demangle-cpp' + + +echo * + +echo `which gcov` +echo `which lcov` + +ln -s initial.cpp test.cpp +${CXX} --coverage -DCALL_FUNCTIONS test.cpp +./a.out + + +echo lcov $LCOV_OPTS --capture --directory . --output-file baseline_call.info --test-name myTest +$COVER $LCOV_TOOL $LCOV_OPTS --capture --directory . --output-file baseline_call.info --test-name myTest +if [ 0 != $? ] ; then + echo "ERROR: lcov --capture failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +gzip -c baseline_call.info > baseline_call.info.gz + +# run again - without version info: +echo lcov $LCOV_BASE --capture --directory . --output-file baseline_no_vers.info --test-name myTest +$COVER $LCOV_TOOL $LCOV_BASE --capture --directory . --output-file baseline_no_vers.info --test-name myTest +if [ 0 != $? 
] ; then + echo "ERROR: lcov --capture no version failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep VER: baseline_no_vers.info +if [ 0 == $? ] ; then + echo "ERROR: lcov contains version info" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +# insert the version info +echo lcov $VERSION_OPTS --rc compute_file_version=1 --add-tracefile baseline_no_vers.info --output-file baseline_vers.info +$COVER $LCOV_TOOL $VERSION_OPTS --rc compute_file_version=1 --add-tracefile baseline_no_vers.info --output-file baseline_vers.info +if [ 0 != $? ] ; then + echo "ERROR: lcov insert version failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +diff baseline_vers.info baseline_call.info +if [ 0 != $? ] ; then + echo "ERROR: data differs after version insert" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +rm -f test.gcno test.gcda a.out + +${CXX} --coverage test.cpp +./a.out + +echo lcov $LCOV_OPTS --capture --directory . --output-file baseline_nocall.info --test-name myTest +$COVER $LCOV_TOOL $LCOV_OPTS --capture --directory . --output-file baseline_nocall.info --test-name myTest +if [ 0 != $? ] ; then + echo "ERROR: lcov --capture (2) failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +gzip -c baseline_call.info > baseline_call.info.gz + +export PWD=`pwd` +echo $PWD + +rm -f test.cpp test.gcno test.gcda a.out +ln -s current.cpp test.cpp +${CXX} --coverage -DADD_CODE -DREMOVE_CODE -DCALL_FUNCTIONS test.cpp +./a.out +echo lcov $LCOV_OPTS --capture --directory . --output-file current_call.info +$COVER $LCOV_TOOL $LCOV_OPTS --capture --directory . --output-file current_call.info +if [ 0 != $? ] ; then + echo "ERROR: lcov --capture (3) failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +gzip -c current_call.info > current_call.info.gz + +rm -f test.gcno test.gcda a.out +${CXX} --coverage -DADD_CODE -DREMOVE_CODE test.cpp +./a.out +echo lcov $LCOV_OPTS --capture --directory . --output-file current_nocall.info +$COVER $LCOV_TOOL $LCOV_OPTS --capture --directory . --output-file current_nocall.info +if [ 0 != $? ] ; then + echo "ERROR: lcov --capture (4) failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +gzip -c current_nocall.info > current_nocall.info.gz + + +diff -u initial.cpp current.cpp | perl -pi -e "s#(initial|current)*\.cpp#$ROOT/test.cpp#g" > diff.txt + +if [ $? != 0 ] ; then + echo "diff failed" + exit 1 +fi + +#check for end line markers - if present then check for whole-function +#categorization +grep -E 'FNL:[0-9]+,[0-9]+,[0-9]+' baseline_call.info +NO_END_LINE=$? + +if [ $NO_END_LINE == 0 ] ; then + echo "----------------------" + echo " compiler version support start/end reporting" + SUFFIX='_region' +else + echo "----------------------" + echo " compiler version DOES NOT support start/end reporting" + SUFFIX='' +fi + +for base in baseline_call baseline_nocall ; do + for curr in current_call current_nocall ; do + OUT=${base}_${curr} + echo genhtml -o $OUT $DIFFCOV_OPTS --baseline-file ${base}.info --diff-file diff.txt ${curr}.info --ignore inconsistent + $COVER $GENHTML_TOOL -o $OUT $DIFFCOV_OPTS --baseline-file ${base}.info --diff-file diff.txt ${curr}.info --elide-path --ignore inconsistent + if [ $? 
!= 0 ] ; then + echo "genhtml $OUT failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + grep 'coverFn"' -A 1 $OUT/function/test.cpp.func.html > $OUT.txt + + diff -b $OUT.txt ${OUT}${SUFFIX}.gold | tee $OUT.diff + + if [ ${PIPESTATUS[0]} != 0 ] ; then + if [ $UPDATE != 0 ] ; then + echo "update $out" + cp $OUT.txt ${OUT}${SUFFIX}.gold + else + echo "diff $OUT failed - see $OUT.diff" + exit 1 + fi + else + rm $OUT.diff + fi + done +done + +# test function alias suppression +rm *.gcda *.gcno +${CXX} --coverage -std=c++11 -o template template.cpp +./template +echo lcov $LCOV_OPTS --capture --directory . --demangle --output-file template.info --no-external --branch-coverage --test-name myTest +$COVER $LCOV_TOOL $LCOV_OPTS --capture --demangle --directory . --output-file template.info --no-external --branch-coverage --test-name myTest +if [ 0 != $? ] ; then + echo "ERROR: lcov --capture failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +COUNT=`grep -c FNA: template.info` +if [ 4 != $COUNT ] ; then + echo "ERROR: expected 4 FNA - found $COUNT" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +for opt in '' '--forget-test-names' ; do + outdir="alias$opt" + echo genhtml -o $outdir $opt $DIFFCOV_OPTS template.info --show-proportion + $COVER $GENHTML_TOOL -o $outdir $pt $DIFFCOV_OPTS template.info --show-proportion + if [ $? != 0 ] ; then + echo "genhtml $outdir failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + #expect 5 entries in 'func' list (main, leader, 3 aliases + COUNT=`grep -c 'coverFnAlias"' $outdir/function/template.cpp.func.html` + if [ 3 != $COUNT ] ; then + echo "ERROR: expected 3 aliases - found $COUNT" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + + outdir="no_alias$opt" + # suppress aliases + echo genhtml -o $outdir $opt $DIFFCOV_OPTS template.info --show-proportion --suppress-alias + $COVER $GENHTML_TOOL -o $outdir $opt $DIFFCOV_OPTS template.info --show-proportion --suppress-alias + if [ $? != 0 ] ; then + echo "genhtml $outdir failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + #expect 2 entries in 'func' list + COUNT=`grep -c 'coverFn"' $outdir/function/template.cpp.func.html` + if [ 2 != $COUNT ] ; then + echo "ERROR: expected 2 functions - found $COUNT" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + COUNT=`grep -c 'coverFnAlias"' $outdir/function/template.cpp.func.html` + if [ 0 != $COUNT ] ; then + echo "ERROR: expected zero aliases - found $COUNT" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + + +# and generate a spreadsheet..check that we don't crash +SPREADSHEET=$LCOV_HOME/scripts/spreadsheet.py +if [ ! -f $SPREADSHEET ] ; then + SPREADSHEET=$LCOV_HOME/share/lcov/support-scripts/spreadsheet.py +fi +if [ -f $SPREADSHEET ] ; then + $SPREADSHEET -o results.xlsx `find . -name "*.json"` + if [ 0 != $? 
] ; then + echo "ERROR: spreadsheet generation failed" + exit 1 + fi +else + echo "Did not find $SPREADSHEET to run test" + exit 1 +fi + +echo "Tests passed" + +if [ "x$COVER" != "x" ] && [ $LOCAL_COVERAGE == 1 ] ; then + cover +fi diff --git a/tests/gendiffcov/function/initial.cpp b/tests/gendiffcov/function/initial.cpp new file mode 100644 index 00000000..3f31dfbc --- /dev/null +++ b/tests/gendiffcov/function/initial.cpp @@ -0,0 +1,102 @@ +/** + * @file initial.cpp + * @brief baseline code for function categorization test + */ + +#include + +void +unchanged() +{ + std::cout << "called unchanged function" << std::endl; +} + +void +unchanged_notCalled() +{ + std::cout << "called unchanged_notCalled function" << std::endl; +} + +void +added() +{ + std::cout << "called added function" << std::endl; +} + +void +added_notCalled() +{ + std::cout << "called add_notCalled function" << std::endl; +} + +void +removed() +{ + std::cout << "called removed function" << std::endl; + std::cout << "this line gets removed" << std::endl; +} + +void +removed_notCalled() +{ + std::cout << "called removed_notCalled function" << std::endl; + std::cout << "this line gets removed" << std::endl; +} + +void +included() +{ + std::cout << "called included function" << std::endl; +#ifdef ADD_CODE + std::cout << "this code excluded in baseline" << std::endl; +#endif +} + +void +included_notCalled() +{ + std::cout << "called included_notCalled function" << std::endl; +#ifdef ADD_CODE + std::cout << "this code excluded in baseline" << std::endl; +#endif +} + +void +excluded() +{ + std::cout << "called excluded function" << std::endl; +#ifndef REMOVE_CODE + std::cout << "this code excluded in current" << std::endl; +#endif +} + +void +excluded_notCalled() +{ + std::cout << "called excluded_notCalled function" << std::endl; +#ifndef REMOVE_CODE + std::cout << "this code excluded in current" << std::endl; +#endif +} + +void deleted() +{ + std::cout << "function deleted gets deleted" << std::endl; +} + +void deleted_notCalled() +{ + std::cout << "this deleted_notCalled gets deleted" << std::endl; +} + +int main(int ac, char **av) +{ +#ifdef CALL_FUNCTIONS + unchanged(); + added(); + removed(); + included(); + excluded(); + deleted(); +#endif +} diff --git a/tests/gendiffcov/function/template.cpp b/tests/gendiffcov/function/template.cpp new file mode 100644 index 00000000..eaad4e56 --- /dev/null +++ b/tests/gendiffcov/function/template.cpp @@ -0,0 +1,19 @@ +// test genhtml function alias suppression + +#include + +template +void +func(const char * str) +{ + std::cout << str << ": sizeof(T) " << sizeof(T) << i << std::endl; +} + +int +main(int ac, char **av) +{ + func("char/1"); + func("char/2"); + func("unsigned"); + return 0; +} diff --git a/tests/gendiffcov/insensitive/Makefile b/tests/gendiffcov/insensitive/Makefile new file mode 100644 index 00000000..40285556 --- /dev/null +++ b/tests/gendiffcov/insensitive/Makefile @@ -0,0 +1,6 @@ +include ../../common.mak + +TESTS := insensitive.sh + +clean: + $(shell ./insensitive.sh --clean) diff --git a/tests/gendiffcov/insensitive/annotate.sh b/tests/gendiffcov/insensitive/annotate.sh new file mode 100755 index 00000000..65503446 --- /dev/null +++ b/tests/gendiffcov/insensitive/annotate.sh @@ -0,0 +1,97 @@ +#!/usr/bin/env perl + +# A bit hacky - to test that case insensitivity is working properly +# in all the scripts: +# look for a file matching the input argument name (case insensitive) +# Then nmunge the return data to further scramble the name + +use strict; +use warnings; +use 
DateTime; +use POSIX qw(strftime); +use File::Spec; + +sub get_modify_time($) +{ + my $filename = shift; + my @stat = stat $filename; + my $tz = strftime("%z", localtime($stat[9])); + $tz =~ s/([0-9][0-9])$/:$1/; + return strftime("%Y-%m-%dT%H:%M:%S", localtime($stat[9])) . $tz; +} + +my $file = $ARGV[0]; + +my ($volume, $dir, $f) = File::Spec->splitpath($file); +$dir = File::Spec->canonpath($dir); +my @stack; +OUTER: while ($dir && + !-d $dir) { + push(@stack, $f); + ($volume, $dir, $f) = File::Spec->splitpath($dir); + $dir = File::Spec->canonpath($dir); + + if (opendir(my $d, $dir)) { + foreach my $name (readdir($d)) { + if ($name =~ /^$f$/i) { + push(@stack, $name); + last OUTER; + } + } + } +} +my $path = $dir; +-d $dir or die("$dir is not a directory"); +while (1 < scalar(@stack)) { + my $f = pop(@stack); + opendir(my $d, $path) or die("cannot read dir $path"); + foreach my $name (readdir($d)) { + if ($name =~ /^$f$/i) { + $path = File::Spec->catdir($path, $name); + last; + } + } +} + +die("I can't handle that path munging: $file") unless (-d $path); +die("I don't seem to have a filename") unless 1 >= scalar(@stack); +$f = pop(@stack) + if @stack; # remaining element should be the filename we are looking for + +my $annotated = File::Spec->catfile($path, $f . ".annotated"); +opendir my $d, $path or die("cannot read $path"); +foreach my $name (readdir($d)) { + if ($name =~ /^$f\.annotated$/i) { + $annotated = File::Spec->catfile($path, $name); + } elsif ($name =~ /^$f$/i) { # case insensitive match + $file = File::Spec->catfile($path, $name); + } +} + +my $now = DateTime->now(); + +if (open(HANDLE, '<', $annotated)) { + while (my $line = ) { + chomp $line; + $line =~ s/\r//g; # remove CR from line-end + my ($commit, $who, $days, $text) = split(/\|/, $line, 4); + my $duration = DateTime::Duration->new(days => $days); + my $date = $now - $duration; + + printf("%s|%s|%s|%s\n", $commit, $who, $date, $text); + } + close(HANDLE) or die("unable to close $annotated: $!"); +} elsif (open(HANDLE, $file)) { + my $mtime = get_modify_time($file); # when was the file last modified? + my $owner = + getpwuid((stat($file))[4]); # who does the filesystem think owns it? + while (my $line = ) { + chomp $line; + # Also remove CR from line-end + $line =~ s/\015$//; + printf("%s|%s|%s|%s\n", "NONE", $owner, $mtime, $line); + } + close(HANDLE) or die("unable to close $annotated: $!"); +} else { + die("unable to open $file: $!"); +} diff --git a/tests/gendiffcov/insensitive/insensitive.sh b/tests/gendiffcov/insensitive/insensitive.sh new file mode 100755 index 00000000..25024ccd --- /dev/null +++ b/tests/gendiffcov/insensitive/insensitive.sh @@ -0,0 +1,215 @@ +#!/bin/bash +set +x + +source ../../common.tst + +rm -f *.cpp *.gcno *.gcda a.out *.info *.info.gz diff.txt *.log *.err *.json dumper* *.annotated *.log TEST.cpp TeSt.cpp +rm -rf ./baseline ./current ./differential* ./cover_db + +clean_cover + +if [[ 1 == $CLEAN_ONLY ]] ; then + exit 0 +fi + +if ! type "${CXX}" >/dev/null 2>&1 ; then + echo "Missing tool: $CXX" >&2 + exit 2 +fi + +ANNOTATE=${SCRIPT_DIR}/p4annotate + +if [ ! 
-f $ANNOTATE ] ; then + echo "annotate '$ANNOTATE' not found" + exit 1 +fi + +#PARALLEL='' +#PROFILE="'' + +LCOV_OPTS="$EXTRA_GCOV_OPTS --branch-coverage --version-script `pwd`/version.sh $PARALLEL $PROFILE" +DIFFCOV_OPTS="--function-coverage --branch-coverage --demangle-cpp --frame --prefix $PARENT --version-script `pwd`/version.sh $PROFILE $PARALLEL" + + +echo * + +# filename was all upper case +ln -s ../simple/simple.cpp TEST.cpp +${CXX} --coverage TEST.cpp +./a.out + +echo `which gcov` +echo `which lcov` + +# old gcc version generates inconsistent line/function data +IFS='.' read -r -a VER <<< `${CC} -dumpversion` +if [ "${VER[0]}" -lt 5 ] ; then + IGNORE="--ignore inconsistent" +fi + +echo lcov $LCOV_OPTS --capture --directory . --output-file baseline.info $IGNORE +$COVER $LCOV_TOOL $LCOV_OPTS --capture --directory . --output-file baseline.info --no-external $IGNORE +if [ 0 != $? ] ; then + echo "ERROR: lcov --capture failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +gzip -c baseline.info > baseline.info.gz + +# newer versions of gcc generate coverage data with full paths to sources +# in '.' - whereas older versions have relative paths. +# In case of relative paths, need some additional genhtml flags to make +# tests run the same way +grep './TEST.cpp' baseline.info +if [ 0 == $? ] ; then + # found - need some flags + GENHTML_PORT='--elide-path-mismatch' + LCOV_PORT='--substitute s#./#pwd/# --ignore unused' +fi + +# test merge with names that differ in case +# ignore 'source' error when we try to open the file (for filtering) - because +# our filesystem is not actually case insensitive. +sed -e 's/TEST.cpp/test.cpp/g' < baseline.info > baseline2.info +$COVER $LCOV_TOOL $LCOV_OPTS --output merge.info -a baseline.info -a baseline2.info --ignore source +if [ 0 != $? ] ; then + echo "ERROR: merge with mismatched case did not fail" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +COUNT=`grep -c SF: merge.info` +if [ $COUNT != '2' ] ; then + echo "ERROR: expected 2 files found $COUNT" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +$COVER $LCOV_TOOL $LCOV_OPTS --rc case_insensitive=1 --output merge2.info -a baseline.info -a baseline2.info --ignore source +if [ 0 != $? ] ; then + echo "ERROR: ignore error case insensitive merge failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +COUNT=`grep -c SF: merge2.info` +if [ $COUNT != '1' ] ; then + echo "ERROR: expected 1 file in case-insensitive result found $COUNT" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +export PWD=`pwd` +echo $PWD + +rm -f TEST.cpp *.gcno *.gcda a.out +ln -s ../simple/simple2.cpp TeSt.cpp +${CXX} --coverage -DADD_CODE -DREMOVE_CODE TeSt.cpp +./a.out +echo lcov $LCOV_OPTS --capture --directory . --output-file current.info $IGNORE +$COVER $LCOV_TOOL $LCOV_OPTS --capture --directory . --output-file current.info $IGNORE +if [ 0 != $? ] ; then + echo "ERROR: lcov --capture TeSt failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# udiff file has yet a different case... 
+ +( cd ../simple ; diff -u simple.cpp simple2.cpp ) | sed -e "s|simple2*\.cpp|$ROOT/tEsT.cpp|g" > diff.txt + +# and put yet another different case in the annotate file name +ln -s ../simple/simple2.cpp.annotated TEst.cpp.annotated + +# check that this works with test names +# need to not do the exiistence callback because the 'insensitive' name +# won't be found but the version-check in the .info file already contains +# a value - so we would get a version check error +echo genhtml $DIFFCOV_OPTS --baseline-file ./baseline.info --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --show-noncode -o differential ./current.info --rc case_insensitive=1 --ignore-annotate,source $IGNORE --rc check_existence_before_callback=0 --ignore inconsistent +$COVER $GENHTML_TOOL $DIFFCOV_OPTS --baseline-file ./baseline.info --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --show-noncode -o differential ./current.info --rc case_insensitive=1 $GENHTML_PORT --ignore annotate,source $IGNORE --rc check_existence_before_callback=0 --ignore inconsistent +if [ 0 != $? ] ; then + echo "ERROR: genhtml differential failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# check warning +echo lcov $LCOV_OPTS --capture --directory . --output-file current.info --substitute 's/test/TEST/g' $IGNORE +$COVER $LCOV_TOOL $LCOV_OPTS --capture --directory . --output-file current.info --substitute 's/test\b/TEST/' --rc case_insensitive=1 --ignore unused,source $IGNORE 2>&1 | tee warn.log +if [ 0 != $? ] ; then + echo "ERROR: lcov --capture TeSt failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep "does not seem to be case insensitive" warn.log +if [ 0 != $? ] ; then + echo "did not find expected warning message in warn.log" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +rm -f TeSt.cpp + +# check annotation failure message... +# check that this works with test names +echo genhtml $DIFFCOV_OPTS --baseline-file ./baseline.info --diff-file diff.txt --annotate-script $ANNOTATE --show-owners all --show-noncode -o differential2 ./current.info --ignore source $IGNORE --rc check_existence_before_callback=0 +$COVER $GENHTML_TOOL $DIFFCOV_OPTS --baseline-file ./baseline.info --diff-file diff.txt --annotate-script $ANNOTATE --show-owners all --show-noncode -o differential2 ./current.info $GENHTML_PORT --ignore source $IGNORE --rc check_existence_before_callback=0 2>&1 | tee fail.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: expected annotation error but didn't find" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep -i -E "Error: \(annotate\) annotate command failed: .*non-zero exit status" fail.log +if [ 0 != $? ] ; then + echo "did not find expected annotate error message in fail.log" + exit 1 +fi + +# just ignore the version check error this time.. +echo genhtml $DIFFCOV_OPTS --baseline-file ./baseline.info --diff-file diff.txt --annotate-script $ANNOTATATE --show-owners all --show-noncode -o differential3 ./current.info --ignore-source,annotate,version $IGNORE +$COVER $GENHTML_TOOL $DIFFCOV_OPTS --baseline-file ./baseline.info --diff-file diff.txt --annotate-script $ANNOTATE --show-owners all --show-noncode -o differential3 ./current.info $GENHTML_PORT --ignore source,annotate,version $IGNORE 2>&1 | tee fail2.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: expected synthesize error but didn't find" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep -i -E "Warning: \(annotate\).* non-zero exit status" fail2.log +if [ 0 != $? 
] ; then + echo "did not find expected annotate warning message in fail2.log" + exit 1 +fi +grep "is not readable or doesn't exist" fail2.log +if [ 0 != $? ] ; then + echo "did not find expected existence error message in fail2.log" + exit 1 +fi + +echo genhtml $DIFFCOV_OPTS --baseline-file ./baseline.info --diff-file diff.txt --annotate-script $ANNOTATATE --show-owners all --show-noncode -o differential4 ./current.info --ignore-source,annotate,version --synthesize $IGNORE +$COVER $GENHTML_TOOL $DIFFCOV_OPTS --baseline-file ./baseline.info --diff-file diff.txt --annotate-script $ANNOTATE --show-owners all --show-noncode -o differential4 ./current.info $GENHTML_PORT --ignore source,annotate,version --synthesize $IGNORE 2>&1 | tee fail3.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "ERROR: unexpected synthesize error" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep -E "cannot read .+synthesizing fake content" fail3.log +if [ 0 != $? ] ; then + echo "did not find expected annotate warning message in fail3.log" + exit 1 +fi + +echo "Tests passed" + +if [ "x$COVER" != "x" ] && [ 0 != $LOCAL_COVERAGE ]; then + cover +fi diff --git a/tests/gendiffcov/insensitive/version.sh b/tests/gendiffcov/insensitive/version.sh new file mode 100755 index 00000000..a12167db --- /dev/null +++ b/tests/gendiffcov/insensitive/version.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env perl + +#just return constant value for the moment + +print("1\n"); +exit(0); diff --git a/tests/gendiffcov/simple/Makefile b/tests/gendiffcov/simple/Makefile new file mode 100644 index 00000000..0b007fa9 --- /dev/null +++ b/tests/gendiffcov/simple/Makefile @@ -0,0 +1,6 @@ +include ../../common.mak + +TESTS := script.sh + +clean: + $(shell ./script.sh --clean) diff --git a/tests/gendiffcov/simple/annotate.sh b/tests/gendiffcov/simple/annotate.sh new file mode 100755 index 00000000..35c59ebf --- /dev/null +++ b/tests/gendiffcov/simple/annotate.sh @@ -0,0 +1,45 @@ +#!/bin/env perl + +use strict; +use warnings; +use DateTime; +use POSIX qw(strftime); + +sub get_modify_time($) +{ + my $filename = shift; + my @stat = stat $filename; + my $tz = strftime("%z", localtime($stat[9])); + $tz =~ s/([0-9][0-9])$/:$1/; + return strftime("%Y-%m-%dT%H:%M:%S", localtime($stat[9])) . $tz; +} + +my $file = $ARGV[0]; +my $annotated = $file . ".annotated"; +my $now = DateTime->now(); + +if (open(HANDLE, '<', $annotated)) { + while (my $line = ) { + chomp $line; + $line =~ s/\r//g; # remove CR from line-end + my ($commit, $who, $days, $text) = split(/\|/, $line, 4); + my $duration = DateTime::Duration->new(days => $days); + my $date = $now - $duration; + + printf("%s|%s|%s|%s\n", $commit, $who, $date, $text); + } + close(HANDLE) or die("unable to close $annotated: $!"); +} elsif (open(HANDLE, $file)) { + my $mtime = get_modify_time($file); # when was the file last modified? + my $owner = + getpwuid((stat($file))[4]); # who does the filesystem think owns it? 
+    while (my $line = <HANDLE>) {
+        chomp $line;
+        # Also remove CR from line-end
+        $line =~ s/\015$//;
+        printf("%s|%s|%s|%s\n", "NONE", $owner, $mtime, $line);
+    }
+    close(HANDLE) or die("unable to close $file: $!");
+} else {
+    die("unable to open $file: $!");
+}
diff --git a/tests/gendiffcov/simple/script.sh b/tests/gendiffcov/simple/script.sh
new file mode 100755
index 00000000..f86d6dff
--- /dev/null
+++ b/tests/gendiffcov/simple/script.sh
@@ -0,0 +1,1555 @@
+#!/bin/bash
+set +x
+
+source ../../common.tst
+
+rm -f test.cpp *.gcno *.gcda a.out *.info *.info.gz diff.txt diff_r.txt diff_broken.txt *.log *.err *.json dumper* results.xlsx annotate.{cpp,exe} c d ./cover_db_py names.data linked.cpp linked_diff.txt *.msg
+rm -rf ./baseline ./current ./differential* ./reverse ./diff_no_baseline ./no_baseline ./no_annotation ./no_owners differential_nobranch reverse_nobranch baseline-filter* noncode_differential* broken mismatchPath elidePath ./cover_db ./criteria* ./mismatched ./navigation differential_prop proportion ./annotate ./current-* ./current_prefix* select select2 html_report ./usage ./errOut ./noNames no_source linked linked_err linked_elide linked_dir failUnder expect_err expect recategorize
+
+clean_cover
+
+if [[ 1 == $CLEAN_ONLY ]] ; then
+    exit 0
+fi
+
+if ! type "${CXX}" >/dev/null 2>&1 ; then
+    echo "Missing tool: $CXX" >&2
+    exit 2
+fi
+
+if ! python3 -c "import xlsxwriter" >/dev/null 2>&1 ; then
+    echo "Missing python module: xlsxwriter" >&2
+    exit 2
+fi
+
+echo *
+
+CRITERIA=${SCRIPT_DIR}/criteria
+SELECT=${SCRIPT_DIR}/select.pm
+
+#PARALLEL=''
+#PROFILE=''
+
+echo "COVER = '$COVER'"
+echo "COVER_DB = '$COVER_DB'"
+echo "PYCOVER = '$PYCOVER'"
+echo "PYCOV_DB = '$PYCOV_DB'"
+
+BASE_OPTS="--branch-coverage $PARALLEL $PROFILE"
+
+echo `which gcov`
+echo `which lcov`
+# old version of gcc has inconsistent line/function data
+IFS='.' read -r -a VER <<< `${CC} -dumpversion`
+if [ "${VER[0]}" -lt 5 ] ; then
+    IGNORE="--ignore inconsistent,category"
+fi
+if [ "${VER[0]}" -lt 9 ] ; then
+    DERIVE='--rc derive_function_end_line=1'
+elif [ "${VER[0]}" -ge 14 ] ; then
+    ENABLE_MCDC=1
+    BASE_OPTS="$BASE_OPTS --mcdc"
+    # enable MCDC
+    COVERAGE_OPTS="-fcondition-coverage"
+fi
+LCOV_OPTS="$EXTRA_GCOV_OPTS $BASE_OPTS --version-script $GET_VERSION $MD5_OPT --version-script --allow-missing"
+DIFFCOV_NOFRAME_OPTS="$BASE_OPTS --demangle-cpp --prefix $PARENT --version-script $GET_VERSION $MD5_OPT --version-script --allow-missing"
+#DIFFCOV_OPTS="--function-coverage --branch-coverage --demangle-cpp --frame"
+#DIFFCOV_OPTS='--function-coverage --branch-coverage --demangle-cpp'
+DIFFCOV_OPTS="$DIFFCOV_NOFRAME_OPTS --frame"
+
+status=0
+cp simple.cpp test.cpp
+${CXX} --coverage $COVERAGE_OPTS test.cpp
+./a.out
+
+
+
+echo lcov $LCOV_OPTS --capture --directory . --output-file baseline.info $IGNORE --memory 20
+$COVER $LCOV_TOOL $LCOV_OPTS --capture --directory . --output-file baseline.info $IGNORE --comment "this is the baseline" --memory 20
+if [ 0 != $? ] ; then
+    echo "ERROR: lcov --capture failed"
+    status=1
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+cp baseline.info baseline_orig.info
+# make the version number look different so the new diff file
+# consistency check will pass
+sed -i -E 's/VER:#1/VER:#2/' baseline.info
+gzip -c baseline.info > baseline.info.gz
+
+# check that we wrote the comment that was expected...
+head -1 baseline.info | grep -E '^#.+ the baseline$'
+if [ 0 != $?
] ; then + echo "ERROR: didn't write comment into capture" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +# newer versions of gcc generate coverage data with full paths to sources +# in '.' - whereas older versions have relative paths. +# In case of relative paths, need some additional genhtml flags to make +# tests run the same way +grep './test.cpp' baseline.info +if [ 0 == $? ] ; then + # found - need some flags + GENHTML_PORT='--elide-path-mismatch' + LCOV_PORT='--substitute s#./#pwd/# --ignore unused' +fi + +echo lcov $LCOV_OPTS --capture --directory . --output-file baseline_name.info --test-name myTest $IGNORE +$COVER $LCOV_TOOL $LCOV_OPTS --capture --directory . --output-file baseline_name.info --test-name myTest $IGNORE +if [ 0 != $? ] ; then + echo "ERROR: lcov --capture with namefailed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +# make the version number look different so the new diff file +# consistency check will pass +sed -i -E 's/VER:#1/VER:#2/' baseline_name.info + +# test merge with differing version +# - sed, to make sure versions look different +sed -e 's/VER:/VER:x/g' -e 's/ md5:/ md5:0/g' < baseline.info > baseline2.info +$COVER $LCOV_TOOL $LCOV_OPTS --output merge.info -a baseline.info -a baseline2.info $IGNORE +if [ 0 == $? ] ; then + echo "ERROR: merge with mismatched version did not fail" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +$COVER $LCOV_TOOL $LCOV_OPTS --ignore version --output merge2.info -a baseline.info -a baseline2.info $IGNORE +if [ 0 != $? ] ; then + echo "ERROR: ignore error merge with mismatched version failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +# test filter with differing version +$COVER $LCOV_TOOL $LCOV_OPTS --output filt.info --filter branch,line -a baseline2.info $IGNORE +if [ 0 == $? ] ; then + echo "ERROR: filter with mismatched version did not fail 2" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +$COVER $LCOV_TOOL $LCOV_OPTS --output filt.info --filter branch,line -a baseline2.info $IGNORE --ignore version +if [ 0 != $? ] ; then + echo "ERROR: ignore error filter with mismatched version failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +# run again with version script options passed in string +# test filter with differing version +$COVER $LCOV_TOOL $EXTRA_GCOV_OPTS $BASE_OPTS --version-script "$GET_VERSION_EXE --md5 --allow-missing" --output filt2.info --filter branch,line -a baseline2.info $IGNORE +if [ 0 == $? ] ; then + echo "ERROR: filter with mismatched version did not fail 2" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +if [ -e filt2.info ] ; then + echo "ERROR: filter failed by still produced result" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +$COVER $LCOV_TOOL $EXTRA_GCOV_OPTS $BASE_OPTS --version-script "$GET_VERSION_EXE --md5 --allow-missing" --output filt2.info --filter branch,line -a baseline2.info $IGNORE --ignore version +if [ 0 != $? ] ; then + echo "ERROR: ignore error filter with combined opts and mismatched version failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +diff filt.info filt2.info +if [ 0 != $? ] ; then + echo "ERROR: string and separate args produced different result" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# test the 'fail under' flag +echo $LCOV_TOOL $LCOV_OPTS --output failUnder.info $IGNORE --capture -d . 
--ignore version --fail-under-lines 70 +$COVER $LCOV_TOOL $LCOV_OPTS --output failUnder.info $IGNORE --capture -d . --ignore version --fail-under-lines 70 +if [ 0 == $? ] ; then + echo "ERROR: did not fail with low coverage" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +if [ ! -f failUnder.info ] ; then + echo "ERROR: did not write info file when failing" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +echo genhtml $DIFFCOV_OPTS $IGNORE failUnder.info --output-directory ./failUnder --fail-under-lines 70 +$COVER $GENHTML_TOOL $DIFFCOV_OPTS $IGNORE failUnder.info --output-directory ./failUnder --fail-under-lines 70 +if [ 0 == $? ] ; then + echo "ERROR: genhtml did not fail with low coverage" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +if [ ! -f failUnder/index.html ] ; then + echo "ERROR: did not write HTML when failing" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +# run genhtml with mismatched version +echo genhtml $DIFFCOV_OPTS baseline2.info --output-directory ./mismatched +$COVER $GENHTML_TOOL $DIFFCOV_OPTS baseline2.info --output-directory ./mismatched +if [ 0 == $? ] ; then + echo "ERROR: genhtml with mismatched baseline did not fail" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +echo lcov $LCOV_OPTS --capture --directory . --output-file baseline_nobranch.info $IGNORE --rc memory=1024 +$COVER $LCOV_TOOL $LCOV_OPTS --capture --directory . --output-file baseline_nobranch.info $IGNORE --rc memory=1024 +if [ 0 != $? ] ; then + echo "ERROR: lcov --capture (2) failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +# make the version number look different so the new diff file +# consistency check will pass +sed -i -E 's/VER:#1/VER:#2/' baseline_nobranch.info +gzip -c baseline_nobranch.info > baseline_nobranch.info.gz +#genhtml baseline.info --output-directory ./baseline + +echo genhtml $DIFFCOV_OPTS baseline_orig.info --output-directory ./baseline $IGNORE --rc memory_percentage=50 --serialize ./baseline/coverage.dat +$COVER $GENHTML_TOOL $DIFFCOV_OPTS baseline_orig.info --output-directory ./baseline --save $IGNORE --rc memory_percentage=50 --serialize ./baseline/coverage.dat +if [ 0 != $? ] ; then + echo "ERROR: genhtml baseline failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +if [ ! -f ./baseline/coverage.dat ] ; then + echo "ERROR: no serialized data found" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# expect not to see differential categories... + +echo lcov $LCOV_OPTS --filter branch,line --capture --directory . --output-file baseline-filter.info $IGNORE +$COVER $LCOV_TOOL $LCOV_OPTS --filter branch,line --capture --directory . --output-file baseline-filter.info $IGNORE +if [ 0 != $? ] ; then + echo "ERROR: lcov --capture (3) failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + +fi +gzip -c baseline-filter.info > baseline-filter.info.gz +#genhtml baseline.info --output-directory ./baseline +echo genhtml $DIFFCOV_OPTS baseline-filter.info --output-directory ./baseline-filter $IGNORE --missed +$COVER $GENHTML_TOOL $DIFFCOV_OPTS baseline-filter.info --output-directory ./baseline-filter $IGNORE --missed +if [ 0 != $? 
] ; then + echo "ERROR: genhtml baseline-filter failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +#genhtml baseline.info --dark --output-directory ./baseline +echo genhtml $DIFFCOV_OPTS --dark baseline-filter.info --output-directory ./baseline-filter-dark $IGNORE +$COVER $GENHTML_TOOL $DIFFCOV_OPTS --dark baseline-filter.info --output-directory ./baseline-filter-dark $IGNORE +if [ 0 != $? ] ; then + echo "ERROR: genhtml baseline-filter-dark failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +export PWD=`pwd` +echo $PWD + +rm -f test.cpp test.gcno test.gcda a.out +ln -s simple2.cpp test.cpp +${CXX} --coverage $COVERAGE_OPTS -DADD_CODE -DREMOVE_CODE test.cpp +./a.out +echo lcov $LCOV_OPTS --capture --directory . --output-file current.info $IGNORE +$COVER $LCOV_TOOL $LCOV_OPTS --capture --directory . --output-file current.info $IGNORE +if [ 0 != $? ] ; then + echo "ERROR: lcov --capture (4) failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +gzip -c current.info > current.info.gz + +echo lcov $LCOV_OPTS --capture --directory . --output-file current_name.info.gz --test-name myTest $IGNORE +$COVER $LCOV_TOOL $LCOV_OPTS --capture --directory . --output-file current_name.info.gz --test-name myTest $IGNORE +if [ 0 != $? ] ; then + echo "ERROR: lcov --capture (name) failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +echo '' > names.data +echo -o noNames $DIFFCOV_OPTS $IGNORE --show-details --description names.data current_name.info.gz +$COVER $GENHTML_TOOL -o noNames $DIFFCOV_OPTS $IGNORE --show-details --description names.data current_name.info.gz +if [ 0 == $? ] ; then + echo "ERROR: expected fail due to missing descriptions - but passed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +echo "TD: out of sequence" > names.data +echo genhtml -o noNames $DIFFCOV_OPTS $IGNORE --show-details --description names.data current_name.info.gz +$COVER $GENHTML_TOOL -o noNames $DIFFCOV_OPTS $IGNORE --show-details --description names.data current_name.info.gz +if [ 0 == $? ] ; then + echo "ERROR: expected fail due to invalid sequence - but passed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +# check that vanilla, flat, hierarchical work with and without prefix +cat > names.data < c + ls current_prefix$mode > d + diff c d + if [ 0 != $? ] ; then + echo "ERROR: diff current $mode content differs" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + +diff -u simple.cpp simple2.cpp | sed -e "s|simple2*\.cpp|$ROOT/test.cpp|g" > diff.txt + +# change the default annotation tooltip so grep commands looking for +# owner table entries doesn't match accidentally +# No spaces to avoid escaping quote substitutions +# No frames - so directory table link to first TLA in file are shown +POPUP='--rc genhtml_annotate_tooltip=mytooltip' +for opt in "" --dark-mode --flat ; do + outDir=./noncode_differential$opt + echo ${LCOV_HOME}/bin/genhtml $DIFFCOV_NOFRAME_OPTS $opt --baseline-file ./baseline.info.gz --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --show-noncode --ignore-errors source --simplified-colors -o $outDir ./current.info.gz $IGNORE $POPUP + $COVER $GENHTML_TOOL $DIFFCOV_NOFRAME_OPTS $opt --baseline-file ./baseline.info.gz --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --show-noncode --ignore-errors source --simplified-colors -o $outDir ./current.info.gz $GENHTML_PORT --save $IGNORE $POPUP + if [ 0 != $? 
] ; then + echo "ERROR: genhtml $outdir failed (1)" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + + #look for navigation links in index.html files + if [ -f $outDir/simple/index.html ] ; then + indexDir=$outDir/simple + else + # flat view - so the navigation links should be at top level + indexDir=$outDir + fi + for f in $indexDir/index.html ; do + grep -E 'href=.*#L[0-9]+.*Go to first ' $f + if [ 0 != $? ] ; then + status=1 + echo "ERROR: no navigation links in $f" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + done + + # expect to see non-code owners 'rupert.psmith' and 'pelham.wodehouse' in file annotations + FILE=`find $outDir -name test.cpp.gcov.html` + for owner in rupert.psmith pelham.wodehouse ; do + grep $owner $FILE + if [ 0 != $? ] ; then + echo "ERROR: did not find $owner in $outDir annotations" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + done + if [ "$opt"x == '--flat'x ] ; then + + # flat view don't expect to see index.html in subdir + if [ -e $outDir/simple/index.html ] ; then + echo "ERROR: --flat should not write subdir index in $outDir" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + # expect to see path to source file in the indices + for f in $outDir/index*.html ; do + grep "simple/test.cpp" $f + if [ 0 != $? ] ; then + echo "ERROR: expected to see path in $f" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + done + fi + + outDir=./differential_subset$opt + echo ${LCOV_HOME}/bin/genhtml $DIFFCOV_NOFRAME_OPTS $opt --baseline-file ./baseline.info.gz --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --ignore-errors source --simplified-colors -o $outDir ./current.info.gz $IGNORE $POPUP --rc truncate_owner_table=top,directory --rc owner_table_entries=2 --include '*simple*' + $COVER $GENHTML_TOOL $DIFFCOV_NOFRAME_OPTS $opt --baseline-file ./baseline.info.gz --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --show-noncode --ignore-errors source --simplified-colors -o $outDir ./current.info.gz $GENHTML_PORT --save $IGNORE $POPUP --rc truncate_owner_table=top,directory --include '*simple*' --rc owner_table_entries=2 + if [ 0 != $? ] ; then + echo "ERROR: genhtml subset $outDir failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + # expect to see owners 'henry.cox' and 'roderick.glossop' + # but not augustus.finknottle - who should have been truncated + OUT='augustus.finknottle' + FILES=$outDir/index.html + if [ -d $outDir/simiple/index.html ] ; then + FILES="$FILES $outDir/simiple/index.html" + fi + for FILE in $FILES ; do + for owner in henry.cox roderick.glossop ; do + grep $owner $FILE + if [ 0 != $? ] ; then + echo "ERROR: did not find $owner in $outDir $FILE annotations" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + done + # expect to see note about truncation in the table view + grep '2 authors truncated' $FILE + if [ 0 != $? ] ; then + echo "ERROR: did not find truncation count in $FILE" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + + for owner in augustus.finknottle ; do + grep $owner $FILE + if [ 0 == $? 
] ; then + echo "ERROR: unexpectedly found $owner in $outDir $FILEannotations" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + done + done # for each index file + +done + + +# check that this works with test names +echo ${LCOV_HOME}/bin/genhtml $DIFFCOV_OPTS --baseline-file ./baseline_name.info --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --show-noncode --ignore-errors source --simplified-colors -o differential_named ./current_name.info.gz $IGNORE --description names.data --serialize differential_named/coverage.dat +$COVER ${GENHTML_TOOL} $DIFFCOV_OPTS --baseline-file ./baseline_name.info --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --show-noncode --ignore-errors source --simplified-colors -o differential_named ./current_name.info.gz $GENHTML_PORT $IGNORE --description names.data --serialize differential_named/coverage.dat +if [ 0 != $? ] ; then + echo "ERROR: genhtml differential testname failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +# test --build-dir option, using linked build +cp test.cpp linked.cpp +mkdir -p linked/build +(cd linked/build ; ln -s ../../linked.cpp ) +for f in baseline current ; do + cat ${f}.info | sed -e 's/test.cpp/linked\/build\/linked.cpp/' > linked_${f}.info +done +cat diff.txt | sed -e s/test.cpp/linked.cpp/ > linked_diff.txt + +# note: ignore version mismatch because copying file changed the +# date - and so will cause a mismatch +echo ${LCOV_HOME}/bin/genhtml $DIFFCOV_OPTS --baseline-file ./linked_baseline.info --diff-file linked_diff.txt -o linked_err ./linked_current.info $IGNORE --ignore version +$COVER ${GENHTML_TOOL} $DIFFCOV_OPTS --baseline-file ./linked_baseline.info --diff-file linked_diff.txt -o linked_err ./linked_current.info $IGNORE --ignore version 2>&1 | tee linked.log +# should fail to find source files +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "expected genhtml to fail with linked build" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +# check for diff file inconsistency message +grep -E 'version changed from .+ but file not found in' linked.log +if [ 0 != $? ] ; then + echo "failed to find expected diff consistency message" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# run again - skipping that message - expect to hit path mismatch +echo ${LCOV_HOME}/bin/genhtml $DIFFCOV_OPTS --baseline-file ./linked_baseline.info --diff-file linked_diff.txt -o linked_err ./linked_current.info $IGNORE --ignore version,inconsistent +$COVER ${GENHTML_TOOL} $DIFFCOV_OPTS --baseline-file ./linked_baseline.info --diff-file linked_diff.txt -o linked_err ./linked_current.info $IGNORE --ignore version,inconsistent 2>&1 | tee linked2.log +# should fail to find source files +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "expected genhtml to fail with linked build (2)" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# check error message +grep "possible path inconsistency" linked2.log +if [ 0 != $? ] ; then + echo "failed to find expected mismatch message" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# run again eliding mismatches.. 
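+# (eliding the path mismatch tells genhtml to treat baseline/diff entries
+#  whose directory paths differ but whose basenames agree as the same file -
+#  see the "has same basename" message checked in the broken-path test below)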
+echo ${LCOV_HOME}/bin/genhtml $DIFFCOV_OPTS --baseline-file ./linked_baseline.info --diff-file linked_diff.txt -o linked_elide ./linked_current.info $IGNORE --ignore version,inconsistent --elide-path +$COVER ${GENHTML_TOOL} $DIFFCOV_OPTS --baseline-file ./linked_baseline.info --diff-file linked_diff.txt -o linked_elide ./linked_current.info $IGNORE --elide-path --ignore version,inconsistent +# should pass +if [ 0 != $? ] ; then + echo "expected genhtml --elide to pass" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +if [ ! -f linked_elide/simple/linked/build/linked.cpp.gcov.html ] ; then + echo "expected linked/elide output not found" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# run again with build dir +echo ${LCOV_HOME}/bin/genhtml $DIFFCOV_OPTS --baseline-file ./linked_baseline.info --diff-file linked_diff.txt -o linked_dir ./linked_current.info $IGNORE --build-dir linked --ignore version,inconsistent --rc scope_regexp=linked +$COVER ${GENHTML_TOOL} $DIFFCOV_OPTS --baseline-file ./linked_baseline.info --diff-file linked_diff.txt -o linked_dir ./linked_current.info $IGNORE --build-dir linked --ignore version,inconsistent --rc scope_regexp=linked +# should pass +if [ 0 != $? ] ; then + echo "expected genhtml --build-dir to pass" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +if [ ! -f linked_elide/simple/linked/build/linked.cpp.gcov.html ] ; then + echo "expected linked/elide output not found" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +# run with several different combinations of options - and see +# if they do what we expect +TEST_OPTS=$DIFFCOV_OPTS +EXT="" +for opt in "" "--show-details" "--hier"; do + + for o in "" $opt ; do + OPTS="$TEST_OPTS $o" + outdir=./differential${EXT}${o} + echo ${LCOV_HOME}/bin/genhtml $OPTS --baseline-file ./baseline.info --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --ignore-errors source -o $outdir ./current.info $IGNORE $POPUP + $COVER ${GENHTML_TOOL} $OPTS --baseline-file ./baseline.info --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --ignore-errors source -o $outdir ./current.info $GENHTML_PORT $IGNORE $POPUP + if [ 0 != $? ] ; then + echo "ERROR: genhtml $outdir failed (2)" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + + if [[ $OPTS =~ "show-details" ]] ; then + found=0 + else + found=1 + fi + grep "show details" $outdir/simple/index.html + # expect to find the string (0 return val) if flag is present + if [ $found != $? ] ;then + echo "ERROR: '--show-details' mismatch in $outdir" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + + if [[ $OPTS =~ "hier" ]] ; then + # we don't expect a hierarchical path - grep return code is nonzero + code=0 + else + code=1 + fi + # look for full path name (starting from '/') in the index.html file.. + # we aren't sure where gcc is installed - so we aren't sure what + # path to look for + # However - some compiler versions (e.g., gcc/10) don't find any + # coverage info in the system header files, so there is no + # hierarchical entry in the output HTML + COUNT=`grep -c index.html\" $outdir/index.html` + if [ $COUNT != 1 ] ; then + # look for at least 2 directory elements in the path name + # name might include 'c++' + grep -E '[a-zA-Z0-9_.-+]+/[a-zA-Z0-9_.-+]+/index.html\"[^>]*>' $outdir/index.html + # expect to find the string (0 return val) if flag is NOT present + if [ $code == $? 
] ; then + echo "ERROR: '--hierarchical' path mismatch in $outdir" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + else + echo "only one directory in output" + fi + + # expect to not to see non-code owners 'rupert.psmith' and 'pelham.wodehose' in file annotations + FILE=`find $outdir -name test.cpp.gcov.html` + for owner in rupert.psmith pelham.wodehose ; do + grep $owner $FILE + if [ 1 != $? ] ;then + echo "ERROR: found $owner in $outdir annotations" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + done + # expect to see augustus.finknottle in owner table (100% coverage) + for owner in augustus.finknottle ; do + grep $owner $outdir/index.html + if [ 0 != $? ] ;then + echo "ERROR: did not find $owner in $outdir owner summary" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + done + for summary in Branch Line ; do + grep "$summary coverage" $outdir/index.html + if [ 0 != $? ] ;then + echo "ERROR: did not find $summary in $outdir summary" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + done + done + TEST_OPTS="$TEST_OPTS $opt" + EXT=${EXT}${opt} +done + + +echo genhtml $DIFFCOV_OPTS --no-branch-coverage --baseline-file ./baseline_nobranch.info --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners --ignore-errors source -o ./differential_nobranch ./current.info $IGNORE $POPUP +$COVER ${GENHTML_TOOL} $DIFFCOV_OPTS --no-branch-coverage --baseline-file ./baseline_nobranch.info --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners --ignore-errors source -o ./differential_nobranch ./current.info $GENHTML_PORT $IGNORE $POPUP +if [ 0 != $? ] ; then + echo "ERROR: genhtml differential_nobranch failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +# should not be a branch table +# expect not to find 'augustus.finknottle' whose code is 100% covered in owner table +for owner in augustus.finknottle ; do + grep $owner differential_nobranch/index.html + if [ 1 != $? ] ;then + echo "ERROR: found $owner in differential_nobranch owner summary" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done +for summary in Branch ; do + grep "$summary coverage" differential_nobranch/index.html + if [ 1 != $? ] ;then + echo "ERROR: found $summary in differential_nobranch summary" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + + +# with no sourceview +echo genhtml $DIFFCOV_OPTS --no-sourceview -branch-coverage --baseline-file ./baseline_nobranch.info --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners --ignore-errors source -o ./differential_nosource ./current.info $IGNORE $POPUP +$COVER ${GENHTML_TOOL} $DIFFCOV_OPTS --no-sourceview --branch-coverage --baseline-file ./baseline_nobranch.info --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners --ignore-errors source -o ./differential_nosource ./current.info $GENHTML_PORT $IGNORE $POPUP +if [ 0 != $? ] ; then + echo "ERROR: genhtml differential_nosource failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +# should be no source directory.. +if [ -e differential_nosource/simple/test.cpp.gcov.html ] ; then + echo "expected no source..but found some" +fi + +# check case that we have a diff file but no baseline +echo genhtml $DIFFCOV_OPTS ./current.info --diff-file diff.txt -o ./diff_no_baseline $IGNORE +$COVER $GENHTML_TOOL $DIFFCOV_OPTS ./current.info --diff-file diff.txt -o ./diff_no_baseline $IGNORE +if [ 0 != $? 
] ; then + echo "ERROR: genhtml diff_no_baseline failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# and the inverse difference +rm -f test.cpp +cp simple.cpp test.cpp +diff -u simple2.cpp simple.cpp | sed -e "s|simple2*\.cpp|$ROOT/test.cpp|g" > diff_r.txt + +# make the version number look different so the new diff file +# consistency check will pass +sed -E 's/VER:#1/VER:#2/' current.info > current_hacked.info + +# will get MD5 mismatch unless we have the simple.cpp and simple.cpp files +# set up in the expected places +echo genhtml $DIFFCOV_OPTS --baseline-file ./current_hacked.info --diff-file diff_r.txt -o ./reverse ./baseline_orig.info $IGNORE --ignore version +$COVER $GENHTML_TOOL $DIFFCOV_OPTS --baseline-file ./current_hacked.info --diff-file diff_r.txt -o ./reverse ./baseline_orig.info $GENHTML_PORT $IGNORE --ignore version +if [ 0 != $? ] ; then + echo "ERROR: genhtml branch failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# just ignore the new diff file/version ID mismatch message - +# obviously, some of the inputs here are cobbled together rather than +# entirely generated by the numbers. Kind of painful to re-create everything +echo genhtml $DIFFCOV_OPTS --baseline-file ./current_hacked.info --diff-file diff_r.txt -o ./reverse_nobranch ./baseline_nobranch.info.gz $IGNORE --ignore inconsistent,version +$COVER $GENHTML_TOOL $DIFFCOV_OPTS --baseline-file ./current_hacked.info --diff-file diff_r.txt -o ./reverse_nobranch ./baseline_nobranch.info.gz $GENHTML_PORT $IGNORE --ignore inconsistent,version +if [ 0 != $? ] ; then + echo "ERROR: genhtml reverse_nobranch failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +# point to 'new' file now... +rm -f test.cpp +ln -s simple2.cpp test.cpp + +echo genhtml $DIFFCOV_OPTS --baseline-file ./baseline.info --diff-file diff.txt --annotate-script ./annotate.sh -o ./no_owners ./current.info $IGNORE +$COVER ${GENHTML_TOOL} $DIFFCOV_OPTS --baseline-file ./baseline.info --diff-file diff.txt --annotate-script ./annotate.sh -o ./no_owners ./current.info $GENHTML_PORT $IGNORE +if [ 0 != $? ] ; then + echo "ERROR: genhtml no_owners failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +# expect to not find ownership summary table... +for summary in ownership ; do + grep $summary no_owners/index.html + if [ 1 != $? ] ;then + echo "ERROR: found $summary in no_owners summary" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + +echo ${LCOV_HOME}/bin/genhtml $DIFFCOV_OPTS --baseline-file ./baseline.info --diff-file diff.txt -o ./no_annotation ./current.info $IGNORE --rc scope_regexp='test.cpp' +$COVER ${GENHTML_TOOL} $DIFFCOV_OPTS --baseline-file ./baseline.info --diff-file diff.txt -o ./no_annotation ./current.info $GENHTML_PORT $IGNORE --rc scope_regexp='test.cpp' +if [ 0 != $? ] ; then + echo "ERROR: genhtml no_annotation failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +# expect to find differential TLAs - but don't expect ownership and date tables +for key in UNC LBC UIC UBC GBC GIC GNC CBC EUB ECB DUB DCB ; do + grep $key no_annotation/index.html + if [ 0 != $? ] ;then + echo "ERROR: did not find $key in no_annotation summary" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done +for key in "date bins" "ownership bins" ; do + grep "$key" no_annotation/index.html + if [ 1 != $? 
] ; then + echo "ERROR: found $key in no_annotation summary" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + +echo ${LCOV_HOME}/bin/genhtml $DIFFCOV_OPTS --annotate-script `pwd`/annotate.sh --show-owners -o ./no_baseline ./current.info $IGNORE +$COVER ${GENHTML_TOOL} $DIFFCOV_OPTS --annotate-script `pwd`/annotate.sh --show-owners -o ./no_baseline ./current.info $GENHTML_PORT $IGNORE +if [ 0 != $? ] ; then + echo "ERROR: genhtml no_baseline failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +# don't expect to find differential TLAs - but still expect ownership and date tables +for key in "date bins" "ownership bins" ; do + grep "$key" no_baseline/index.html + if [ 0 != $? ] ;then + echo "ERROR: did not find $key in no_baseline summary" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done +for key in UNC LBC UIC UBC GBC GIC GNC CBC EUB ECB DUB DCB ; do + grep $key no_baseline/index.html + if [ 1 != $? ] ;then + echo "ERROR: found $key in no_baseline summary" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + + +echo "now some error checking and issue workaround tests..." + +# - first, create a 'diff' file whose pathname is not quite right.. +sed -e "s#/simple/test#/badPath/test#g" diff.txt > diff_broken.txt + +# now run genhtml - expect to see an error: +# skip new diff/version inconsistency message +echo ${LCOV_HOME}/bin/genhtml $DIFFCOV_OPTS --baseline-file ./baseline.info --diff-file diff_broken.txt --annotate-script `pwd`/annotate.sh --show-owners all --show-noncode --simplified-colors -o ./broken ./current.info.gz $IGNORE --ignore inconsistent +$COVER ${GENHTML_TOOL} $DIFFCOV_OPTS --baseline-file ./baseline.info --diff-file diff_broken.txt --annotate-script `pwd`/annotate.sh --show-owners all --show-noncode --simplified-colors -o ./broken ./current.info.gz $IGNORE --ignore inconsistent 2>&1 | tee err.log + +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: expected error but didn't see it" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +grep -i "Error: .* possible path inconsistency" err.log +if [ 0 != $? ] ; then + echo "ERROR: can't find expected error message" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +# now run genhtml - expect to see an warning: +echo ${LCOV_HOME}/bin/genhtml $DIFFCOV_OPTS --baseline-file ./baseline.info.gz --diff-file diff_broken.txt --annotate-script `pwd`/annotate.sh --show-owners all --show-noncode --ignore-errors path --simplified-colors -o ./mismatchPath ./current.info.gz $IGNORE --ignore inconsistent +$COVER ${GENHTML_TOOL} $DIFFCOV_OPTS --baseline-file ./baseline.info.gz --diff-file diff_broken.txt --annotate-script `pwd`/annotate.sh --show-owners all --show-noncode --ignore-errors path --simplified-colors -o ./mismatchPath ./current.info.gz $IGNORE --ignore inconsistent 2>&1 | tee warn.log + +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "ERROR: expected warning but didn't see it" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +grep -i 'Warning: .* possible path inconsistency' warn.log +if [ 0 != $? 
] ; then + echo "ERROR: can't find expected warning message" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# now use the 'elide' feature to avoid the error +echo ${LCOV_HOME}/bin/genhtml $DIFFCOV_OPTS --baseline-file ./baseline.info.gz --diff-file diff_broken.txt --annotate-script `pwd`/annotate.sh --show-owners all --show-noncode --elide-path-mismatch --simplified-colors -o ./elidePath ./current.info.gz $IGNORE --ignore inconsistent +$COVER ${GENHTML_TOOL} $DIFFCOV_OPTS --baseline-file ./baseline.info.gz --diff-file diff_broken.txt --annotate-script `pwd`/annotate.sh --show-owners all --show-noncode --elide-path-mismatch --simplified-colors -o ./elidePath ./current.info.gz $IGNORE --ignore inconsistent 2>&1 | tee elide.log + +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "ERROR: expected success but didn't see it" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +grep "has same basename" elide.log +if [ 0 != $? ] ; then + echo "ERROR: can't find expected warning message" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# test criteria-related RC override errors: +for errs in 'criteria_callback_levels=dir,a' 'criteria_callback_data=foo' ; do + echo ${LCOV_HOME}/bin/genhtml $DIFFCOV_OPTS --baseline-file ./baseline.info.gz --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --ignore-errors source --criteria $CRITERIA -o $outdir ./current.info --rc $errs $IGNORE + $COVER ${GENHTML_TOOL} $DIFFCOV_OPTS --baseline-file ./baseline.info.gz --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --ignore-errors source --criteria $CRITERIA -o criteria ./current.info $GENHTML_PORT --rc $errs $IGNORE > criteriaErr.log 2> criteriaErr.err + if [ 0 == $? ] ; then + echo "ERROR: genhtml criteria should have failed but didn't" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + grep -E "invalid '.+' value .+ expected" criteriaErr.err + if [ 0 != $? ] ;then + echo "ERROR: 'invalid criteria option message is missing" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + + +# test 'coverage criteria' callback - both script and module +for mod in '' '.pm' ; do + # we expect to fail - and to see error message - it coverage criteria not met + # ask for date and owner data - even though the callback doesn't use it + echo ${LCOV_HOME}/bin/genhtml $DIFFCOV_OPTS --baseline-file ./baseline.info.gz --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --ignore-errors source --criteria $CRITERIA$mod -o criteria$mod ./current.info --rc criteria_callback_data=date,owner --rc criteria_callback_levels=top,file $IGNORE + $COVER ${GENHTML_TOOL} $DIFFCOV_OPTS --baseline-file ./baseline.info.gz --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --ignore-errors source --criteria $CRITERIA$mod --rc criteria_callback_data=date,owner --rc criteria_callback_levels=top,file -o criteria$mod ./current.info $GENHTML_PORT $IGNORE > criteria$mod.log 2> criteria$mod.err + if [ 0 == $? ] ; then + echo "ERROR: genhtml criteria should have failed but didn't" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + + # signoff should pass... 
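+    # (--signoff demotes a failed criteria check: the constraint message
+    #  still appears in the log, but genhtml exits 0 - the log/err checks
+    #  below verify both behaviors)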
+ echo ${LCOV_HOME}/bin/genhtml $DIFFCOV_OPTS --baseline-file ./baseline.info.gz --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --ignore-errors source --criteria $CRITERIA$mod --criteria --signoff -o criteria_signoff$mod ./current.info --rc criteria_callback_data=date,owner --rc criteria_callback_levels=top,file $IGNORE + $COVER ${GENHTML_TOOL} $DIFFCOV_OPTS --baseline-file ./baseline.info.gz --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --ignore-errors source --criteria $CRITERIA$mod --criteria --signoff --rc criteria_callback_data=date,owner --rc criteria_callback_levels=top,file -o criteria_signoff$mod ./current.info $GENHTML_PORT $IGNORE > criteria_signoff$mod.log 2> criteria_signoff$mod.err + if [ 0 != $? ] ; then + echo "ERROR: genhtml criteria signoff should have passed but didn't" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + + if [[ $OPTS =~ "show-details" ]] ; then + found=0 + else + found=1 + fi + grep "Failed coverage criteria" criteria$mod.log + # expect to find the string (0 return val) if flag is present + if [ 0 != $? ] ;then + echo "ERROR: 'criteria fail message not matched" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + for suffix in '' '_signoff' ; do + if [ 'x' == "x$suffix" ] ; then + SIGNOFF_ERR=0 + else + # we don't expect to see error, if signoff + SIGNOFF_ERR=1 + fi + for l in criteria$suffix$mod.log criteria$suffix$mod.err ; do + FOUND=0 + if [[ $l =~ "err" ]] ; then + # don't expect to find message in stderr, if signoff + FOUND=$SIGNOFF_ERR + fi + grep "UNC + LBC + UIC != 0" $l + # expect to find the string (0 return val) if flag is present + if [ $FOUND != $? ] ;then + echo "ERROR: 'criteria string not matching in $l" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + done + done +done + + +# test 'coverage criteria' callback +# we expect to fail - and to see error message - it coverage criteria not met +# ask for date and owner data - even though the callback doesn't use it +echo ${LCOV_HOME}/bin/genhtml $DIFFCOV_OPTS --baseline-file ./baseline.info.gz --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --ignore-errors source --criteria "$CRITERIA --signoff" -o $outdir ./current.info --rc criteria_callback_data=date,owner --rc criteria_callback_levels=top,file $IGNORE +$COVER ${GENHTML_TOOL} $DIFFCOV_OPTS --baseline-file ./baseline.info.gz --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --ignore-errors source --criteria "$CRITERIA --signoff" --rc criteria_callback_data=date,owner --rc criteria_callback_levels=top,file -o criteria ./current.info $GENHTML_PORT $IGNORE > signoff.log 2> signoff.err +if [ 0 != $? ] ; then + echo "ERROR: genhtml criteria --signoff did not pass" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep "UNC + LBC + UIC != 0" signoff.log +# expect to find the string (0 return val) if flag is present +if [ 0 != $? 
] ; then + echo "ERROR: 'criteria string is missing from signoff.log" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +# check select script +echo ${LCOV_HOME}/bin/genhtml $DIFFCOV_OPTS --baseline-file ./baseline.info.gz --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --ignore-errors source --select "$SELECT" --select --owner --select stanley.ukeridge current.info -o select $IGNORE --validate +$COVER ${GENHTML_TOOL} $DIFFCOV_OPTS --baseline-file ./baseline.info.gz --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --ignore-errors source --select "$SELECT" --select --owner --select stanley.ukeridge current.info -o select $IGNORE --validate +if [ 0 != $? ] ; then + echo "ERROR: genhtml select did not pass" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +FILE=`find select -name test.cpp.gcov.html` +for owner in roderick.glossop ; do #expect to filter these guys out + grep $owner $FILE + if [ 0 == $? ] ; then + echo "ERROR: did not find $owner in select group" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done +COUNT=`grep -c 'ignored lines' $FILE` +if [ 0 != $? ] ; then + echo "ERROR: did not find elided message" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +if [ 2 != $COUNT ] ; then + echo "ERROR: did not find elided messages" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +# check select script +echo ${LCOV_HOME}/bin/genhtml $DIFFCOV_OPTS --baseline-file ./baseline.info.gz --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --ignore-errors source --select "$SELECT" --select --owner --select not.there current.info -o select2 $IGNORE --valideate +$COVER ${GENHTML_TOOL} $DIFFCOV_OPTS --baseline-file ./baseline.info.gz --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --ignore-errors source --select "$SELECT" --select --owner --select not.there current.info -o select2 $IGNORE --validate +if [ 0 != $? ] ; then + echo "ERROR: genhtml select did not pass" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +grep 'Coverage data table is empty' select2/index.html +if [ 0 != $? ] ; then + echo "ERROR: did not find elided message" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +NAME=`(cd select2 ; ls *.html | grep -v -E '(cmdline|profile)')` +if [ "index.html" != "$NAME" ] ; then + echo "ERROR: expected to find only one HTML file" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +# test '--show-navigation' option +# need "--ignore-unused for gcc/10.2.0 - which doesn't see code in its c++ headers +echo ${LCOV_HOME}/bin/genhtml $DIFFCOV_NOFRAME_OPTS --annotate-script `pwd`/annotate.sh --show-owners all --show-navigation -o navigation --ignore unused --exclude '*/include/c++/*' ./current.info $IGNORE +$COVER ${GENHTML_TOOL} $DIFFCOV_NOFRAME_OPTS --annotate-script `pwd`/annotate.sh --show-owners all --show-navigation -o navigation --ignore unused --exclude '*/include/c++/*' $GENHTML_PORT ./current.info $IGNORE > navigation.log 2> navigation.err + +if [ 0 != $? ] ; then + echo "ERROR: genhtml --show-navigation failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +HIT=`grep -c HIT.. navigation.log` +MISS=`grep -c MIS.. navigation.log` +if [ "$ENABLE_MCDC" != '1' ] ; then + EXPECT_MISS=2 + EXPECT_HIT=3 +else + # MC/DC included... 
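+    # (gcc >= 14 builds enable --mcdc and -fcondition-coverage above, so the
+    #  --show-navigation log is expected to contain one extra MIS and one
+    #  extra HIT entry for the condition coverpoint)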
+ EXPECT_MISS=3 + EXPECT_HIT=4 +fi +if [[ $HIT != $EXPECT_HIT || $MISS != $EXPECT_MISS ]] ; then + echo "ERROR: 'navigation counts are wrong: hit $HIT != $EXPECT_HIT $MISS != $EXPECT_MISS" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +#look for navigation links in index.html files +for f in navigation/simple/index.html ; do + grep -E 'href=.*#L[0-9]+.*Go to first ' $f + if [ 0 != $? ] ; then + status=1 + echo "ERROR: no navigation links in $f" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done +# look for unexpected naming in HTML +for tla in GNC UNC ; do + grep "next $tla in" ./navigation/simple/test.cpp.gcov.html + if [ 0 == $? ] ; then + echo "ERROR: found unexpected tla $TLA in result" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done +# look for expected naming in HTML +for tla in HIT MIS ; do + grep "next $tla in" ./navigation/simple/test.cpp.gcov.html + if [ 0 != $? ] ; then + echo "ERROR: did not find expected tla $TLA in result" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + + +# test file substitution option +# need to ignore the 'missing source' error which will happen when we try to +# filter for exclude patterns - the file 'pwd/test.cpp' does not exist +echo lcov $LCOV_OPTS --capture --directory . --output-file subst.info --substitute "s#${PWD}#pwd#g" --exclude '*/iostream' --ignore source,source $IGNORE +$COVER $LCOV_TOOL $LCOV_OPTS --capture --directory . --output-file subst.info --substitute "s#${PWD}#pwd#g" --exclude '*/iostream' --ignore source,source $LCOV_PORT $IGNORE +if [ 0 != $? ] ; then + echo "ERROR: lcov --capture failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep "pwd/test.cpp" subst.info +if [ 0 != $? ] ; then + echo "ERROR: --substitute failed - not found in subst.info" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep "iostream" subst.info +if [ 0 == $? ] ; then + echo "ERROR: --exclude failed - found in subst.info" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep "pwd/test.cpp" baseline.info +if [ 0 == $? ] ; then + # substitution should not have happened in baseline.info + echo "ERROR: --substitute failed - found in baseline.info" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# gcc/10 doesn't see code in its c++ headers - test will fail.. +COUNT=`grep -c SF: baseline.info` +if [ $COUNT != '1' ] ; then + grep "iostream" baseline.info + if [ 0 != $? ] ; then + # exclude should not have happened in baseline.info + echo "ERROR: --exclude failed - not found in baseline.info" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +fi + +echo lcov $LCOV_OPTS --capture --directory . --output-file trivial.info --filter trivial,branch $IGNORE $DERIVE +$COVER $LCOV_TOOL $LCOV_OPTS --capture --directory . --output-file trivial.info --filter trivial,branch $IGNORE $DERIVE +if [ 0 == $? ] ; then + BASELINE_COUNT=`grep -c FNL: baseline.info` + TRIVIAL_COUNT=`grep -c FNL: trivial.info` + # expect lower function count: we should have removed 'static_initial... + GENERATED=`grep -c _GLOBAL__ baseline.info` + if [[ ( 0 != $GENERATED && + $TRIVIAL_COUNT -ge $BASELINE_COUNT ) || + ( 0 == $GENERATED && + $TRIVIAL_COUNT != $BASELINE_COUNT) ]] ; then + echo "ERROR: trivial filter failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +else + echo "old version of gcc doesn't support trivial function filtering because no end line" + # try to see if we can generate the data if we ignore unsupported... 
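+    # ('trivial' function filtering relies on function end-line information
+    #  that old gcc does not emit; downgrading the 'unsupported' error lets
+    #  the capture complete anyway)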
+ $COVER $LCOV_TOOL $LCOV_OPTS --capture --directory . --output-file trivial.info --filter trivial,branch $IGNORE $DERIVE --ignore unsupported + if [ 0 != $? ] ; then + echo "ERROR: lcov --capture trivial failed after ignoring error" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +fi + +# some error checks... +# use 'no_markers' flag so we won't see the filter message +echo $LCOV_TOOL $LCOV_OPTS --output err1.info -a current.info -a current.info --substitute "s#xyz#pwd#g" --exclude 'thisStringDoesNotMatch' --no-markers +$COVER $LCOV_TOOL $LCOV_OPTS --output err1.info -a current.info -a current.info --substitute "s#xyz#pwd#g" --exclude 'thisStringDoesNotMatch' --no-markers +if [ 0 == $? ] ; then + echo "ERROR: lcov ran despite error" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +echo $LCOV_TOOL $LCOV_OPTS --output unused.info -a current.info -a current.info --substitute "s#xyz#pwd#g" --exclude 'thisStringDoesNotMatch' --ignore unused --no-markers $IGNORE +$COVER $LCOV_TOOL $LCOV_OPTS --output unused.info -a current.info -a current.info --substitute "s#xyz#pwd#g" --exclude 'thisStringDoesNotMatch' --ignore unused --no-markers $IGNORE +if [ 0 != $? ] ; then + echo "ERROR: lcov failed despite suppression" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# test function "coverpoint proportion" feature +grep -E 'FNL:[0-9]+,[0-9]+,[0-9]+' baseline.info +NO_END_LINE=$? + +if [ $NO_END_LINE == 0 ] ; then + echo "----------------------" + echo " compiler version support start/end reporting" + SUFFIX='_region' +else + echo "----------------------" + echo " compiler version DOES NOT support start/end reporting" + SUFFIX='' +fi + +echo genhtml $DIFFCOV_OPTS current.info --output-directory ./proportion --show-proportion $IGNORE +$COVER $GENHTML_TOOL $DIFFCOV_OPTS current.info --output-directory ./proportion --show-proportion $IGNORE +if [ 0 != $? ] ; then + echo "ERROR: genhtml current proportional failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +# and then a differential report... +echo ${LCOV_HOME}/bin/genhtml $OPTS --baseline-file ./baseline.info --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --ignore-errors source -o ./differential_prop ./current.info --show-proportion $IGNORE +$COVER ${GENHTML_TOOL} $OPTS --baseline-file ./baseline.info --diff-file diff.txt --annotate-script `pwd`/annotate.sh --show-owners all --ignore-errors source -o ./differential_prop ./current.info --show-proportion $GENHTML_PORT $IGNORE +if [ 0 != $? ] ; then + echo "ERROR: genhtml differential proportional failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# and see if we find the content we expected... +for test in proportion differential_prop ; do + for s in "unexercised branches" "unexercised lines" ; do + if [ 0 == $NO_END_LINE ] ; then + for f in "" '-c' '-b' '-l' ; do + NAME=$test/simple/test.cpp.func$f.html + grep "sort table by $s" $NAME + if [ 0 != $? ] ; then + echo "did not find col '$s' in $NAME" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + done + else + for f in "" '-c' ; do + NAME=$test/simple/test.cpp.func$f.html + grep "sort table by $s" $NAME + if [ 0 == $? ] ; then + echo "unexpected col '$s' in $NAME" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + done + fi + done +done + +# check error message if nothing annotated +cp simple.cpp annotate.cpp +${CXX} $COVERAGE_OPTS -o annotate.exe --coverage annotate.cpp +if [ 0 != $? 
] ; then + echo "annotate compile failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +./annotate.exe +if [ 0 != $? ] ; then + echo "./annotate.exe failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +echo lcov $LCOV_OPTS --capture --directory . --output-file annotate.info $IGNORE --include "annotate.cpp" +$COVER $LCOV_TOOL $LCOV_OPTS --capture --directory . --output-file annotate.info $IGNORE --include "annotate.cpp" +if [ 0 != $? ] ; then + echo "ERROR: lcov --capture annotate failed" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +echo genhtml $DIFFCOV_OPTS --output-directory ./annotate --annotate $ANNOTATE annotate.info +$COVER $GENHTML_TOOL $DIFFCOV_OPTS --output-directory ./annotate --annotate $ANNOTATE annotate.info +if [ 0 == $? ] ; then + echo "ERROR: annotate with no annotation" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +echo genhtml $DIFFCOV_OPTS --output-directory ./annotate --annotate $ANNOTATE +$COVER $GENHTML_TOOL $DIFFCOV_OPTS --output-directory ./annotate --annotate $ANNOTATE --ignore annotate annotate.info +if [ 0 != $? ] ; then + echo "ERROR: annotate with no annotation ignore did not pass" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# check nonexistent --rc option (note minus on '-memory_percentage') +echo genhtml $DIFFCOV_OPTS --output-directory ./errOut --rc -memory_percentage=50 baseline_orig.info $IGNORE +$COVER $GENHTML_TOOL $DIFFCOV_OPTS --output-directory ./errOut --rc -memory_percentage=50 baseline_orig.info $IGNORE +if [ 0 == $? ] ; then + echo "ERROR: incorrect RC option not caught" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# check --rc formatting +echo genhtml $DIFFCOV_OPTS --output-directory ./errOut --rc memory_percentage baseline_orig.info $IGNORE +$COVER $GENHTML_TOOL $DIFFCOV_OPTS --output-directory ./errOut --rc memory_percentage baseline_orig.info $IGNORE +if [ 0 == $? ] ; then + echo "ERROR: incorrect RC option not caught" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# skip both errors +# ignore version error which might happen if timestamp is included +echo genhtml $DIFFCOV_OPTS --output-directory ./usage --rc memory_percentage --rc -memory_percentage=50 baseline_orig.info --ignore usage,version +$COVER $GENHTML_TOOL $DIFFCOV_OPTS --output-directory ./usage --rc memory_percentage --rc percent=5 baseline_orig.info --ignore usage,version $IGNORE +if [ 0 != $? 
] ; then + echo "ERROR: didn't ignore errors" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# skip both errors - but check total message count +echo genhtml $DIFFCOV_OPTS --output-directory ./expect_err --rc memory_percentage --rc -memory_percentage=50 baseline_orig.info --ignore usage,version --expect usage:1 +$COVER $GENHTML_TOOL $DIFFCOV_OPTS --output-directory ./expect_err --rc memory_percentage --rc percent=5 baseline_orig.info --ignore usage,version $IGNORE --expect usage:1 2>&1 | tee expect_err.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "ERROR: didn't catch expect count error" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep -E "ERROR:.*count.*'usage' constraint .+ is not true" expect_err.log + +# now skip the count message too +echo genhtml $DIFFCOV_OPTS --output-directory ./expect --rc memory_percentage --rc -memory_percentage=50 baseline_orig.info --ignore usage,version,count --rc expect_message_count=usage:1 --msg-log +$COVER $GENHTML_TOOL $DIFFCOV_OPTS --output-directory ./expect --rc memory_percentage --rc percent=5 baseline_orig.info --ignore usage,version,count $IGNORE --rc expect_message_count=usage:1 --msg-log 2>&1 | tee expect.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "ERROR: didn't skip expect count error" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +grep -E "WARNING:.*count.*'usage' constraint .+ is not true" expect.msg +if [ 0 == $? ] ; then + echo "ERROR: didn't find expected msg in log" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +echo $SPREADSHEET_TOOL -o results.xlsx `find . -name "*.json"` +eval $SPREADSHEET_TOOL -o results.xlsx `find . -name "*.json"` +if [ 0 != $? ] ; then + status=1 + echo "ERROR: spreadsheet generation failed" + exit 1 +fi + +if [ 0 == $status ] ; then + echo "Tests passed" +else + echo "Tests failed" +fi + +if [ "x$COVER" != "x" ] ; then + echo "Generating coverage report" + echo ${LCOV_HOME}/bin/py2lcov -o pycov.info --testname simple --version-script $GET_VERSION $PYCOV_DB + ${LCOV_HOME}/bin/py2lcov -o pycov.info --testname simple --version-script $GET_VERSION $PYCOV_DB + + if [ 0 != $LOCAL_COVERAGE ] ; then + cover $COVER_DB + ${LCOV_HOME}/bin/perl2lcov -o perlcov.info --testname simple --version-script $GET_VERSION $COVER_DB + ${LCOV_HOME}/bin/genhtml -o html_report perlcov.info pycov.info --branch --flat --show-navigation --show-proportion --version-script $GET_VERSION --annotate-script $ANNOTATE --parallel --ignore empty,usage + echo "see HTML report 'html_report'" + else + echo cp pycov.info $COVER_DB/spreadsheet.info + cp pycov.info $COVER_DB/spreadsheet.info + fi +fi + +exit $status diff --git a/tests/gendiffcov/simple/simple.cpp b/tests/gendiffcov/simple/simple.cpp new file mode 100644 index 00000000..4e3541f3 --- /dev/null +++ b/tests/gendiffcov/simple/simple.cpp @@ -0,0 +1,45 @@ +/** + * @file simple.cpp + * @author MTK50321 Henry Cox hcox@HC-EL6-4502226 + * @date Mon Apr 13 13:00:49 2020 + * + * @brief Test for differential coverage + */ + +#include + +int +main(int ac, char ** av) +{ + int cond = 0; + if (ac == 1) + { + std::cout << "ac == 1 - code exercised" << std::endl;// CBC +#ifdef ADD_CODE + std::cout << " this code will be GIC" << std::endl; +#endif +#ifndef REMOVE_CODE + cond = 1; // when this goes away, baseline coverage is reduced + std::cout << " this code will be ECB" << std::endl; +#endif + std::cout << " this code will be DCB" << std::endl; + } + else + { + // this code not hit in 'regress' + cond = 2; + std::cout << "ac = " 
<< ac << std::endl;// UBC +#ifdef ADD_CODE + std::cout << " this code will be UIC" << std::endl; +#endif +#ifndef REMOVE_CODE + std::cout << " this code will be EUB" << std::endl; +#endif + std::cout << " this code will be DUB" << std::endl; + } + if (cond == 1) + std::cout << "cond == " << cond << "... code exercised" << std::endl;// LBC + else if (cond == 2) + std::cout << "cond == " << cond << "... code not exercised" << std::endl; + return 0; +} diff --git a/tests/gendiffcov/simple/simple2.cpp b/tests/gendiffcov/simple/simple2.cpp new file mode 100644 index 00000000..0defcc9b --- /dev/null +++ b/tests/gendiffcov/simple/simple2.cpp @@ -0,0 +1,45 @@ +/** + * @file simple2.cpp + * @author MTK50321 Henry Cox hcox@HC-EL6-4502226 + * @date Mon Apr 13 13:05:16 2020 + * + * @brief this is the 'new' version... + */ +#include + +int +main(int ac, char ** av) +{ + int cond = 0; + std::cout << "this is GNC" << std::endl; + if (ac == 1) + { + std::cout << "ac == 1 - code exercised" << std::endl;// CBC + cond = 3;// GNC +#ifdef ADD_CODE + std::cout << " this code will be GIC" << std::endl; +#endif +#ifndef REMOVE_CODE + cond = 1; // when this goes away, baseline coverage is reduced + std::cout << " this code will be ECB" << std::endl; +#endif + } + else + { + // this code not hit in 'regress' + cond = 2; + std::cout << "ac = " << ac << std::endl;// UBC + std::cout << "this is UNC" << std::endl; +#ifdef ADD_CODE + std::cout << " this code will be UIC" << std::endl; +#endif +#ifndef REMOVE_CODE + std::cout << " this code will be EUB" << std::endl; +#endif + } + if (cond == 1) + std::cout << "cond == " << cond << "... code exercised" << std::endl;// LBC + else if (cond == 2) + std::cout << "cond == " << cond << "... code exercised" << std::endl; + return 0; +} diff --git a/tests/gendiffcov/simple/simple2.cpp.annotated b/tests/gendiffcov/simple/simple2.cpp.annotated new file mode 100644 index 00000000..37c73bcf --- /dev/null +++ b/tests/gendiffcov/simple/simple2.cpp.annotated @@ -0,0 +1,45 @@ +1|henry.cox|274|/** +1|henry.cox|274| * @file simple2.cpp +1|henry.cox|274| * @author MTK50321 Henry Cox hcox@HC-EL6-4502226 +2|galahad.threepwood|28| * @date Mon Apr 13 13:05:16 2020 +2|galahad.threepwood|28| * +2|galahad.threepwood|28| * @brief this is the 'new' version... 
+2|galahad.threepwood|28| */ +3|rupert.psmith|26|#include +1|henry.cox|274| +1|henry.cox|274|int +1|henry.cox|274|main(int ac, char ** av) +1|henry.cox|274|{ +1|henry.cox|274| int cond = 0; +4|augustus.finknottle|4| std::cout << "this is GNC" << std::endl; +1|henry.cox|274| if (ac == 1) +1|henry.cox|274| { +1|henry.cox|274| std::cout << "ac == 1 - code exercised" << std::endl;// CBC +5|augustus.finknottle|13| cond = 3;// GNC +1|henry.cox|274|#ifdef ADD_CODE +1|henry.cox|274| std::cout << " this code will be GIC" << std::endl; +1|henry.cox|274|#endif +6|stanley.ukeridge|274|#ifndef REMOVE_CODE +6|stanley.ukeridge|274| cond = 1; // when this goes away, baseline coverage is reduced +6|stanley.ukeridge|274| std::cout << " this code will be ECB" << std::endl; +6|stanley.ukeridge|274|#endif +6|stanley.ukeridge|274| } +1|henry.cox|274| else +1|henry.cox|274| { +7|pelham.wodehouse|122| // this code not hit in 'regress' +1|henry.cox|274| cond = 2; +1|henry.cox|274| std::cout << "ac = " << ac << std::endl;// UBC +5|roderick.glossop|13| std::cout << "this is UNC" << std::endl; +1|henry.cox|274|#ifdef ADD_CODE +1|henry.cox|274| std::cout << " this code will be UIC" << std::endl; +1|henry.cox|274|#endif +1|henry.cox|274|#ifndef REMOVE_CODE +1|henry.cox|274| std::cout << " this code will be EUB" << std::endl; +1|henry.cox|274|#endif +1|henry.cox|274| } +1|henry.cox|274| if (cond == 1) +7|roderick.glossop|122| std::cout << "cond == " << cond << "... code exercised" << std::endl;// LBC +1|henry.cox|274| else if (cond == 2) +1|henry.cox|274| std::cout << "cond == " << cond << "... code exercised" << std::endl; +1|henry.cox|274| return 0; +1|henry.cox|274|} diff --git a/tests/gendiffcov/synthesize/Makefile b/tests/gendiffcov/synthesize/Makefile new file mode 100644 index 00000000..df2a36cb --- /dev/null +++ b/tests/gendiffcov/synthesize/Makefile @@ -0,0 +1,6 @@ +include ../../common.mak + +TESTS := synthesize.sh + +clean: + $(shell ./synthesize.sh --clean) diff --git a/tests/gendiffcov/synthesize/munge.pl b/tests/gendiffcov/synthesize/munge.pl new file mode 100644 index 00000000..17b5a05d --- /dev/null +++ b/tests/gendiffcov/synthesize/munge.pl @@ -0,0 +1,15 @@ +use strict; + +while (<>) { + if (/LF:(\d+)$/) { + print("DA:71,1\nDA:74,0\nLF:", $1 + 2, "\n"); + } elsif (/(BR|FN|L)H:(\d+)$/) { + print($1, "H:", $2 + 1, "\n"); + } elsif (/FNF:([0-9]+)$/) { + print("FN:71,73,outOfRangeFnc\nFNDA:1,outOfRangeFnc\nFNF:", $1 + 1, "\n"); + } elsif (/BRF:([0-9]+)$/) { + print("BRDA:71,0,0,0\nBRDA:71,0,1,1\nBRF:", $1 + 2, "\n"); + } else { + print; + } +} diff --git a/tests/gendiffcov/synthesize/munge2.pl b/tests/gendiffcov/synthesize/munge2.pl new file mode 100755 index 00000000..940916eb --- /dev/null +++ b/tests/gendiffcov/synthesize/munge2.pl @@ -0,0 +1,18 @@ +# remove a line coverpoint where branch is found to test synthesis method + +use strict; +my $filterLine; +while (<>) { + if (/LF:(\d+)$/) { + print("LF:", $1 -1, "\n"); + } elsif (/(BR|FN|L)H:(\d+)$/) { + print($1, "H:", $2 + 1, "\n"); + } elsif (/BRDA:(\d+),/) { + $filterLine = $1 unless defined($filterLine); + print; + } elsif (defined($filterLine) && /^DA:$filterLine,/) { + next; + } else { + print; + } +} diff --git a/tests/gendiffcov/synthesize/synthesize.sh b/tests/gendiffcov/synthesize/synthesize.sh new file mode 100755 index 00000000..98ae5465 --- /dev/null +++ b/tests/gendiffcov/synthesize/synthesize.sh @@ -0,0 +1,232 @@ +#!/bin/bash +set +x + +source ../../common.tst + +#PARALLEL='' +#PROFILE="'' + +rm -f *.cpp *.gcno *.gcda a.out *.info *.log 
*.json dumper* *.annotated annotate.sh
+rm -rf ./vanilla ./annotated ./annotateErr ./annotated2 ./annotateErr2 ./range ./filter ./cover_db annotated_nofunc
+
+clean_cover
+
+if [[ 1 == $CLEAN_ONLY ]] ; then
+    exit 0
+fi
+
+if ! type "${CXX}" >/dev/null 2>&1 ; then
+    echo "Missing tool: $CXX" >&2
+    exit 2
+fi
+
+LCOV_OPTS="$EXTRA_GCOV_OPTS --branch-coverage $PARALLEL $PROFILE"
+DIFFCOV_OPTS="--function-coverage --branch-coverage --demangle-cpp --frame --prefix $PARENT $PROFILE $PARALLEL"
+
+
+echo *
+
+# filename was all upper case
+ln -s ../simple/simple2.cpp test.cpp
+ln -s ../simple/simple2.cpp.annotated test.cpp.annotated
+ln -s ../simple/annotate.sh .
+
+${CXX} --coverage test.cpp
+./a.out
+
+echo `which gcov`
+echo `which lcov`
+
+# old gcc version generates inconsistent line/function data
+IFS='.' read -r -a VER <<< `${CC} -dumpversion`
+if [ "${VER[0]}" -lt 5 ] ; then
+    IGNORE="--ignore inconsistent"
+fi
+
+echo lcov $LCOV_OPTS --capture --directory . --output-file current.info --no-external $IGNORE
+$COVER $LCOV_TOOL $LCOV_OPTS --capture --directory . --output-file current.info --no-external $IGNORE
+if [ 0 != $? ] ; then
+    echo "ERROR: lcov --capture failed"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+
+# add an out-of-range line to the coverage data
+perl munge.pl current.info > munged.info
+# remove a line which has a branch: create branch with no corresponding line
+# LLVM seems to generate this kind of inconsistent data, at times
+perl munge2.pl current.info > munged2.info
+
+echo genhtml $DIFFCOV_OPTS --annotate-script `pwd`/annotate.sh --show-owners all -o annotateErr ./munged.info
+$COVER $GENHTML_TOOL $DIFFCOV_OPTS --annotate-script `pwd`/annotate.sh --show-owners all -o annotateErr ./munged.info 2>&1 | tee err.log
+if [ 0 == ${PIPESTATUS[0]} ] ; then
+    echo "ERROR: genhtml did not return error"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+grep -E "ERROR.*? contains only .+? lines but coverage data refers to line" err.log
+if [ 0 != $? ] ; then
+    echo "did not find expected range error message in err.log"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+
+
+echo genhtml $DIFFCOV_OPTS --annotate-script `pwd`/annotate.sh --show-owners all -o annotated --ignore range ./munged.info
+$COVER $GENHTML_TOOL $DIFFCOV_OPTS --annotate-script `pwd`/annotate.sh --show-owners all -o annotated ./munged.info --ignore range 2>&1 | tee annotate.log
+if [ 0 != ${PIPESTATUS[0]} ] ; then
+    echo "ERROR: genhtml annotated failed"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+# expect to see generated function labels
+for label in 'BEGIN' 'END' ; do
+    grep -E "$label: function .+outOfRangeFnc" annotated/synthesize/test.cpp.gcov.html
+    if [ 0 != $? ] ; then
+        echo "ERROR: genhtml didn't generate function $label label"
+        if [ 0 == $KEEP_GOING ] ; then
+            exit 1
+        fi
+    fi
+done
+
+echo genhtml $DIFFCOV_OPTS --annotate-script `pwd`/annotate.sh --show-owners all -o annotated_nofunc --no-function-coverage --ignore range ./munged.info
+$COVER $GENHTML_TOOL $DIFFCOV_OPTS --annotate-script `pwd`/annotate.sh --show-owners all -o annotated_nofunc --no-function-coverage ./munged.info --ignore range 2>&1 | tee annotate.log
+if [ 0 != ${PIPESTATUS[0]} ] ; then
+    echo "ERROR: genhtml annotated_nofunc failed"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+# should not be generated..
+grep -E "function .+outOfRangeFnc" annotated_nofunc/synthesize/test.cpp.gcov.html
+if [ 0 == $?
] ; then
+    echo "ERROR: genhtml should not have generated function label"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+
+echo genhtml $DIFFCOV_OPTS -o vanilla --ignore range ./munged.info
+$COVER $GENHTML_TOOL $DIFFCOV_OPTS -o vanilla --ignore range ./munged.info 2>&1 | tee vanilla.log
+if [ 0 != ${PIPESTATUS[0]} ] ; then
+    echo "ERROR: genhtml vanilla failed"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+
+for log in annotate.log vanilla.log ; do
+    grep -E "WARNING.*? contains only .+? lines but coverage data refers to line" $log
+    if [ 0 != $? ] ; then
+        echo "did not find expected synthesize warning message in log"
+        if [ 0 == $KEEP_GOING ] ; then
+            exit 1
+        fi
+    fi
+done
+
+for dir in annotated vanilla ; do
+    grep -E "not long enough" $dir/synthesize/test.cpp.gcov.html
+    if [ 0 != $? ] ; then
+        echo "did not find expected synthesize warning message in HTML"
+        if [ 0 == $KEEP_GOING ] ; then
+            exit 1
+        fi
+    fi
+done
+
+echo genhtml $DIFFCOV_OPTS --annotate-script `pwd`/annotate.sh --show-owners all -o annotateErr2 ./munged2.info
+$COVER $GENHTML_TOOL $DIFFCOV_OPTS --annotate-script `pwd`/annotate.sh --show-owners all -o annotateErr2 ./munged2.info 2>&1 | tee err2.log
+if [ 0 == ${PIPESTATUS[0]} ] ; then
+    echo "ERROR: genhtml did not return error"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+grep -E "ERROR.*? has branchcov but no linecov data" err2.log
+if [ 0 != $? ] ; then
+    echo "did not find expected inconsistent error message in err2.log"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+
+
+echo genhtml $DIFFCOV_OPTS --annotate-script `pwd`/annotate.sh --show-owners all -o annotated2 --ignore inconsistent ./munged2.info
+$COVER $GENHTML_TOOL $DIFFCOV_OPTS --annotate-script `pwd`/annotate.sh --show-owners all -o annotated2 ./munged2.info --ignore inconsistent 2>&1 | tee annotate2.log
+if [ 0 != ${PIPESTATUS[0]} ] ; then
+    echo "ERROR: genhtml annotated2 failed"
+    if [ 0 == $KEEP_GOING ] ; then
+        exit 1
+    fi
+fi
+grep -E "WARNING.*? has branchcov but no linecov data" annotate2.log
+if [ 0 != $?
] ; then + echo "did not find expected inconsistent error message in annotate2.log" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +echo lcov $LCOV_OPTS --ignore range -o range.info -a ./munged.info --filter branch +$COVER $LCOV_TOOL $LCOV_OPTS --ignore range -o range.info -a ./munged.info --filter branch 2>&1 | tee range.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "ERROR: lcov --ignore range failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +COUNT1=`grep -c -i "warning: .*range.* unknown.* line .* there are only" range.log` +if [ 1 != $COUNT1 ] ; then + echo "Missing expected warning: expected 1 found $COUNT1" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +echo lcov $LCOV_OPTS --ignore range -o range.info -a ./munged.info --filter branch --rc warn_once_per_file=0 +$COVER $LCOV_TOOL $LCOV_OPTS --ignore range -o range.info -a ./munged.info --filter branch --rc warn_once_per_file=0 --comment 'insert a comment' 2>&1 | tee range2.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "ERROR: lcov --ignore range2 failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +COUNT2=`grep -c -i "warning: .*range.* unknown.* line .* there are only" range2.log` +if [ 2 != $COUNT2 ] ; then + echo "Expected 2 messages found $COUNT2" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +echo lcov $LCOV_OPTS -o filter.info --filter range -a ./munged.info --filter branch +$COVER $LCOV_TOOL $LCOV_OPTS -o filter.info --filter range -a ./munged.info --filter branch 2>&1 | tee filter.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "ERROR: lcov --filter range failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep -i "warning: .*range.* unknown line .* there are only" filter.log +if [ 0 == $? ] ; then + echo "Found unexpected warning" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +echo "Tests passed" + +if [ "x$COVER" != "x" ] && [ 0 != $LOCAL_COVERAGE ] ; then + cover $COVER_DB + $PERL2LCOV_TOOL -o perlcov.info $COVER_DB + $GENHTML_TOOL -o coverage perlcov.info +fi diff --git a/tests/genhtml/Makefile b/tests/genhtml/Makefile index e7a80ea3..686a1801 100644 --- a/tests/genhtml/Makefile +++ b/tests/genhtml/Makefile @@ -1,6 +1,12 @@ include ../common.mak -TESTS := full.sh part1.sh part2.sh target.sh zero.sh demangle.sh +# disabling some old tests because generated data is inconsistent +# (line/branch/function hit/miss stats do not match). 
+# Those tests have probably outlived their usefulness - so eliminating for now +# rather than enhancing the generation to become consistent +TESTS := full.sh zero.sh demangle.sh relative lambda + +DISABLED := part1.sh part2.sh target.sh clean: rm -rf *.log out_* *.tmp diff --git a/tests/genhtml/demangle.sh b/tests/genhtml/demangle.sh index 300a57d3..d07bb3b3 100755 --- a/tests/genhtml/demangle.sh +++ b/tests/genhtml/demangle.sh @@ -6,6 +6,33 @@ # genhtml_demangle_cpp_params # +KEEP_GOING=0 +while [ $# -gt 0 ] ; do + + OPT=$1 + case $OPT in + + --coverage ) + shift + COVER_DB=$1 + shift + + COVER="perl -MDevel::Cover=-db,${COVER_DB},-coverage,statement,branch,condition,subroutine " + KEEP_GOING=1 + + ;; + + -v | --verbose ) + set -x + shift + ;; + + * ) + break + ;; + esac +done + OUTDIR="out_demangle" STDOUT="demangle_stdout.log" STDERR="demangle_stderr.log" @@ -15,16 +42,16 @@ HTML="${OUTDIR}/genhtml/${SOURCE}.func.html" MYFILT="${PWD}/mycppfilt.sh" function die() { - echo "Error: $*" >&2 - exit 1 + echo "Error: $*" >&2 + exit 1 } function cleanup() { - rm -rf "${OUTDIR}" "${INFO}" "${SOURCE}" + rm -rf "${OUTDIR}" "${INFO}" "${SOURCE}" } function prepare() { - cat >"${INFO}" <"${INFO}" <${STDOUT} 2>${STDERR} - RC=$? + # Run genhtml + echo "CMDLINE: $CMDLINE" + $CMDLINE 2> >(grep -v Devel::Cover: > ${STDERR}) >${STDOUT} + RC=$? - echo "STDOUT_START" - cat ${STDOUT} - echo "STDOUT_STOP" + echo "STDOUT_START" + cat ${STDOUT} + echo "STDOUT_STOP" - echo "STDERR_START" - cat ${STDERR} - echo "STDERR_STOP" + echo "STDERR_START" + cat ${STDERR} + echo "STDERR_STOP" - # Check exit code - [[ $RC -ne 0 ]] && die "Non-zero genhtml exit code $RC" + # Check exit code + [[ $RC -ne 0 && $KEEP_GOING != 1 ]] && die "Non-zero genhtml exit code $RC" - # Output must not contain warnings - if [[ -s ${STDERR} ]] ; then - echo "Error: Output on stderr.log:" - cat ${STDERR} - exit 1 - fi + # Output must not contain warnings + if [[ -s ${STDERR} && $COVER == '' ]] ; then + echo "Error: Output on stderr.log:" + cat ${STDERR} + exit 1 + fi - # Log function names - echo "Found function names:" - grep coverFn ${HTML} + # Log function names + echo "Found function names:" + grep coverFn ${HTML} } prepare @@ -73,44 +101,57 @@ prepare echo "Run 1: No demangling" run "" if grep -q myfunc1 ${HTML} ; then - echo "Success - found myfunc1" + echo "Success - found myfunc1" else - die "Missing function name 'myfunc1' in output" + die "Missing function name 'myfunc1' in output" fi echo echo "Run 2: Demangle using defaults" if type -P c++filt >/dev/null ; then - # Depending on environment, encoded symbols are converted to either - # myfunc2() or myfunc3() - run "--demangle-cpp" - if grep -q 'myfunc[23]()' ${HTML} ; then - echo "Success - found myfunc[23]() converted by c++filt" - else - die "Missing converted function name 'myfunc[23]()' in output" - fi + # Depending on environment, encoded symbols are converted to either + # myfunc2() or myfunc3() + run "--demangle-cpp --" + if grep -q 'myfunc[23]()' ${HTML} ; then + echo "Success - found myfunc[23]() converted by c++filt" + else + die "Missing converted function name 'myfunc[23]()' in output" + fi else - echo "Skipping - missing c++filt tool" + echo "Skipping - missing c++filt tool" fi +# need to quiet warnings because 'run' method croaks if there is +# anything in stderr +IGNORE="--ignore deprecated" echo echo "Run 3: Demangle using custom demangling tool" # mycppfilt.sh with no parameters prepends aaa to each function name -run "--demangle-cpp --rc 
genhtml_demangle_cpp_tool=$MYFILT" +run "--demangle-cpp --rc genhtml_demangle_cpp_tool=$MYFILT $IGNORE" if grep -q 'aaamyfunc' ${HTML} ; then - echo "Success - found myfunc prefixed by mycppfilt.sh" + echo "Success - found myfunc prefixed by mycppfilt.sh" else - die "Missing converted function name 'aaamyfunc' in output" + die "Missing converted function name 'aaamyfunc' in output" fi echo echo "Run 4: Demangle with params set" # mycppfilt.sh with parameter prepends that parameter to to each function name -run "--demangle-cpp --rc genhtml_demangle_cpp_tool=$MYFILT --rc genhtml_demangle_cpp_params='bbb'" +run "--demangle-cpp --rc genhtml_demangle_cpp_tool=$MYFILT --rc genhtml_demangle_cpp_params='bbb' $IGNORE" +if grep -q 'bbbmyfunc' ${HTML} ; then + echo "Success - found myfunc prefixed by custom prefix" +else + die "Missing converted function name 'bbbmyfunc' in output" +fi + +echo +echo "Run 5: Demangle with params set from command line" +# mycppfilt.sh with parameter prepends that parameter to to each function name +run "--demangle-cpp $MYFILT --demangle-cpp 'bbb'" if grep -q 'bbbmyfunc' ${HTML} ; then - echo "Success - found myfunc prefixed by custom prefix" + echo "Success - found myfunc prefixed by custom prefix" else - die "Missing converted function name 'bbbmyfunc' in output" + die "Missing converted function name 'bbbmyfunc' in output" fi # Success diff --git a/tests/genhtml/full.sh b/tests/genhtml/full.sh index ba71970f..b06b44f7 100755 --- a/tests/genhtml/full.sh +++ b/tests/genhtml/full.sh @@ -3,6 +3,33 @@ # Create HTML output for info files containing 100% coverage rates # +KEEP_GOING=0 +while [ $# -gt 0 ] ; do + + OPT=$1 + case $OPT in + + --coverage ) + shift + COVER_DB=$1 + shift + + COVER="perl -MDevel::Cover=-db,${COVER_DB},-coverage,statement,branch,condition,subroutine " + KEEP_GOING=1 + + ;; + + -v | --verbose ) + set -x + shift + ;; + + * ) + break + ;; + esac +done + OUTDIR="out_full" STDOUT="full_stdout.log" STDERR="full_stderr.log" @@ -10,7 +37,7 @@ STDERR="full_stderr.log" rm -rf "${OUTDIR}" # Run genhtml -$GENHTML $FULLINFO -o ${OUTDIR} >${STDOUT} 2>${STDERR} +$GENHTML $FULLINFO -o ${OUTDIR} 2> >(grep -v Devel::Cover: > ${STDERR}) >${STDOUT} RC=$? echo "STDOUT_START" @@ -22,16 +49,16 @@ cat ${STDERR} echo "STDERR_STOP" # Check exit code -if [[ $RC -ne 0 ]] ; then - echo "Error: Non-zero genhtml exit code $RC" - exit 1 +if [[ $RC -ne 0 && $KEEP_GOING != 1 ]] ; then + echo "Error: Non-zero genhtml exit code $RC" + exit 1 fi # Output must not contain warnings -if [[ -s ${STDERR} ]] ; then - echo "Error: Output on stderr.log:" - cat ${STDERR} - exit 1 +if [[ -s ${STDERR} && $COVER == '' ]] ; then + echo "Error: Output on stderr.log:" + cat ${STDERR} + exit 1 fi # Output must indicate correct coverage rates @@ -40,16 +67,16 @@ check_counts "${FULLCOUNTS}" "${STDOUT}" || exit 1 # Check output directory if [[ ! 
-d "$OUTDIR" ]] ; then - echo "Error: Output directory was not created" - exit 1 + echo "Error: Output directory was not created" + exit 1 fi # Check output files NUM_HTML_FILES=$(find ${OUTDIR} -name \*.html | wc -l) if [[ "$NUM_HTML_FILES" -eq 0 ]] ; then - echo "Error: No HTML file was generated" - exit 1 + echo "Error: No HTML file was generated" + exit 1 fi # Success diff --git a/tests/genhtml/lambda/Makefile b/tests/genhtml/lambda/Makefile new file mode 100644 index 00000000..24f5514d --- /dev/null +++ b/tests/genhtml/lambda/Makefile @@ -0,0 +1,6 @@ +include ../../common.mak + +TESTS := lambda.sh + +clean: + $(shell ./lambda.sh --clean) diff --git a/tests/genhtml/lambda/lambda.cpp b/tests/genhtml/lambda/lambda.cpp new file mode 100644 index 00000000..f28737e5 --- /dev/null +++ b/tests/genhtml/lambda/lambda.cpp @@ -0,0 +1,83 @@ +// borrowed from https://en.cppreference.com/w/cpp/utility/functional/bind +#include +#include +#include +#include + +void f(int n1, int n2, int n3, const int& n4, int n5) +{ + std::cout << n1 << ' ' << n2 << ' ' << n3 << ' ' << n4 << ' ' << n5 << '\n'; +} + +int g(int n1) +{ + return n1; +} + +struct Foo +{ + void print_sum(int n1, int n2) + { + std::cout << n1 + n2 << '\n'; + } + + int data = 10; +}; + +int main() +{ + using namespace std::placeholders; // for _1, _2, _3... + + std::cout << "1) argument reordering and pass-by-reference: "; + int n = 7; + // (_1 and _2 are from std::placeholders, and represent future + // arguments that will be passed to f1) + auto f1 = std::bind(f, _2, 42, _1, std::cref(n), n); + n = 10; + f1(1, 2, 1001); // 1 is bound by _1, 2 is bound by _2, 1001 is unused + // makes a call to f(2, 42, 1, n, 7) + + std::cout << "2) achieving the same effect using a lambda: "; + n = 7; + auto lambda = [&ncref = n, n](auto a, auto b, auto /*unused*/) + { + f(b, 42, a, ncref, n); + }; + n = 10; + lambda(1, 2, 1001); // same as a call to f1(1, 2, 1001) + + std::cout << "3) nested bind subexpressions share the placeholders: "; + auto f2 = std::bind(f, _3, std::bind(g, _3), _3, 4, 5); + f2(10, 11, 12); // makes a call to f(12, g(12), 12, 4, 5); + + std::cout << "4) bind a RNG with a distribution: "; + std::default_random_engine e; + std::uniform_int_distribution<> d(0, 10); + auto rnd = std::bind(d, e); // a copy of e is stored in rnd + for (int n = 0; n < 10; ++n) + std::cout << rnd() << ' '; + std::cout << '\n'; + + std::cout << "5) bind to a pointer to member function: "; + Foo foo; + auto f3 = std::bind(&Foo::print_sum, &foo, 95, _1); + f3(5); + + std::cout << "6) bind to a mem_fn that is a pointer to member function: "; + auto ptr_to_print_sum = std::mem_fn(&Foo::print_sum); + auto f4 = std::bind(ptr_to_print_sum, &foo, 95, _1); + f4(5); + + std::cout << "7) bind to a pointer to data member: "; + auto f5 = std::bind(&Foo::data, _1); + std::cout << f5(foo) << '\n'; + + std::cout << "8) bind to a mem_fn that is a pointer to data member: "; + auto ptr_to_data = std::mem_fn(&Foo::data); + auto f6 = std::bind(ptr_to_data, _1); + std::cout << f6(foo) << '\n'; + + std::cout << "9) use smart pointers to call members of the referenced objects: "; + std::cout << f6(std::make_shared(foo)) << ' ' + << f6(std::make_unique(foo)) << '\n'; +} diff --git a/tests/genhtml/lambda/lambda.sh b/tests/genhtml/lambda/lambda.sh new file mode 100755 index 00000000..03c193bc --- /dev/null +++ b/tests/genhtml/lambda/lambda.sh @@ -0,0 +1,64 @@ +#!/bin/bash + +# lambda function extents +set +x + +source ../../common.tst +rm -rf *.txt* *.json dumper* report lambda *.gcda 
*.gcno *.info + +clean_cover + +if [[ 1 == "$CLEAN_ONLY" ]] ; then + exit 0 +fi + +LCOV_OPTS="--branch $PARALLEL $PROFILE" +# gcc/4.8.5 (and possibly other old versions) generate inconsistent line/function data +IFS='.' read -r -a VER <<< `${CC} -dumpversion` +if [ "${VER[0]}" -lt 5 ] ; then + IGNORE="--ignore inconsistent" + # and filter exception branches to avoid spurious differences for old compiler + FILTER='--filter branch' + + # gcc older than 5 doesn't support lambda + echo "Compiler version is too old - skipping lambda test" + exit 0 +fi + +if ! type ${CXX} >/dev/null 2>&1 ; then + echo "Missing tool: ${CXX}" >&2 + exit 2 +fi + +${CXX} -o lambda --coverage lambda.cpp -std=c++1y + +./lambda +if [ 0 != $? ] ; then + echo "Error: 'lambda' returned error code" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +$COVER $LCOV_TOOL $LCOV_OPTS -o lambda.info --capture -d . --demangle --rc derive_function_end_line=0 +if [ 0 != $? ] ; then + echo "Error: unexpected error code from lcov" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + + +$COVER $GENHTML_TOOL $LCOV_OPTS -o report lambda.info --show-proportion +if [ 0 != $? ] ; then + echo "Error: unexpected error code from genhtml" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +echo "Tests passed" + +if [ "x$COVER" != "x" ] && [ $LOCAL_COVERAGE == 1 ]; then + cover +fi diff --git a/tests/genhtml/mycppfilt.sh b/tests/genhtml/mycppfilt.sh index d33595dd..af5fe494 100755 --- a/tests/genhtml/mycppfilt.sh +++ b/tests/genhtml/mycppfilt.sh @@ -15,7 +15,7 @@ else fi while read LINE ; do - echo "${PREFIX}${LINE}" + echo $LINE | perl -pe "s/^FN(DA)?:([^,]+),(.+)$/FN\1:\2,${PREFIX}\3/"; unset LINE done diff --git a/tests/genhtml/part1.sh b/tests/genhtml/part1.sh index d05e39ab..d28f6127 100755 --- a/tests/genhtml/part1.sh +++ b/tests/genhtml/part1.sh @@ -3,6 +3,33 @@ # Create HTML output for info files containing partial coverage rates # +KEEP_GOING=0 +while [ $# -gt 0 ] ; do + + OPT=$1 + case $OPT in + + --coverage ) + shift + COVER_DB=$1 + shift + + COVER="perl -MDevel::Cover=-db,$COVER_DB,-coverage,statement,branch,condition,subroutine " + KEEP_GOING=1 + + ;; + + -v | --verbose ) + set -x + shift + ;; + + * ) + break + ;; + esac +done + OUTDIR="out_part1" STDOUT="part1_stdout.log" STDERR="part1_stderr.log" @@ -10,7 +37,7 @@ STDERR="part1_stderr.log" rm -rf "${OUTDIR}" # Run genhtml -$GENHTML $PART1INFO -o ${OUTDIR} >${STDOUT} 2>${STDERR} +$GENHTML $PART1INFO -o ${OUTDIR} 2> >(grep -v Devel::Cover: > ${STDERR}) >${STDOUT} RC=$? echo "STDOUT_START" @@ -22,16 +49,16 @@ cat ${STDERR} echo "STDERR_STOP" # Check exit code -if [[ $RC -ne 0 ]] ; then - echo "Error: Non-zero genhtml exit code $RC" - exit 1 +if [[ $RC -ne 0 && $KEEP_GOING != 1 ]] ; then + echo "Error: Non-zero genhtml exit code $RC" + exit 1 fi # Output must not contain warnings -if [[ -s ${STDERR} ]] ; then - echo "Error: Output on stderr.log:" - cat ${STDERR} - exit 1 +if [[ -s ${STDERR} && $COVER == '' ]] ; then + echo "Error: Output on stderr.log:" + cat ${STDERR} + exit 1 fi # Output must indicate correct coverage rates @@ -40,16 +67,16 @@ check_counts "${PART1COUNTS}" "${STDOUT}" || exit 1 # Check output directory if [[ ! 
-d "$OUTDIR" ]] ; then - echo "Error: Output directory was not created" - exit 1 + echo "Error: Output directory was not created" + exit 1 fi # Check output files NUM_HTML_FILES=$(find ${OUTDIR} -name \*.html | wc -l) if [[ "$NUM_HTML_FILES" -eq 0 ]] ; then - echo "Error: No HTML file was generated" - exit 1 + echo "Error: No HTML file was generated" + exit 1 fi # Success diff --git a/tests/genhtml/part2.sh b/tests/genhtml/part2.sh index 1b6e8f20..73ded3b7 100755 --- a/tests/genhtml/part2.sh +++ b/tests/genhtml/part2.sh @@ -3,6 +3,33 @@ # Create HTML output for info files containing partial coverage rates # +KEEP_GOING=0 +while [ $# -gt 0 ] ; do + + OPT=$1 + case $OPT in + + --coverage ) + shift + COVER_DB=$1 + shift + + COVER="perl -MDevel::Cover=-db,${COVER_DB},-coverage,statement,branch,condition,subroutine " + KEEP_GOING=1 + + ;; + + -v | --verbose ) + set -x + shift + ;; + + * ) + break + ;; + esac +done + OUTDIR="out_part2" STDOUT="part2_stdout.log" STDERR="part2_stderr.log" @@ -10,7 +37,7 @@ STDERR="part2_stderr.log" rm -rf "${OUTDIR}" # Run genhtml -$GENHTML $PART2INFO -o ${OUTDIR} >${STDOUT} 2>${STDERR} +$GENHTML $PART2INFO -o ${OUTDIR} 2> >(grep -v Devel::Cover: > ${STDERR}) >${STDOUT} RC=$? echo "STDOUT_START" @@ -22,16 +49,16 @@ cat ${STDERR} echo "STDERR_STOP" # Check exit code -if [[ $RC -ne 0 ]] ; then - echo "Error: Non-zero genhtml exit code $RC" - exit 1 +if [[ $RC -ne 0 && $KEEP_GOING != 1 ]] ; then + echo "Error: Non-zero genhtml exit code $RC" + exit 1 fi # Output must not contain warnings -if [[ -s ${STDERR} ]] ; then - echo "Error: Output on stderr.log:" - cat ${STDERR} - exit 1 +if [[ -s ${STDERR} && $COVER == '' ]] ; then + echo "Error: Output on stderr.log:" + cat ${STDERR} + exit 1 fi # Output must indicate correct coverage rates @@ -40,16 +67,16 @@ check_counts "${PART2COUNTS}" "${STDOUT}" || exit 1 # Check output directory if [[ ! -d "$OUTDIR" ]] ; then - echo "Error: Output directory was not created" - exit 1 + echo "Error: Output directory was not created" + exit 1 fi # Check output files NUM_HTML_FILES=$(find ${OUTDIR} -name \*.html | wc -l) if [[ "$NUM_HTML_FILES" -eq 0 ]] ; then - echo "Error: No HTML file was generated" - exit 1 + echo "Error: No HTML file was generated" + exit 1 fi # Success diff --git a/tests/genhtml/relative/Makefile b/tests/genhtml/relative/Makefile new file mode 100644 index 00000000..11403a22 --- /dev/null +++ b/tests/genhtml/relative/Makefile @@ -0,0 +1,6 @@ +include ../../common.mak + +TESTS := relative.sh + +clean: + $(shell ./relative.sh --clean) diff --git a/tests/genhtml/relative/relative.info b/tests/genhtml/relative/relative.info new file mode 100644 index 00000000..d4bb9f6b --- /dev/null +++ b/tests/genhtml/relative/relative.info @@ -0,0 +1,12 @@ +SF:lib/other_class.dart +DA:2,1 +DA:3,1 +LF:2 +LH:2 +end_of_record +SF:lib/src/sample_class.dart +DA:2,1 +DA:3,1 +LF:2 +LH:2 +end_of_record diff --git a/tests/genhtml/relative/relative.sh b/tests/genhtml/relative/relative.sh new file mode 100755 index 00000000..9979b8b6 --- /dev/null +++ b/tests/genhtml/relative/relative.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +# test relative path usage +set +x + +source ../../common.tst + +rm -rf *.txt* *.json dumper* relative + +clean_cover + +if [[ 1 == "$CLEAN_ONLY" ]] ; then + exit 0 +fi + +LCOV_OPTS="--branch $PARALLEL $PROFILE" +# gcc/4.8.5 (and possibly other old versions) generate inconsistent line/function data +IFS='.' 
read -r -a VER <<< `${CC} -dumpversion` +if [ "${VER[0]}" -lt 5 ] ; then + IGNORE="--ignore inconsistent" + # and filter exception branches to avoid spurious differences for old compiler + FILTER='--filter branch' +fi + +if ! type ${CXX} >/dev/null 2>&1 ; then + echo "Missing tool: ${CXX}" >&2 + exit 2 +fi + +$COVER $GENHTML_TOOL $LCOV_OPTS -o relative relative.info --ignore source,source --synthesize +if [ 0 != $? ] ; then + echo "Error: unexpected error code from genhtml" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +for dir in lib src lib/src ; do + if [ -e relative/$dir/$dir ] ; then + echo "Error: unexpected duplicated path to '$dir'" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi +done + +for f in lib lib/other_class.dart.gcov.html lib/src lib/src/sample_class.dart.gcov.html ; do + if [ ! -e relative/$f ] ; then + echo "Error: can't find '$f'" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi +done + + +echo "Tests passed" + +if [ "x$COVER" != "x" ] && [ $LOCAL_COVERAGE == 1 ]; then + cover +fi diff --git a/tests/genhtml/target.sh b/tests/genhtml/target.sh index 716b3c7f..f994adbd 100755 --- a/tests/genhtml/target.sh +++ b/tests/genhtml/target.sh @@ -4,6 +4,34 @@ # specified in mkinfo profile. # +KEEP_GOING=0 +while [ $# -gt 0 ] ; do + + OPT=$1 + case $OPT in + + --coverage ) + shift + COVER_DB=$1 + shift + + COVER="perl -MDevel::Cover=-db,${COVER_DB},-coverage,statement,branch,condition,subroutine " + KEEP_GOING=1 + + ;; + + -v | --verbose ) + set -x + shift + ;; + + + * ) + break + ;; + esac +done + OUTDIR="out_target" STDOUT="target_stdout.log" STDERR="target_stderr.log" @@ -11,7 +39,7 @@ STDERR="target_stderr.log" rm -rf "${OUTDIR}" # Run genhtml -$GENHTML $TARGETINFO -o ${OUTDIR} >${STDOUT} 2>${STDERR} +$GENHTML $TARGETINFO -o ${OUTDIR} 2> >(grep -v Devel::Cover: > ${STDERR}) >${STDOUT} RC=$? echo "STDOUT_START" @@ -23,16 +51,16 @@ cat ${STDERR} echo "STDERR_STOP" # Check exit code -if [[ $RC -ne 0 ]] ; then - echo "Error: Non-zero genhtml exit code $RC" - exit 1 +if [[ $RC -ne 0 && $KEEP_GOING != 1 ]] ; then + echo "Error: Non-zero genhtml exit code $RC" + exit 1 fi # Output must not contain warnings -if [[ -s ${STDERR} ]] ; then - echo "Error: Output on stderr.log:" - cat ${STDERR} - exit 1 +if [[ -s ${STDERR} && $COVER == '' ]] ; then + echo "Error: Output on stderr.log:" + cat ${STDERR} + exit 1 fi # Output must indicate correct coverage rates @@ -41,16 +69,16 @@ check_counts "${TARGETCOUNTS}" "${STDOUT}" || exit 1 # Check output directory if [[ ! 
-d "$OUTDIR" ]] ; then - echo "Error: Output directory was not created" - exit 1 + echo "Error: Output directory was not created" + exit 1 fi # Check output files NUM_HTML_FILES=$(find ${OUTDIR} -name \*.html | wc -l) if [[ "$NUM_HTML_FILES" -eq 0 ]] ; then - echo "Error: No HTML file was generated" - exit 1 + echo "Error: No HTML file was generated" + exit 1 fi # Success diff --git a/tests/genhtml/zero.sh b/tests/genhtml/zero.sh index 34d1ddd4..47fdc79e 100755 --- a/tests/genhtml/zero.sh +++ b/tests/genhtml/zero.sh @@ -3,6 +3,33 @@ # Create HTML output for info files containing zero coverage rates # +KEEP_GOING=0 +while [ $# -gt 0 ] ; do + + OPT=$1 + case $OPT in + + --coverage ) + shift + COVER_DB=$1 + shift + + COVER="perl -MDevel::Cover=-db,${COVER_DB},-coverage,statement,branch,condition,subroutine " + KEEP_GOING=1 + + ;; + + -v | --verbose ) + set -x + shift + ;; + + * ) + break + ;; + esac +done + OUTDIR="out_zero" STDOUT="zero_stdout.log" STDERR="zero_stderr.log" @@ -10,7 +37,7 @@ STDERR="zero_stderr.log" rm -rf "${OUTDIR}" # Run genhtml -$GENHTML $ZEROINFO -o ${OUTDIR} >${STDOUT} 2>${STDERR} +$GENHTML $ZEROINFO -o ${OUTDIR} 2> >(grep -v Devel::Cover: 1>&1 > $STDERR) >${STDOUT} RC=$? echo "STDOUT_START" @@ -22,16 +49,16 @@ cat ${STDERR} echo "STDERR_STOP" # Check exit code -if [[ $RC -ne 0 ]] ; then - echo "Error: Non-zero genhtml exit code $RC" - exit 1 +if [[ $RC -ne 0 && $KEEP_GOING != 1 ]] ; then + echo "Error: Non-zero genhtml exit code $RC" + exit 1 fi # Output must not contain warnings -if [[ -s ${STDERR} ]] ; then - echo "Error: Output on stderr.log:" - cat ${STDERR} - exit 1 +if [[ -s ${STDERR} && $COVER == '' ]] ; then + echo "Error: Output on stderr.log:" + cat ${STDERR} + exit 1 fi # Output must indicate correct coverage rates @@ -40,16 +67,16 @@ check_counts "${ZEROCOUNTS}" "${STDOUT}" || exit 1 # Check output directory if [[ ! -d "$OUTDIR" ]] ; then - echo "Error: Output directory was not created" - exit 1 + echo "Error: Output directory was not created" + exit 1 fi # Check output files NUM_HTML_FILES=$(find ${OUTDIR} -name \*.html | wc -l) if [[ "$NUM_HTML_FILES" -eq 0 ]] ; then - echo "Error: No HTML file was generated" - exit 1 + echo "Error: No HTML file was generated" + exit 1 fi # Success diff --git a/tests/lcov/Makefile b/tests/lcov/Makefile index 3c056688..d0203b9d 100644 --- a/tests/lcov/Makefile +++ b/tests/lcov/Makefile @@ -1,3 +1,3 @@ include ../common.mak -TESTS := add/ diff/ misc/ summary/ +TESTS := add/ misc/ summary/ extract/ demangle/ exception/ gcov-tool/ branch/ merge/ format errs multiple follow initializer lambda mcdc diff --git a/tests/lcov/add/Makefile b/tests/lcov/add/Makefile index 3f1e869f..d9e5359c 100644 --- a/tests/lcov/add/Makefile +++ b/tests/lcov/add/Makefile @@ -1,6 +1,11 @@ include ../../common.mak -TESTS := zero.sh zero2.sh full.sh full2.sh part.sh part2.sh concatenated4.sh +# disabling some old tests because generated data is inconsistent +# (line/branch/function hit/miss stats do not match). 
+# Those tests have probably outlived their usefulness - so eliminating for now +# rather than enhancing the generation to become consistent +TESTS := zero.sh zero2.sh full.sh full2.sh prune.sh track.sh +DISABLED: part.sh part2.sh concatenated4.sh clean: - rm -f *.info + rm -f *.info prune prune2 prune3 track diff --git a/tests/lcov/add/concatenated4.sh b/tests/lcov/add/concatenated4.sh index b3ecf6b3..edc28695 100755 --- a/tests/lcov/add/concatenated4.sh +++ b/tests/lcov/add/concatenated4.sh @@ -8,4 +8,4 @@ cat "$TARGETINFO" "$TARGETINFO" "$TARGETINFO" "$TARGETINFO" >concatenated.info -exec ./helper.sh 0.25 "$TARGETINFO" concatenated.info +exec ./helper.sh $@ 0.25 "$TARGETINFO" concatenated.info diff --git a/tests/lcov/add/full.sh b/tests/lcov/add/full.sh index 483a12d2..7c22c1b7 100755 --- a/tests/lcov/add/full.sh +++ b/tests/lcov/add/full.sh @@ -5,4 +5,4 @@ # Add single 100% coverage file - output should be same as input # -exec ./helper.sh 1 "$FULLINFO" "$FULLINFO" +exec ./helper.sh $@ 1 "$FULLINFO" "$FULLINFO" diff --git a/tests/lcov/add/full2.sh b/tests/lcov/add/full2.sh index 07f9d49a..f1a69f9d 100755 --- a/tests/lcov/add/full2.sh +++ b/tests/lcov/add/full2.sh @@ -6,4 +6,4 @@ # be same as input # -exec ./helper.sh 0.5 "$FULLINFO" "$FULLINFO" "$FULLINFO" +exec ./helper.sh $@ 0.5 "$FULLINFO" "$FULLINFO" "$FULLINFO" diff --git a/tests/lcov/add/helper.sh b/tests/lcov/add/helper.sh index 4ff5ffeb..2eb42abb 100755 --- a/tests/lcov/add/helper.sh +++ b/tests/lcov/add/helper.sh @@ -8,18 +8,49 @@ # with multiplier. Compare against reference file. Report deviations. # +echo "helper: $@" + +KEEP_GOING=0 +while [ $# -gt 0 ] ; do + + OPT=$1 + case $OPT in + + --coverage ) + shift + COVER_DB=$1 + shift + + COVER="perl -MDevel::Cover=-db,${COVER_DB},-coverage,statement,branch,condition,subroutine " + KEEP_GOING=1 + + ;; + + --verbose | -v ) + set -x + shift + ;; + + * ) + break + ;; + esac +done + + MULTI=$1 REFFILE=$2 shift 2 +echo "helper: $@" ADD= for INFO in $* ; do - ADD="$ADD -a $INFO" + ADD="$ADD -a $INFO" done if [ -z "$MULTI" -o -z "$REFFILE" -o -z "$ADD" ] ; then - echo "Usage: $0 [...]" >&2 - exit 1 + echo "Usage: $0 [...]" >&2 + exit 1 fi OUTFILE="add_"$(basename "$REFFILE") @@ -28,19 +59,22 @@ SORTFILE="norm_$OUTFILE" set -x echo "Adding files..." +echo $LCOV $ADD -o "$OUTFILE" if ! $LCOV $ADD -o "$OUTFILE" ; then - echo "Error: lcov returned with non-zero exit code $?" >&2 - exit 1 + if [ $KEEP_GOING != 1 ] ; then + echo "Error: lcov returned with non-zero exit code $?" >&2 + exit 1 + fi fi echo "Normalizing result..." if ! norminfo "$OUTFILE" "$MULTI" > "$SORTFILE" ; then - echo "Error: Normalization of lcov result file failed" >&2 - exit 1 + echo "Error: Normalization of lcov result file failed" >&2 + exit 1 fi echo "Comparing with reference..." if ! 
diff -u "$REFFILE" "$SORTFILE" ; then - echo "Error: Result of combination differs from reference file" >&2 - exit 1 + echo "Error: Result of combination differs from reference file" >&2 + exit 1 fi diff --git a/tests/lcov/add/part.sh b/tests/lcov/add/part.sh index b007484d..035bb1b1 100755 --- a/tests/lcov/add/part.sh +++ b/tests/lcov/add/part.sh @@ -6,4 +6,4 @@ # be same as input # -exec ./helper.sh 1 "$PART1INFO" "$PART1INFO" +exec ./helper.sh $@ 1 "$PART1INFO" "$PART1INFO" diff --git a/tests/lcov/add/part2.sh b/tests/lcov/add/part2.sh index a537dd97..3eb55f29 100755 --- a/tests/lcov/add/part2.sh +++ b/tests/lcov/add/part2.sh @@ -6,4 +6,4 @@ # should be same as target file # -exec ./helper.sh 1 "$TARGETINFO" "$PART1INFO" "$PART2INFO" +exec ./helper.sh $@ 1 "$TARGETINFO" "$PART1INFO" "$PART2INFO" diff --git a/tests/lcov/add/prune.sh b/tests/lcov/add/prune.sh new file mode 100755 index 00000000..7ad38ccd --- /dev/null +++ b/tests/lcov/add/prune.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env bash +set +x +: ${USER:="$(id -u -n)"} + +source ../../common.tst + +if [[ 1 == $CLEAN_ONLY ]] ; then + exit 0 +fi + +# adding zero does not change anything +$COVER $LCOV_TOOL -o prune -a $FULLINFO -a $ZEROINFO --prune +if [[ $? != 0 && $KEEP_GOING != 1 ]] ; then + echo "lcov -prune failed" + exit 1 +fi +PRUNED=`cat prune` +if [ "$PRUNED" != "$FULLINFO" ] ; then + echo "Expected '$FULLINFO' - got '$PRUNED'" + exit 1 +fi + +# expect that all the additions did something... +# note that the generated data is inconsistent: sometimes, function +# has zero hit count but some contained lines are hit +$COVER $LCOV_TOOL -o prune2 -a $PART1INFO -a $PART2INFO -a $FULLINFO --prune --ignore inconsistent +if [[ $? != 0 && $KEEP_GOING != 1 ]] ; then + echo "lcov -prune2 failed" + exit 1 +fi +PRUNED2=`cat prune2` +EXP=$(printf "$PART1INFO\n$PART2INFO\n$FULLINFO\n") +if [ "$PRUNED2" != "$EXP" ] ; then + echo "Expected 1 '$EXP' - got '$PRUNED2'" + exit 1 +fi + +# sorting the input changes order so different file is pruned (only 'full' remains) +$COVER $LCOV_TOOL -o prune3s -a $PART1INFO -a $PART2INFO -a $FULLINFO --prune --ignore inconsistent --rc sort_input=1 +if [[ $? != 0 && $KEEP_GOING != 1 ]] ; then + echo "lcov -prune2 failed" + exit 1 +fi +PRUNED3S=`cat prune3s` +EXP2=$(printf "$FULLINFO\n") +if [ "$PRUNED3S" != "$EXP2" ] ; then + echo "Expected 1 '$EXP2' - got '$PRUNED3S'" + exit 1 +fi + +# using the --sort-input flag +$COVER $LCOV_TOOL -o prune3t -a $PART1INFO -a $PART2INFO -a $FULLINFO --prune --ignore inconsistent --sort-input +if [[ $? != 0 && $KEEP_GOING != 1 ]] ; then + echo "lcov -prune2 failed" + exit 1 +fi +PRUNED3T=`cat prune3t` +EXP3=$(printf "$FULLINFO\n") +if [ "$PRUNED3T" != "$EXP3" ] ; then + echo "Expected 2 '$EXP3' - got '$PRUNED3T'" + exit 1 +fi + +# expect no effect from adding 'part1' or 'part2' after 'full' +$COVER $LCOV_TOOL -o prune3 -a $FULLINFO -a $PART1INFO -a $PART2INFO --prune --ignore inconsistent +if [[ $? 
!= 0 && $KEEP_GOING != 1 ]] ; then + echo "lcov -prune3 failed" + exit 1 +fi +PRUNED3=`cat prune3` +if [ "$PRUNED3" != "$FULLINFO" ] ; then + echo "Expected '$FULLINFO' - got '$PRUNED3'" + exit 1 +fi + +if [ "x$COVER" != "x" ] && [ 0 != $LOCAL_COVERAGE ] ; then + cover +fi diff --git a/tests/lcov/add/track.sh b/tests/lcov/add/track.sh new file mode 100755 index 00000000..9b4ae82c --- /dev/null +++ b/tests/lcov/add/track.sh @@ -0,0 +1,89 @@ +#!/usr/bin/env bash + +# test function coverage mapping (trivial tests at the moment) + +COVER_DB='cover_db' +LOCAL_COVERAGE=1 +KEEP_GOING=0 +COVER= + +while [ $# -gt 0 ] ; do + + OPT=$1 + shift + case $OPT in + + --clean | clean ) + CLEAN_ONLY=1 + ;; + + -v | --verbose | verbose ) + set -x + ;; + + --keep-going ) + KEEP_GOING=1 + ;; + + --coverage ) + if [[ "$1"x != 'x' && $1 != "-"* ]] ; then + COVER_DB=$1 + LOCAL_COVERAGE=0 + shift + fi + echo '$LCOV' + if [[ $LCOV =~ 'perl' ]] ; then + COVER= + else + COVER="perl -MDevel::Cover=-db,$COVER_DB,-coverage,statement,branch,condition,subroutine " + fi + KEEP_GOING=1 + ;; + + --home | -home ) + LCOV_HOME=$1 + shift + if [ ! -f $LCOV_HOME/bin/lcov ] ; then + echo "LCOV_HOME '$LCOV_HOME' does not exist" + exit 1 + fi + ;; + + + * ) + echo "Error: unexpected option '$OPT'" + exit 1 + ;; + esac +done + +if [ "x$COVER" != 'x' ] && [ 0 != $LOCAL_COVERAGE ] ; then + cover -delete +fi + +if [ 'x' == "x$GENHTML_TOOL" ] ; then + GENHTML_TOOL=${LCOV_HOME}/bin/genhtml + LCOV_TOOL=${LCOV_HOME}/bin/lcov + GENINFO_TOOL=${LCOV_HOME}/bin/geninfo +fi + +# adding zero does not change anything +$COVER $LCOV_TOOL -o track -a $FULLINFO -a $ZEROINFO --map-functions +if [[ $? != 0 && $KEEP_GOING != 1 ]] ; then + echo "lcov -map-functions failed" + exit 1 +fi +grep $ZEROINFO track +if [ $? == 0 ] ; then + echo "Expected not to find '$ZEROINFO'" + exit 1 +fi +grep $FULLINFO track +if [ $? 
!= 0 ] ; then
+    echo "Expected to find '$FULLINFO'"
+    exit 1
+fi
+
+if [ "x$COVER" != "x" ] && [ 0 != $LOCAL_COVERAGE ] ; then
+    cover
+fi
diff --git a/tests/lcov/add/zero.sh b/tests/lcov/add/zero.sh
index c0db5bcf..7d41b2f8 100755
--- a/tests/lcov/add/zero.sh
+++ b/tests/lcov/add/zero.sh
@@ -5,4 +5,4 @@
 # Add single zero coverage file - output should be same as input
 #
 
-exec ./helper.sh 1 "$ZEROINFO" "$ZEROINFO"
+exec ./helper.sh $@ 1 "$ZEROINFO" "$ZEROINFO"
diff --git a/tests/lcov/add/zero2.sh b/tests/lcov/add/zero2.sh
index 0b442f6e..0bcd72ce 100755
--- a/tests/lcov/add/zero2.sh
+++ b/tests/lcov/add/zero2.sh
@@ -5,4 +5,4 @@
 # Add two zero coverage files - output should be same as input
 #
 
-exec ./helper.sh 1 "$ZEROINFO" "$ZEROINFO" "$ZEROINFO"
+exec ./helper.sh $@ 1 "$ZEROINFO" "$ZEROINFO" "$ZEROINFO"
diff --git a/tests/lcov/branch/Makefile b/tests/lcov/branch/Makefile
new file mode 100644
index 00000000..2404a677
--- /dev/null
+++ b/tests/lcov/branch/Makefile
@@ -0,0 +1,6 @@
+include ../../common.mak
+
+TESTS := branch.sh
+
+clean:
+	$(shell ./branch.sh --clean)
diff --git a/tests/lcov/branch/branch.cpp b/tests/lcov/branch/branch.cpp
new file mode 100644
index 00000000..1473eda4
--- /dev/null
+++ b/tests/lcov/branch/branch.cpp
@@ -0,0 +1,39 @@
+/**
+ * @file branch.cpp
+ * @author Henry Cox
+ * @date Wed Jun 7 09:25:11 2023
+ *
+ * @brief test branches with different numbers of expressions on same line
+ */
+
+#include <iostream>
+
+template <int v>
+void func(bool a, bool b)
+{
+    if ((v == 1 && a) || (v == 0 && a && b))
+        std::cout << "true";
+}
+
+#ifdef MACRO
+# define EXPR a || b
+#else
+# define EXPR a
+#endif
+int
+main(int ac, char **av)
+{
+    bool a = ac > 1;
+    bool b = ac > 2;
+
+    if (EXPR) {
+        std::cout << "EXPR was true" << std::endl;
+    }
+#ifdef MACRO
+    func<1>(a, b);
+#else
+    func<0>(a, b);
+#endif
+    func<2>(a, b);
+    return 0;
+}
diff --git a/tests/lcov/branch/branch.sh b/tests/lcov/branch/branch.sh
new file mode 100755
index 00000000..25a0b987
--- /dev/null
+++ b/tests/lcov/branch/branch.sh
@@ -0,0 +1,148 @@
+#!/bin/bash
+set +x
+
+source ../../common.tst
+
+rm -rf *.gcda *.gcno a.out *.info* *.txt* *.json dumper* testRC *.gcov *.gcov.* no_macro* macro* total.*
+if [ -d separate ] ; then
+    chmod -R u+w separate
+    rm -rf separate
+fi
+
+clean_cover
+
+if [[ 1 == $CLEAN_ONLY ]] ; then
+    exit 0
+fi
+
+#use geninfo to capture - to collect coverage data
+CAPTURE="$GENINFO_TOOL ."
+#CAPTURE="$LCOV_TOOL --capture --directory ."
+
+ROOT=`pwd`
+PARENT=`(cd .. ; pwd)`
+
+LCOV_OPTS="--branch $PARALLEL $PROFILE"
+# gcc/4.8.5 (and possibly other old versions) generate inconsistent line/function data
+IFS='.' read -r -a VER <<< `${CC} -dumpversion`
+if [ "${VER[0]}" -lt 5 ] ; then
+    IGNORE="--ignore inconsistent"
+fi
+
+# filter exception branches to avoid spurious differences for old compiler
+FILTER='--filter branch'
+
+
+if ! type ${CXX} >/dev/null 2>&1 ; then
+    echo "Missing tool: ${CXX}" >&2
+    exit 2
+fi
+
+${CXX} -std=c++1y --coverage branch.cpp -o no_macro
+if [ 0 != $? ] ; then
+    echo "Error: unexpected error from gcc -o no_macro"
+    exit 1
+fi
+
+./no_macro 1
+if [ 0 != $? ] ; then
+    echo "Error: unexpected error return from no_macro"
+    exit 1
+fi
+
+$COVER $CAPTURE $LCOV_OPTS . -o no_macro.info $FILTER $IGNORE --no-external
+if [ 0 != $?
] ; then + echo "Error: unexpected error code from lcov --capture" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +COUNT=`grep -c BRDA: no_macro.info` +if [ $COUNT != 6 ] ; then + echo "ERROR: unexpected branch count in no_macro: $COUNT (expected 6)" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +rm -f *.gcda *.gcno + +${CXX} -std=c++1y --coverage branch.cpp -DMACRO -o macro +if [ 0 != $? ] ; then + echo "Error: unexpected error from gcc -o macro" + exit 1 +fi + +./macro 1 +if [ 0 != $? ] ; then + echo "Error: unexpected error return from macro" + exit 1 +fi + +$COVER $CAPTURE $LCOV_OPTS -o macro.info $FILTER $IGNORE --no-external +if [ 0 != $? ] ; then + echo "Error: unexpected error code from lcov --capture" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +COUNT2=`grep -c BRDA: macro.info` +if [ $COUNT2 != 6 ] ; then + echo "ERROR: unexpected branch count in macro: $COUNT2 (expected 6)" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +$COVER $LCOV_TOOL $LCOV_OPTS -a no_macro.info -a macro.info -o total.info $IGNORE $FILTER +if [ 0 != $? ] ; then + echo "Error: unexpected error code from lcov --aggregate" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# in 'macro' test, older versions of gcc show 2 blocks on line 29, each with +# newer gcc shows 1 block with 4 branches +# This output data format affects merging +grep -E BRDA:[0-9]+,0,3 macro.info +if [ $? == 0 ] ; then + echo 'newer gcc found' + EXPECT=12 +else + echo 'found old gcc result' + EXPECT=8 +fi + +TOTAL=`grep -c BRDA: total.info` +if [ $TOTAL != $EXPECT ] ; then + echo "ERROR: unexpected branch count in total: $TOTAL (expected $EXPECT)" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +$COVER $LCOV_TOOL $LCOV_OPTS -a macro.info -a no_macro.info -o total2.info +if [ 0 != $? ] ; then + echo "Error: unexpected error code from lcov --aggregate (2)" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +TOTAL2=`grep -c BRDA: total2.info` +if [ $TOTAL2 != $EXPECT ] ; then + echo "ERROR: unexpected branch count in total2: $TOTAL2 (expected $EXPECT)" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + + +echo "Tests passed" + +if [ "x$COVER" != "x" ] && [ $LOCAL_COVERAGE == 1 ]; then + cover +fi diff --git a/tests/lcov/demangle/Makefile b/tests/lcov/demangle/Makefile new file mode 100644 index 00000000..38e64e10 --- /dev/null +++ b/tests/lcov/demangle/Makefile @@ -0,0 +1,6 @@ +include ../../common.mak + +TESTS := demangle.sh + +clean: + $(shell ./demangle.sh --clean) diff --git a/tests/lcov/demangle/demangle.cpp b/tests/lcov/demangle/demangle.cpp new file mode 100644 index 00000000..b3261118 --- /dev/null +++ b/tests/lcov/demangle/demangle.cpp @@ -0,0 +1,45 @@ +/** + * @file demangle.cpp + * @date Mon Dec 12 05:40:40 2022 + * + * @brief test bugs: + @li capture does not demangle .info result + @li duplicate function records in result + */ + +#include + +class Animal +{ +public: + Animal() + { + std::cout << "Animal" </dev/null 2>&1 ; then + echo "Missing tool: ${CXX}" >&2 + exit 2 +fi + +if [ 'x' == "x$GENHTML_TOOL" ] ; then + GENHTML_TOOL=${LCOV_HOME}/bin/genhtml + LCOV_TOOL=${LCOV_HOME}/bin/lcov + GENINFO_TOOL=${LCOV_HOME}/bin/geninfo +fi + +SIMPLIFY_SCRIPT=${SCRIPT_DIR}/simplify.pm + +${CXX} -std=c++1y --coverage demangle.cpp +./a.out 1 + +$COVER $LCOV_TOOL $LCOV_OPTS --capture --filter branch --demangle --directory . -o demangle.info --rc derive_function_end_line=0 + +$COVER $LCOV_TOOL $LCOV_OPTS --list demangle.info + +# how many branches reported? 
+COUNT=`grep -c BRDA: demangle.info`
+if [ $COUNT != '0' ] ; then
+    echo "expected 0 branches - found $COUNT"
+    exit 1
+fi
+
+for k in FNA ; do
+    # how many functions reported?
+    grep $k: demangle.info
+    COUNT=`grep -v __ demangle.info | grep -c $k:`
+    if [ $COUNT != '5' ] ; then
+        echo "expected 5 $k function entries in demangle.info - found $COUNT"
+        exit 1
+    fi
+
+    # were the function names demangled?
+    grep $k: demangle.info | grep ::
+    COUNT=`grep $k: demangle.info | grep -c ::`
+    if [ $COUNT != '4' ] ; then
+        echo "expected 4 $k function entries in demangle.info - found $COUNT"
+        exit 1
+    fi
+done
+
+# see if we can "simplify" the function names..
+for callback in './simplify.pl' "${SIMPLIFY_SCRIPT},--sep,;,--re,s/Animal::Animal/subst1/;s/Cat::Cat/subst2/;s/subst2/subst3/" "${SIMPLIFY_SCRIPT},--file,simplify.cmd" ; do
+
+    $COVER $GENHTML_TOOL --branch $PARALLEL $PROFILE -o simplify demangle.info --flat --simplify $callback
+    if [ $? != 0 ] ; then
+        echo "genhtml --simplify '$callback' failed"
+        exit 1
+    fi
+    grep subst1 simplify/demangle/demangle.cpp.func.html
+    if [ $? != 0 ] ; then
+        echo "didn't find subst1 pattern after $callback"
+        exit 1
+    fi
+    grep Animal::Animal simplify/demangle/demangle.cpp.func.html
+    if [ $? == 0 ] ; then
+        echo "found pattern that was supposed to be substituted after $callback"
+        exit 1
+    fi
+    grep subst3 simplify/demangle/demangle.cpp.func.html
+    if [ $? != 0 ] ; then
+        echo "didn't find subst3 pattern after $callback"
+        exit 1
+    fi
+    grep subst2 simplify/demangle/demangle.cpp.func.html
+    if [ $? == 0 ] ; then
+        echo "iterative substitute failed after $callback "
+        exit 1
+    fi
+done
+
+$COVER $LCOV_TOOL $LCOV_OPTS --capture --filter branch --directory . -o vanilla.info
+
+$COVER $LCOV_TOOL $LCOV_OPTS --list vanilla.info
+
+# how many branches reported?
+COUNT=`grep -c BRDA: vanilla.info`
+if [ $COUNT != '0' ] ; then
+    echo "expected 0 branches - found $COUNT"
+    exit 1
+fi
+
+for k in FNA ; do
+    # how many functions reported?
+    grep $k: vanilla.info
+    COUNT=`grep -v __ demangle.info | grep -c $k: vanilla.info`
+    # gcc may generate multiple entries for the inline functions..
+    if [ $COUNT -lt 5 ] ; then
+        echo "expected 5 $k function entries in vanilla.info - found $COUNT"
+        exit 1
+    fi
+
+    # were the function names demangled?
+    grep $k: vanilla.info | grep ::
+    COUNT=`grep $k: vanilla.info | grep -c ::`
+    if [ $COUNT != '0' ] ; then
+        echo "expected 0 demangled $k function entries in vanilla.info - found $COUNT"
+        exit 1
+    fi
+done
+
+# see if we can exclude a function - does the generated data contain
+# function end line numbers?
+grep -E 'FNL:[0-9]+,[0-9]+,[0-9]+' demangle.info
+if [ $? == 0 ] ; then
+    echo "----------------------"
+    echo " compiler version supports start/end reporting - testing erase"
+
+    # end line is captured - so we should be able to filter
+    $COVER $LCOV_TOOL $LCOV_OPTS --capture --filter branch --demangle-cpp --directory . --erase-functions main -o exclude.info -v -v
+    if [ $? != 0 ] ; then
+        echo "geninfo with exclusion failed"
+        if [ $KEEP_GOING == 0 ] ; then
+            exit 1
+        fi
+    fi
+
+    for type in DA FNA ; do
+        ORIG=`grep -c -E "^$type:" demangle.info`
+        NOW=`grep -c -E "^$type:" exclude.info`
+        if [ $ORIG -le $NOW ] ; then
+            echo "unexpected $type count: $ORIG -> $NOW"
+            exit 1
+        fi
+    done
+
+    # check that the same lines are removed by 'aggregate'
+    $COVER $LCOV_TOOL $LCOV_OPTS -o aggregate.info -a demangle.info --erase-functions main -v
+
+    diff exclude.info aggregate.info
+    if [ $?
!= 0 ] ; then + echo "unexpected 'exclude function' mismatch" + exit 1 + fi + + perl -pe 's/(FNL:[0-9]+),([0-9]+),[0-9]+/$1,$2/' demangle.info > munged.info + $COVER $LCOV_TOOL $LCOV_OPTS --filter branch --demangle-cpp -a munged.info --erase-functions main -o munged_exclude.info --rc derive_function_end_line=0 + if [ $? == 0 ] ; then + echo "lcov exclude with no function end lines passed" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + $COVER $LCOV_TOOL $LCOV_OPTS --filter branch --demangle-cpp -a munged.info --erase-functions main -o munged_exclude.info --rc derive_function_end_line=0 --ignore unsupported + if [ $? != 0 ] ; then + echo "didn't ignore exclusion message" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + +else + # no end line in data - check for error message... + echo "----------------------" + echo " compiler version DOESN't support start/end reporting - check error" + $COVER $LCOV_TOOL $LCOV_OPTS --capture --filter branch --demangle-cpp --directory . --erase-functions main --ignore unused -o exclude.info --rc derive_function_end_line=0 --msg-log exclude.log + if [ 0 == $? ] ; then + echo "Error: expected exit for unsupported feature" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + + grep -E 'ERROR: .+Function begin/end line exclusions not supported' exclude.log + if [ 0 != $? ] ; then + echo "Error: didn't find unsupported message" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + + $COVER $LCOV_TOOL $LCOV_OPTS --capture --filter branch --demangle-cpp --directory . --erase-functions main --ignore unused -o exclude2.info --rc derive_function_end_line=1 --msg-log exclude2.log + if [ 0 != $? ] ; then + echo "Error: unexpected exit when 'derive' enabled" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + + grep -E 'WARNING: .+Function begin/end line exclusions.+attempting to derive' exclude2.log + if [ 0 != $? ] ; then + echo "Error: didn't find derive warning" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + + fi + + $COVER $LCOV_TOOL $LCOV_OPTS --capture --filter branch --demangle-cpp --directory . --erase-functions main --rc derive_function_end_line=0 --ignore unsupported,unused -o ignore.info --msg-log=exclude3.log + if [ 0 != $? ] ; then + echo "Error: expected to ignore unsupported message" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + grep -E 'WARNING: .+Function begin/end line exclusions.+See lcovrc man entry' exclude3.log + if [ 0 != $? ] ; then + echo "Error: didn't find derive warning2" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + + fi + + # expect not to find 'main' + grep main ignore.info + if [ $? == 0 ] ; then + echo "expected 'main' to be filtered out" + exit 1 + fi + # but expect to find coverpoint within main.. + grep DA:40,1 ignore.info + if [ $? != 0 ] ; then + echo "expected to find coverpoint at line 40" + exit 1 + fi +fi + + +echo "Tests passed" + +if [ "x$COVER" != "x" ] && [ $LOCAL_COVERAGE == 1 ]; then + cover +fi diff --git a/tests/lcov/demangle/simplify.cmd b/tests/lcov/demangle/simplify.cmd new file mode 100644 index 00000000..8a628285 --- /dev/null +++ b/tests/lcov/demangle/simplify.cmd @@ -0,0 +1,6 @@ +# test the 'simplify.pm --file ...' 
option + +s/Animal::Animal/subst1/ +s/Cat::Cat/subst2/ +s/subst2/subst3/ + diff --git a/tests/lcov/demangle/simplify.pl b/tests/lcov/demangle/simplify.pl new file mode 100755 index 00000000..992dcde8 --- /dev/null +++ b/tests/lcov/demangle/simplify.pl @@ -0,0 +1,11 @@ +#!/usr/bin/env perl +use strict; + +my $name = shift; +$name =~ s/Animal::Animal/subst1/; +$name =~ s/Cat::Cat/subst2/; +$name =~ s/subst2/subst3/; + +print $name; +exit 0; + diff --git a/tests/lcov/diff/Makefile b/tests/lcov/diff/Makefile deleted file mode 100644 index 2935cda9..00000000 --- a/tests/lcov/diff/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -include ../../common.mak - -TESTS := test.sh - -clean: - rm -f *.info diff - make -C old clean - make -C new clean diff --git a/tests/lcov/diff/new/Makefile b/tests/lcov/diff/new/Makefile deleted file mode 100644 index 2a2edea1..00000000 --- a/tests/lcov/diff/new/Makefile +++ /dev/null @@ -1,17 +0,0 @@ -prog.info: - -include ../../../common.mak - -prog.info: prog.gcda - $(LCOV) -c -d . -o prog.info - -prog.gcda: prog - ./prog || true - -prog: prog.c - $(CC) prog.c -o prog --coverage - -clean: - rm -f prog prog.gcda prog.gcno prog.info - -.PHONY: all clean diff --git a/tests/lcov/diff/new/prog.c b/tests/lcov/diff/new/prog.c deleted file mode 100644 index 6f4607cc..00000000 --- a/tests/lcov/diff/new/prog.c +++ /dev/null @@ -1,41 +0,0 @@ - - - -int fn(int x) -{ - switch (x) { - case -1: return 0; - - - case 0: return 2; - case 2: return 3; - - - case 12: return 7; - default: return 255; - } - - - -} - -int fn2() -{ - - - return 7; -} - - - -int main(int argc, char *argv[]) -{ - - - if (argc > 1) - return fn(argc); - - return fn2(); - - -} diff --git a/tests/lcov/diff/old/Makefile b/tests/lcov/diff/old/Makefile deleted file mode 100644 index 2a2edea1..00000000 --- a/tests/lcov/diff/old/Makefile +++ /dev/null @@ -1,17 +0,0 @@ -prog.info: - -include ../../../common.mak - -prog.info: prog.gcda - $(LCOV) -c -d . -o prog.info - -prog.gcda: prog - ./prog || true - -prog: prog.c - $(CC) prog.c -o prog --coverage - -clean: - rm -f prog prog.gcda prog.gcno prog.info - -.PHONY: all clean diff --git a/tests/lcov/diff/old/prog.c b/tests/lcov/diff/old/prog.c deleted file mode 100644 index a4eda255..00000000 --- a/tests/lcov/diff/old/prog.c +++ /dev/null @@ -1,22 +0,0 @@ -int fn(int x) -{ - switch (x) { - case -1: return 0; - case 0: return 2; - case 2: return 3; - case 12: return 7; - default: return 255; - } -} - -int fn2() -{ - return 7; -} - -int main(int argc, char *argv[]) -{ - if (argc > 1) - return fn(argc); - return fn2(); -} diff --git a/tests/lcov/diff/test.sh b/tests/lcov/diff/test.sh deleted file mode 100755 index b755f0d6..00000000 --- a/tests/lcov/diff/test.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright IBM Corp. 
2017 -# -# Check lcov's diff function: -# - Compile two slightly different test programs -# - Run the programs and collect coverage data -# - Generate a patch containing the difference between the source code -# - Apply the patch to the coverage data -# - Compare the resulting patched coverage data file with the data from the -# patched source file -# - -function die() -{ - echo "Error: $@" >&2 - exit 1 -} - -make -C old || die "Failed to compile old source" -make -C new || die "Failed to compile new source" -diff -u $PWD/old/prog.c $PWD/new/prog.c > diff - -$LCOV --diff old/prog.info diff --convert-filenames -o patched.info -t bla || \ - die "Failed to apply patch to coverage data file" -norminfo new/prog.info > new_normalized.info -norminfo patched.info > patched_normalized.info -sed -i -e 's/^TN:.*$/TN:/' patched_normalized.info - -diff -u patched_normalized.info new_normalized.info || \ - die "Mismatch in patched coverage data file" - -echo "Patched coverage data file matches expected file" diff --git a/tests/lcov/errs/Makefile b/tests/lcov/errs/Makefile new file mode 100644 index 00000000..9ed22c9b --- /dev/null +++ b/tests/lcov/errs/Makefile @@ -0,0 +1,6 @@ +include ../../common.mak + +TESTS := errs.sh + +clean: + $(shell ./errs.sh --clean) diff --git a/tests/lcov/errs/badBranchLine.info b/tests/lcov/errs/badBranchLine.info new file mode 100644 index 00000000..1443fab7 --- /dev/null +++ b/tests/lcov/errs/badBranchLine.info @@ -0,0 +1,43 @@ +TN: +SF:test.cpp +VER:#1 +FN:1,45,main +FNDA:1,main +DA:0,0 +FNF:1 +FNH:1 +BRDA:15,e0,0,1 +BRDA:15,e0,1,0 +BRDA:42,0,0,- +BRDA:42,0,1,- +BRDA:0,0,0,- +BRDA:0,0,1,- +BRF:6 +BRH:2 +DA:12,1 +DA:14,1 +DA:15,1 +DA:17,1 +DA:22,1 +DA:23,1 +DA:25,1 +DA:30,0 +DA:31,0 +DA:36,0 +DA:38,0 +DA:40,1 +DA:41,1 +DA:42,0 +DA:43,0 +DA:44,1 +LF:16 +LH:10 +end_of_record +TN: +SF:/mtkoss/gcc/10.2.0-rhel7/x86_64/include/c++/10.2.0/iostream +VER:2020-10-05T04:38:04-04:00 +FNF:0 +FNH:0 +LF:0 +LH:0 +end_of_record diff --git a/tests/lcov/errs/badFncEndLine.info b/tests/lcov/errs/badFncEndLine.info new file mode 100644 index 00000000..e5cdee08 --- /dev/null +++ b/tests/lcov/errs/badFncEndLine.info @@ -0,0 +1,44 @@ +TN: name with space +SF:test.cpp +VER:#1 +VER:#1 +FN:1,0,main +#FNDA:1,main <- FNDA record mismatches because FN record is skipped +DA:0,0 +FNF:1 +FNH:1 +BRDA:15,e0,0,1 +BRDA:15,e0,1,0 +BRDA:42,0,0,- +BRDA:42,0,1,- +BRDA:44,0,0,- +BRDA:44,0,1,- +BRF:6 +BRH:2 +DA:12,1 +DA:14,1 +DA:15,1 +DA:17,1 +DA:22,1 +DA:23,1 +DA:25,1 +DA:30,0 +DA:31,0 +DA:36,0 +DA:38,0 +DA:40,1 +DA:41,1 +DA:42,0 +DA:43,0 +DA:44,1 +LF:16 +LH:10 +end_of_record +TN: +SF:/mtkoss/gcc/10.2.0-rhel7/x86_64/include/c++/10.2.0/iostream +VER:2020-10-05T04:38:04-04:00 +FNF:0 +FNH:0 +LF:0 +LH:0 +end_of_record diff --git a/tests/lcov/errs/badFncLine.info b/tests/lcov/errs/badFncLine.info new file mode 100644 index 00000000..9f5a22b4 --- /dev/null +++ b/tests/lcov/errs/badFncLine.info @@ -0,0 +1,43 @@ +TN: +SF:test.cpp +VER:#1 +FN:0,45,main +#FNDA:1,main <- FNDA record mismatches because FN record is skipped +DA:0,0 +FNF:1 +FNH:1 +BRDA:15,e0,0,1 +BRDA:15,e0,1,0 +BRDA:42,0,0,- +BRDA:42,0,1,- +BRDA:44,0,0,- +BRDA:44,0,1,- +BRF:6 +BRH:2 +DA:12,1 +DA:14,1 +DA:15,1 +DA:17,1 +DA:22,1 +DA:23,1 +DA:25,1 +DA:30,0 +DA:31,0 +DA:36,0 +DA:38,0 +DA:40,1 +DA:41,1 +DA:42,0 +DA:43,0 +DA:44,1 +LF:16 +LH:10 +end_of_record +TN: +SF:/mtkoss/gcc/10.2.0-rhel7/x86_64/include/c++/10.2.0/iostream +VER:2020-10-05T04:38:04-04:00 +FNF:0 +FNH:0 +LF:0 +LH:0 +end_of_record diff --git a/tests/lcov/errs/badLine.info 
b/tests/lcov/errs/badLine.info new file mode 100644 index 00000000..e4f8a554 --- /dev/null +++ b/tests/lcov/errs/badLine.info @@ -0,0 +1,43 @@ +TN: +SF:test.cpp +VER:#1 +FN:1,45,main +FNDA:1,main +DA:0,0 +FNF:1 +FNH:1 +BRDA:15,e0,0,1 +BRDA:15,e0,1,0 +BRDA:42,0,0,- +BRDA:42,0,1,- +BRDA:44,0,0,- +BRDA:44,0,1,- +BRF:6 +BRH:2 +DA:0,1 +DA:14,1 +DA:15,1 +DA:17,1 +DA:22,1 +DA:23,1 +DA:25,1 +DA:30,0 +DA:31,0 +DA:36,0 +DA:38,0 +DA:40,1 +DA:41,1 +DA:42,0 +DA:43,0 +DA:44,1 +LF:16 +LH:10 +end_of_record +TN: +SF:/mtkoss/gcc/10.2.0-rhel7/x86_64/include/c++/10.2.0/iostream +VER:2020-10-05T04:38:04-04:00 +FNF:0 +FNH:0 +LF:0 +LH:0 +end_of_record diff --git a/tests/lcov/errs/branchNoLine.info b/tests/lcov/errs/branchNoLine.info new file mode 100644 index 00000000..855817d3 --- /dev/null +++ b/tests/lcov/errs/branchNoLine.info @@ -0,0 +1,22 @@ +TN: +SF:test.cpp +VER:#1 +FN:12,30,main +FNDA:1,main +FNF:1 +FNH:1 +BRDA:15,0,0,0 +BRDA:15,0,1,1 +BRF:2 +BRH:1 +DA:12,0 +DA:14,0 +DA:15,0 +DA:17,0 +DA:22,10 +DA:23,10 +DA:25,0 +DA:30,0 +LF:8 +LH:0 +end_of_record diff --git a/tests/lcov/errs/emptyFileRecord.info b/tests/lcov/errs/emptyFileRecord.info new file mode 100644 index 00000000..4487ba20 --- /dev/null +++ b/tests/lcov/errs/emptyFileRecord.info @@ -0,0 +1,43 @@ +SF: test.cpp +VER:#1 +VER:#1 +FN:1,46,main +FNDA:1,main +FNF:1 +FNH:1 +BRDA:15,e0,0,1 +BRDA:15,e0,1,0 +BRDA:42,0,0,- +BRDA:42,0,1,- +BRDA:44,0,0,- +BRDA:44,0,1,1 +BRF:6 +BRH:2 +DA:12,1 +DA:14,1 +DA:15,1 +DA:17,1 +DA:22,1 +DA:23,1 +DA:25,1 +DA:30,0 +DA:31,0 +DA:36,0 +DA:38,0 +DA:40,1 +DA:41,1 +DA:42,0 +DA:43,0 +DA:44,1 +LF:16 +LH:10 +end_of_record + +# empty file record here.. +SF: +VER:2020-10-05T04:38:04-04:00 +FNF:0 +FNH:0 +LF:0 +LH:0 +end_of_record diff --git a/tests/lcov/errs/errs.sh b/tests/lcov/errs/errs.sh new file mode 100755 index 00000000..16e0831a --- /dev/null +++ b/tests/lcov/errs/errs.sh @@ -0,0 +1,258 @@ +#!/bin/bash +set +x + +source ../../common.tst + +rm -f *.log *.json dumper* *.out +rm -rf emptyDir + +clean_cover + +if [[ 1 == $CLEAN_ONLY ]] ; then + exit 0 +fi + +if ! type ${CXX} >/dev/null 2>&1 ; then + echo "Missing tool: ${CXX}" >&2 + exit 2 +fi + +LCOV_OPTS="--branch $PARALLEL $PROFILE" +# gcc/4.8.5 (and possibly other old versions) generate inconsistent line/function data +IFS='.' read -r -a VER <<< `${CC} -dumpversion` +if [ "${VER[0]}" -lt 5 ] ; then + IGNORE="--ignore inconsistent" + # and filter exception branches to avoid spurious differences for old compiler + FILTER='--filter branch' +fi + +status=0 + +for f in badFncLine badFncEndLine fncMismatch badBranchLine badLine ; do + echo lcov $LCOV_OPTS --summary $f.info + $COVER $LCOV_TOOL $LCOV_OPTS --summary $f.info 2>&1 | tee $f.log + if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "failed to notice incorrect decl in $f" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit $status + fi + fi + grep -E '(unexpected|mismatched) .*line' $f.log + if [ 0 != $? ] ; then + echo "missing error message" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit $status + fi + fi + + echo lcov $LCOV_OPTS --summary $f.info --ignore inconsistent,format + $COVER $LCOV_TOOL $LCOV_OPTS --summary $f.info --ignore format,inconsistent 2>&1 | tee ${f}2.log + if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "failed to ignore message ${f}2.log" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit $status + fi + fi + # and print the data out again.. 
+ echo lcov $LCOV_OPTS -o $f.out -a $f.info --ignore format,inconsistent + $COVER $LCOV_TOOL $LCOV_OPTS -o $f.out -a $f.info --ignore format,inconsistent --msg-log $f{3}.log + if [ 0 != $? ] ; then + echo "failed to ignore message ${f}3.log" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit $status + fi + fi + +done + +for f in noFunc ; do + echo lcov $LCOV_OPTS --summary $f.info + $COVER $LCOV_TOOL $LCOV_OPTS --summary $f.info 2>&1 | tee $f.log + if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "failed to notice incorrect decl in $f" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit $status + fi + fi + grep -E 'unknown function' $f.log + if [ 0 != $? ] ; then + echo "missing error message" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit $status + fi + fi + + echo lcov $LCOV_OPTS --summary $f.info --ignore mismatch + $COVER $LCOV_TOOL $LCOV_OPTS --summary $f.info --ignore mismatch 2>&1 | tee ${f}2.log + if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "failed to ignore message ${f}2.log" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit $status + fi + fi +done + +for f in emptyFileRecord ; do + echo lcov $LCOV_OPTS --summary $f.info + $COVER $LCOV_TOOL $LCOV_OPTS --summary $f.info 2>&1 | tee $f.log + if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "failed to notice incorrect decl in $f" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit $status + fi + fi + grep -E 'unexpected empty file name' $f.log + if [ 0 != $? ] ; then + echo "missing error message" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit $status + fi + fi + + echo lcov $LCOV_OPTS --summary $f.info --ignore mismatch + $COVER $LCOV_TOOL $LCOV_OPTS --summary $f.info --ignore format 2>&1 | tee ${f}2.log + if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "failed to ignore message ${f}2.log" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit $status + fi + fi +done + + +for f in exceptionBranch ; do + echo lcov $LCOV_OPTS -a ${f}1.info -a ${f}2.info -o $f.out + $COVER $LCOV_TOOL $LCOV_OPTS -a ${f}1.info -a ${f}2.info -o $f.out 2>&1 | tee $f.log + if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "failed to notice incorrect decl in $f" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit $status + fi + fi + grep -E 'mismatched exception tag' $f.log + if [ 0 != $? ] ; then + echo "missing error message" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit $status + fi + fi + if [ -f $f.out ] ; then + echo "should not have created file, on error" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit $status + fi + fi + + echo lcov $LCOV_OPTS -a ${f}1.info -a ${f}2.info --ignore mismatch -o ${f}2.log + $COVER $LCOV_TOOL $LCOV_OPTS -a ${f}1.info -a ${f}2.info --ignore mismatch -o $f.log + + if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "failed to ignore message ${f}2.log" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit $status + fi + fi +done + +mkdir -p emptyDir + +echo lcov $LCOV_OPTS -a emptyDir -a exceptionBranch1.info -o emptyDir.info +$COVER $LCOV_TOOL $LCOV_OPTS -a emptyDir -a exceptionBranch1.info -o emptyDir.info 2>&1 | tee emptyDir.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "failed to notice empty dir" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit $status + fi +fi +grep 'no files matching' emptyDir.log +if [ 0 != $? 
] ; then + echo "did not find expected empty dir message" +fi +echo lcov $LCOV_OPTS -a emptyDir -a exceptionBranch1.info -o emptyDir.info --ignore empty +$COVER $LCOV_TOOL $LCOV_OPTS -a emptyDir -a exceptionBranch1.info -o emptyDir.info --ignore empty 2>&1 | tee emptyDir2.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "failed to ignore empty dir" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit $status + fi +fi + +# trigger error from unreadable directory +chmod ugo-rx emptyDir +echo lcov $LCOV_OPTS -a emptyDir -a exceptionBranch1.info -o emptyDir.info +$COVER $LCOV_TOOL $LCOV_OPTS -a emptyDir -a exceptionBranch1.info -o emptyDir.info 2>&1 | tee noRead.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "failed to notice unreadable" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit $status + fi +fi +grep 'error in "find' noRead.log +if [ 0 != $? ] ; then + echo "did not find expected unreadable dir message" +fi +echo lcov $LCOV_OPTS -a emptyDir -a exceptionBranch1.info -o emptyDir.info --ignore utility,empty +$COVER $LCOV_TOOL $LCOV_OPTS -a emptyDir -a exceptionBranch1.info -o emptyDir.info --ignore utility,empty 2>&1 | tee noRead2.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "failed to ignore unreadable dir" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit $status + fi +fi +chmod ugo+rx emptyDir + +# data consistency errors: +# - function marked 'hit' but no contained lines are hit +# - function marked 'not hit' but some contained line is hit +# - line marked 'hit' but no contained branches have been evaluated +# - line marked 'not hit' but at least one contained branch has been evaluated +for i in funcNoLine lineNoFunc branchNoLine lineNoBranch ; do + + $COVER $LCOV_TOOL $LCOV_OPTS --summary $i.info 2>&1 | tee $i.log + if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "failed to see error ${i}.log" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit $status + fi + fi + $COVER $LCOV_TOOL $LCOV_OPTS --summary $i.info 2>&1 --ignore inconsistent | tee ${i}2.log + if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "failed to ignore error ${i}2.log" + status=1 + if [ 0 == $KEEP_GOING ] ; then + exit $status + fi + fi +done + + +if [ 0 == $status ] ; then + echo "Tests passed" +else + echo "Tests failed" +fi + +if [ "x$COVER" != "x" ] && [ $LOCAL_COVERAGE == 1 ]; then + cover +fi + +exit $status diff --git a/tests/lcov/errs/exceptionBranch1.info b/tests/lcov/errs/exceptionBranch1.info new file mode 100644 index 00000000..4a133944 --- /dev/null +++ b/tests/lcov/errs/exceptionBranch1.info @@ -0,0 +1,34 @@ +TN: +SF:test.cpp +VER:#1 +FN:12,45,main +FNDA:1,main +FNF:1 +FNH:1 +BRDA:15,e0,0,1 +BRDA:15,e0,1,0 +BRDA:40,0,0,condition,1 +BRDA:40,0,1,!condition,0 +BRDA:42,0,0,- +BRDA:42,0,1,- +BRF:6 +BRH:2 +DA:12,1 +DA:14,1 +DA:15,1 +DA:17,1 +DA:22,1 +DA:23,1 +DA:25,1 +DA:30,0 +DA:31,0 +DA:36,0 +DA:38,0 +DA:40,1 +DA:41,1 +DA:42,0 +DA:43,0 +DA:44,1 +LF:16 +LH:10 +end_of_record diff --git a/tests/lcov/errs/exceptionBranch2.info b/tests/lcov/errs/exceptionBranch2.info new file mode 100644 index 00000000..a969acca --- /dev/null +++ b/tests/lcov/errs/exceptionBranch2.info @@ -0,0 +1,36 @@ +TN: +SF:test.cpp +VER:#1 +FN:12,45,main +FNDA:1,main +FNF:1 +FNH:1 +# not the same exception flag +BRDA:15,0,0,1 +BRDA:15,0,1,0 +# different condition - cause error +BRDA:40,0,0,cond,1 +BRDA:40,0,1,!condition,0 +BRDA:42,0,0,- +BRDA:42,0,1,- +BRF:6 +BRH:2 +DA:12,1 +DA:14,1 +DA:15,1 +DA:17,1 +DA:22,1 +DA:23,1 +DA:25,1 +DA:30,0 +DA:31,0 +DA:36,0 +DA:38,0 +DA:40,1 +DA:41,1 +DA:42,0 +DA:43,0 +DA:44,1 +LF:16 +LH:10 +end_of_record diff --git 
a/tests/lcov/errs/fncMismatch.info b/tests/lcov/errs/fncMismatch.info new file mode 100644 index 00000000..576c3481 --- /dev/null +++ b/tests/lcov/errs/fncMismatch.info @@ -0,0 +1,36 @@ +TN: +SF:test.cpp +VER:#1 +FN:12,45,main +FNDA:1,main +FN:12,38,mismatchFunc +FNDA:0,mismatchFunc +FNF:2 +FNH:1 +BRDA:15,e0,0,1 +BRDA:15,e0,1,0 +BRDA:40,0,0,condition,1 +BRDA:40,0,1,!condition,0 +BRDA:42,0,0,- +BRDA:42,0,1,- +BRF:6 +BRH:2 +DA:12,1 +DA:14,1 +DA:15,1 +DA:17,1 +DA:22,1 +DA:23,1 +DA:25,1 +DA:30,0 +DA:31,0 +DA:36,0 +DA:38,0 +DA:40,1 +DA:41,1 +DA:42,0 +DA:43,0 +DA:44,1 +LF:16 +LH:10 +end_of_record diff --git a/tests/lcov/errs/funcNoLine.info b/tests/lcov/errs/funcNoLine.info new file mode 100644 index 00000000..b42deb4e --- /dev/null +++ b/tests/lcov/errs/funcNoLine.info @@ -0,0 +1,22 @@ +TN: +SF:test.cpp +VER:#1 +FN:12,30,main +FNDA:1,main +FNF:1 +FNH:1 +BRDA:15,e0,0,0 +BRDA:15,e0,1,0 +BRF:2 +BRH:1 +DA:12,0 +DA:14,0 +DA:15,0 +DA:17,0 +DA:22,0 +DA:23,0 +DA:25,0 +DA:30,0 +LF:8 +LH:0 +end_of_record diff --git a/tests/lcov/errs/lineNoBranch.info b/tests/lcov/errs/lineNoBranch.info new file mode 100644 index 00000000..4e189b59 --- /dev/null +++ b/tests/lcov/errs/lineNoBranch.info @@ -0,0 +1,22 @@ +TN: +SF:test.cpp +VER:#1 +FN:12,30,main +FNDA:1,main +FNF:1 +FNH:1 +BRDA:15,0,0,0 +BRDA:15,0,1,0 +BRF:2 +BRH:1 +DA:12,0 +DA:14,0 +DA:15,10 +DA:17,0 +DA:22,10 +DA:23,10 +DA:25,0 +DA:30,0 +LF:8 +LH:0 +end_of_record diff --git a/tests/lcov/errs/lineNoFunc.info b/tests/lcov/errs/lineNoFunc.info new file mode 100644 index 00000000..2f8b41f1 --- /dev/null +++ b/tests/lcov/errs/lineNoFunc.info @@ -0,0 +1,22 @@ +TN: +SF:test.cpp +VER:#1 +FN:12,30,main +FNDA:0,main +FNF:1 +FNH:1 +BRDA:15,e0,0,0 +BRDA:15,e0,1,0 +BRF:2 +BRH:1 +DA:12,0 +DA:14,0 +DA:15,0 +DA:17,0 +DA:22,10 +DA:23,10 +DA:25,0 +DA:30,0 +LF:8 +LH:0 +end_of_record diff --git a/tests/lcov/errs/noFunc.info b/tests/lcov/errs/noFunc.info new file mode 100644 index 00000000..502381c5 --- /dev/null +++ b/tests/lcov/errs/noFunc.info @@ -0,0 +1,35 @@ +TN: +SF:test.cpp +VER:#1 +FN:12,45,main +FNDA:1,main +FNDA:0,noSuchFunc +FNF:2 +FNH:1 +BRDA:15,e0,0,1 +BRDA:15,e0,1,0 +BRDA:40,0,0,condition,1 +BRDA:40,0,1,!condition,0 +BRDA:42,0,0,- +BRDA:42,0,1,- +BRF:6 +BRH:2 +DA:12,1 +DA:14,1 +DA:15,1 +DA:17,1 +DA:22,1 +DA:23,1 +DA:25,1 +DA:30,0 +DA:31,0 +DA:36,0 +DA:38,0 +DA:40,1 +DA:41,1 +DA:42,0 +DA:43,0 +DA:44,1 +LF:16 +LH:10 +end_of_record diff --git a/tests/lcov/exception/Makefile b/tests/lcov/exception/Makefile new file mode 100644 index 00000000..eab4de0d --- /dev/null +++ b/tests/lcov/exception/Makefile @@ -0,0 +1,6 @@ +include ../../common.mak + +TESTS := exception.sh + +clean: + $(shell ./exception.sh --clean) diff --git a/tests/lcov/exception/example.data b/tests/lcov/exception/example.data new file mode 100644 index 00000000..cbdfa0f3 --- /dev/null +++ b/tests/lcov/exception/example.data @@ -0,0 +1,36 @@ +TN: +SF:./exception.cpp +# slightly faked data - to hit additional branch +FN:5,23,main +FNDA:1,main +FNF:1 +FNH:1 +BRDA:8,0,0,1 +BRDA:8,e0,1,0 +BRDA:12,0,0,1 +BRDA:12,0,1,0 +BRDA:17,0,0,1 +#fake block with all exception branches here +BRDA:17,e0,1,0 +BRDA:18,e0,0,1 +BRDA:18,e0,1,0 +# fake orphan here +BRDA:19,0,0,- +BRDA:20,0,0,1 +BRDA:20,e0,1,0 +BRF:10 +BRH:5 +DA:5,1 +DA:8,1 +DA:10,1 +DA:12,1 +DA:13,1 +DA:15,0 +DA:17,1 +DA:18,1 +DA:19,0 +DA:20,1 +DA:22,1 +LF:11 +LH:10 +end_of_record diff --git a/tests/lcov/exception/exception.cpp b/tests/lcov/exception/exception.cpp new file mode 100644 index 00000000..5e86c372 --- /dev/null +++ 
b/tests/lcov/exception/exception.cpp @@ -0,0 +1,23 @@ +#include +#include + +int +main(int ac, char **av) +{ + // LCOV_EXCL_EXCEPTION_BR_START + printf("Simple branching: argc=%d\n", ac); + // LCOV_EXCL_EXCEPTION_BR_STOP + int branch(0); + + if (ac == 1) + branch = 1; + else + branch = 2; + + printf("std::string creating branches...\n"); + std::string name("name1"); // LCOV_EXCL_EXCEPTION_BR_LINE + std::size_t len = name.length(); + name.resize(len + 10); + + return 0; +} diff --git a/tests/lcov/exception/exception.sh b/tests/lcov/exception/exception.sh new file mode 100755 index 00000000..d1c52441 --- /dev/null +++ b/tests/lcov/exception/exception.sh @@ -0,0 +1,274 @@ +#!/bin/bash +set +x + +source ../../common.tst + +# use geninfo to capture - so we collect coverage data... +CAPTURE="$GENINFO_TOOL ." +# CAPTURE="$LCOV_TOOL --capture -d ." + + +LCOV_OPTS="--branch-coverage $PARALLEL $PROFILE" + +IFS='.' read -r -a VER <<< `${CC} -dumpversion` + +rm -rf *.gcda *.gcno a.out *.info* *.txt* *.json dumper* testRC *.gcov *.gcov.* *.log precedence.rc + +clean_cover + +if [[ 1 == $CLEAN_ONLY ]] ; then + exit 0 +fi + +if ! type ${CXX} >/dev/null 2>&1 ; then + echo "Missing tool: ${CXX}" >&2 + exit 2 +fi + +${CXX} -std=c++1y --coverage exception.cpp +if [ 0 != $? ] ; then + echo "Error: unexpected error from gcc" + exit 1 +fi +NO_INITIAL_CAPTURE=0 +if [[ "${VER[0]}" -gt 4 && "${VER[0]}" -lt 7 ]] ; then + # no data generated by initial capture + IGNORE_EMPTY="--ignore empty" + NO_INITIAL_CAPTURE=1 +fi +if [ "${VER[0]}" -lt 8 ] ; then + # cannot generate branch data unless 'intermediate' + IGNORE_USAGE="--ignore usage" +fi +$COVER $CAPTURE $LCOV_OPTS --initial -o initial.info $IGNORE_EMPTY $IGNORE_USAGE +if [ $NO_INITIAL_CAPTURE != $? ] ; then + echo "Error: unexpected error code from lcov --initial" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# enable branch filter without line/region filter - to hit some additional code +$COVER $CAPTURE $LCOV_OPTS --initial -o initial_br.info $IGNORE_EMPTY $IGNORE_USAGE --filter branch_region +if [ $NO_INITIAL_CAPTURE != $? ] ; then + echo "Error: unexpected error code from lcov --initial" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +for info in initial.info initial_br.info ; do + grep 'BRDA:8,e0' $info + if [ 0 == $? ] ; then + echo "Error: exception branch should be filtered out of $info" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi +done + +./a.out +if [ 0 != $? ] ; then + echo "Error: unexpected error return from a.out" + exit 1 +fi + +$COVER $CAPTURE $LCOV_OPTS -o all.info --include '*/exception.cpp' --no-markers + +if [ 0 != $? ] ; then + echo "Error: unexpected error code from lcov extract" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +$COVER $LCOV_TOOL $LCOV_OPTS --list all.info + +if [ 0 != $? ] ; then + echo "Error: unexpected error code from lcov --list" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# how many branches reported? 
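+# (note) branch records in the .info files look like 'BRDA:<line>,<block>,<branch>,<taken>'; exception branches +# carry an 'e' prefix on the block id (e.g. 'BRDA:15,e0,0,1' in example.data), so grepping for 'BRDA:' counts +# all branches while grepping for ',e' counts only the exception branches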
+BRANCHES=`grep -c BRDA: all.info` +EXCEPTIONS=`grep -c ',e' all.info` + +if [ $EXCEPTIONS != '0' ] ; then + + # when run without 'no markers', then we should remove exception + # branches in the marked region + $COVER $CAPTURE $LCOV_OPTS -o filter.info --include '*/exception.cpp' | tee noFilter.log + if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "Error: unexpected error code from lcov extract filter" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + FILTER_BRANCHES=`grep -c BRDA: filter.info` + FILTER_EXCEPTIONS=`grep -c ',e' filter.info` + # we expect the number of exception branches found in 'filter.info' + # (when we applied 'exception branch markers') should be less than + # the number of total branches (when we excluded nothing) + if [ $FILTER_BRANCHES -ge $BRANCHES ] ; then + echo "Error: did not filter exception branches: $BRANCHES -> $FILTER_BRANCHES" + exit 1 + fi + let DIFF=$BRANCHES-$FILTER_BRANCHES + let DIFF2=$EXCEPTIONS-$FILTER_EXCEPTIONS + # 'DIFF' is the number of branches that got removed by 'marker' filtering + # we expect that to be the same as the number of exception branches that + # got removed + # however, this is slightly complicated because gcc might not have + # all the exception branches - leaving a lone "exception not taken" + # branch on the line...but we explicitly remove such lone + # branches - so the total difference in number of branches might be + # larger than the difference between the 'e' branches in the info files. + if [ $DIFF -lt $DIFF2 ] ; then + echo "Error: we seem to have filtered non-exception branches: $DIFF -> $DIFF2" + exit 1 + fi + + # override the exclusion markers and check that we didn't remove + # exception branches.. + $COVER $CAPTURE $LCOV_OPTS -o override.info --include '*/exception.cpp' --rc lcov_excl_exception_br_start=nomatch_start --rc lcov_excl_exception_br_stop=nomatch_stop --rc lcov_excl_exception_br_line=notThere + + if [ 0 != $? ] ; then + echo "Error: unexpected error code from lcov exclusion override filter" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + OVERRIDE_BRANCHES=`grep -c BRDA: override.info` + if [ $OVERRIDE_BRANCHES != $BRANCHES ] ; then + echo "did not honor exception overrides. Expected $BRANCHES found $OVERRIDE_BRANCHES" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + +else + echo "no exceptions identified - so nothing to do" +fi + +# test some filtering options +$COVER $CAPTURE $LCOV_OPTS -o vanilla.info --ignore inconsistent +if [ 0 != $? ] ; then + echo "Error: unexpected error code from vanilla capture-external" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +VANILLA_LINES=`grep -c '^DA:' vanilla.info` + +$COVER $CAPTURE $LCOV_OPTS -o no_external.info --no-external +if [ 0 != $? ] ; then + echo "Error: unexpected error code from capture no-external" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +NO_EXTERNAL_LINES=`grep -c '^DA:' no_external.info` + +if [ "$NO_EXTERNAL_LINES" -ge "$VANILLA_LINES" ] ; then + echo "Error: no_external had no effect" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +$COVER $CAPTURE $LCOV_OPTS -o external_0.info --rc geninfo_external=0 +if [ 0 != $? ] ; then + echo "Error: unexpected error code from geninfo_external=0" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +diff no_external.info external_0.info +if [ $? 
!= 0 ] ; then + echo "geninfo_external=0 didn't work" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +EXTERNAL_0_LINES=`grep -c '^DA:' external_0.info` +echo "geninfo_external = 0" > precedence.rc +$COVER $CAPTURE $LCOV_OPTS -o external_1.info --rc geninfo_external=1 --config-file precedence.rc --ignore inconsistent +if [ 0 != $? ] ; then + echo "Error: unexpected error code from geninfo_external=1" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +diff vanilla.info external_1.info +if [ $? != 0 ] ; then + echo "geninfo_external=1 didn't work" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + + +$COVER $LCOV_TOOL $LCOV_OPTS -o filtExceptOrphan.info -a example.data --filter exception,orphan 2>&1 | tee exceptOrphanFilter.log +if [ 0 != $? ] ; then + echo "Error: unexpected error code from except/orphan filtering" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +BRANCH_RPT=`grep branches... exceptOrphanFilter.log` +$COVER $LCOV_TOOL $LCOV_OPTS --summary filtExceptOrphan.info | tee summaryFilt.log +SUMMARY_RPT=`grep branches... summaryFilt.log` +if [ "$BRANCH_RPT" != "$SUMMARY_RPT" ] ; then + echo "Error: extract '$BRANCH_RPT' and summary '$SUMMARY_RPT' reports are different" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + + +$COVER $LCOV_TOOL $LCOV_OPTS -o filtExcept.info -a example.data --filter exception 2>&1 | tee exceptFilter.log +if [ 0 != $? ] ; then + echo "Error: unexpected error code from except filering" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +$COVER $LCOV_TOOL $LCOV_OPTS -o filtOrphan.info -a example.data --filter orphan 2>&1 | tee orphanFilter.log +if [ 0 != $? ] ; then + echo "Error: unexpected error code from orphan filering" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +N=`grep -c BRDA: example.data` +E=`grep -c BRDA: filtExcept.info` +O=`grep -c BRDA: filtOrphan.info` +EO=`grep -c BRDA: filtExceptOrphan.info` + +# strict ordering +if [ "$N" -le "$EO" ] ; then + echo "exception/orphan count" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +if [ "$E" -le "$EO" ] || [ "$E" -ge "$N" ] ; then + echo "exception $E <-> exception/orphan $EO N $N count" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +if [ "$O" -le "$EO" ] || [ "$O" -ge "$N" ] ; then + echo "N: $N exception $E orphan $O <-> exception/orphan $EO count" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +echo "Tests passed" + +if [ "x$COVER" != "x" ] && [ $LOCAL_COVERAGE == 1 ]; then + cover +fi diff --git a/tests/lcov/extract/Makefile b/tests/lcov/extract/Makefile new file mode 100644 index 00000000..ac7c0bf5 --- /dev/null +++ b/tests/lcov/extract/Makefile @@ -0,0 +1,6 @@ +include ../../common.mak + +TESTS := extract.sh + +clean: + $(shell ./extract.sh --clean) diff --git a/tests/lcov/extract/brokenCallback.pm b/tests/lcov/extract/brokenCallback.pm new file mode 100644 index 00000000..87034b94 --- /dev/null +++ b/tests/lcov/extract/brokenCallback.pm @@ -0,0 +1,19 @@ +#!/usr/bin/env perl + +package brokenCallback; + +sub new +{ + my $class = shift; + my $self = [@_]; + return bless $self, $class; +} + +sub resolve +{ + my ($self, $path) = @_; + die("dying in resolve") if scalar(@$self) <= 1 || $self->[1] eq 'die'; + return scalar(@$self) > 2 && $self->[2] eq 'present' ? $path : undef; +} + +1; diff --git a/tests/lcov/extract/envErr.rc b/tests/lcov/extract/envErr.rc new file mode 100644 index 00000000..7b7b9665 --- /dev/null +++ b/tests/lcov/extract/envErr.rc @@ -0,0 +1,6 @@ +# comment here +# key should be assigned to something... 
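+# the bare 'ignore_errors' key below is intentionally left without a value, so that extract.sh can +# verify that lcov flags the missing value as a config error unless that error is explicitly ignored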
+ignore_errors +ignore_errors = inconsistent +parallel = 1 +memory = 1024 diff --git a/tests/lcov/extract/envVar.rc b/tests/lcov/extract/envVar.rc new file mode 100644 index 00000000..05aca5d4 --- /dev/null +++ b/tests/lcov/extract/envVar.rc @@ -0,0 +1,7 @@ + +ignore_errors = $ENV{ENV_IGNORE} +# trigger an append operation +ignore_errors = empty +ignore_errors = inconsistent +parallel = 1 +memory_percentage = 50 diff --git a/tests/lcov/extract/extract.cpp b/tests/lcov/extract/extract.cpp new file mode 100644 index 00000000..d2c5fb72 --- /dev/null +++ b/tests/lcov/extract/extract.cpp @@ -0,0 +1,36 @@ +#include +#include +#include +#include + +int main(int argc, const char *argv[]) // TEST_UNREACH_FUNCTION +{ + bool b = false; + if (strcmp(argv[1], "1") == 0) + b = true; + + char *a = nullptr; + // TEST_BRANCH_START + if (b) // TEST_BRANCH_LINE + // TEST_BRANCH_STOP + printf("Hai\n"); + delete[] a; + + // TEST_OVERLAP_START + // TEST_OVERLAP_START + // TEST_UNREACHABLE_START + std::string str("asdads"); + // TEST_UNREACHABLE_END + str = "cd"; + // TEST_OVERLAP_END + + //TEST_DANGLING_START + //TEST_UNMATCHED_END + + std::cout << str << std::endl; // TEST_UNREACHABLE_LINE + + // LCOV_EXCL_START_1 + std::cout << "adding some code to ignore" << std::endl; + // LCOV_EXCL_STOP_1 + return 0; +} diff --git a/tests/lcov/extract/extract.sh b/tests/lcov/extract/extract.sh new file mode 100755 index 00000000..fe61beb5 --- /dev/null +++ b/tests/lcov/extract/extract.sh @@ -0,0 +1,1092 @@ +#!/bin/bash +set +x + +: ${USER:="$(id -u -n)"} + +source ../../common.tst + +rm -rf *.gcda *.gcno a.out *.info* *.txt* *.json dumper* testRC *.gcov *.gcov.* *.log *.o errs *.msg *.dat mytest spaces +rm -rf rcOptBug + +if [ -d separate ] ; then + chmod -R ug+rxw separate + rm -rf separate +fi + +clean_cover + +if [[ 1 == $CLEAN_ONLY ]] ; then + exit 0 +fi + +#use geninfo for capture - so we can collect coverage info +CAPTURE=$GENINFO_TOOL +#CAPTURE="$LCOV_TOOL --capture --directory" + +LCOV_OPTS="--branch-coverage $PARALLEL $PROFILE" +# gcc/4.8.5 (and possibly other old versions) generate inconsistent line/function data +IFS='.' read -r -a VER <<< `${CC} -dumpversion` +if [ "${VER[0]}" -lt 5 ] ; then + IGNORE="--ignore inconsistent" + # and filter exception branches to avoid spurious differences for old compiler + FILTER='--filter branch' +fi + + +if ! type ${CXX} >/dev/null 2>&1 ; then + echo "Missing tool: ${CXX}" >&2 + exit 2 +fi + +COMPILE_OPTS="--coverage" +# gcc 5 and 6 just do not work for initial capture +if [[ "${VER[0]}" -gt 4 && "${VER[0]}" -lt 7 ]] ; then + # no data generated by initial capture + IGNORE_EMPTY="--ignore empty" + NO_INITIAL_CAPTURE=1 +elif [ "${VER[0]}" -ge 14 ] ; then + ENABLE_MCDC=1 + # enable MCDC + LCOV_OPTS="$LCOV_OPTS --mcdc" + COMPILE_OPTS="$COMPILE_OPTS -fcondition-coverage" +fi + +${CXX} -std=c++1y $COMPILE_OPTS extract.cpp +if [ 0 != $? ] ; then + echo "Error: unexpected error from g++" + exit 1 +fi + +if [ "${VER[0]}" -lt 8 ] ; then + # cannot generate branch data unless 'intermediate' + IGNORE_USAGE="--ignore usage" + DERIVE_END='--rc derive_function_end_line=0' +fi + +if [ 1 != "$NO_INITIAL_CAPTURE" ] ; then + $COVER $CAPTURE . $LCOV_OPTS --initial -o initial.info $IGNORE_EMPTY $IGNORE_USAGE + if [ 0 != $? ] ; then + echo "Error: unexpected error code from lcov --initial" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi +fi + +${CC} -c --coverage $COMPILE_OPTS unused.c +if [ 0 != $? 
] ; then + echo "Error: unexpected error from gcc" + exit 1 +fi + +if [ "$NO_INITIAL_CAPTURE" != 1 ] ; then + # capture 'all' - which will pick up the unused file + $COVER $CAPTURE . $LCOV_OPTS --all -o all_initial.info $IGNORE_EMPTY $IGNORE_USAGE + if [ 0 != $? ] ; then + echo "Error: unexpected error code from lcov --capture --all" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + + # does the result contain file 'uused' + grep -E "SF:.+unused.c$" all_initial.info + if [ $? != 0 ] ; then + echo "Error: did not find 'unused'" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi +fi + +./a.out 1 +if [ 0 != $? ] ; then + echo "Error: unexpected error return from a.out" + exit 1 +fi + +$COVER $CAPTURE . $LCOV_OPTS -o external.info $FILTER $IGNORE +if [ 0 != $? ] ; then + echo "Error: unexpected error code from lcov --capture" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +$COVER $LCOV_TOOL $LCOV_OPTS --list external.info $FILTER $IGNORE +if [ 0 != $? ] ; then + echo "Error: unexpected error code from lcov --list" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# how many files reported? +COUNT=`grep -c SF: external.info` +if [ $COUNT == '1' ] ; then + echo "expected at least 2 files in external.info - found $COUNT" + exit 1 +fi + +# callback tests +echo $COVER $CAPTURE . $LCOV_OPTS -o callback.info $FILTER $IGNORE --criteria $SCRIPT_DIR/threshold.pm,--line,90,--branch,65,--function,100 +$COVER $CAPTURE . $LCOV_OPTS -o callback.info $FILTER $IGNORE --criteria $SCRIPT_DIR/threshold.pm,--line,90,--branch,65,--function,100 2>&1 | tee callback_fail.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "Error: expected criteria fail from lcov --capture - but not found" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +grep -i 'failed coverage criteria' callback_fail.log +if [ 0 != $? ] ; then + echo "Error: didn't find expected criteria message" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +echo $COVER $CAPTURE . $LCOV_OPTS -o callback2.info $FILTER $IGNORE --criteria $SCRIPT_DIR/threshold.pm,--line,20 +$COVER $CAPTURE . $LCOV_OPTS -o callback2.info $FILTER $IGNORE --criteria $SCRIPT_DIR/threshold.pm,--line,20 +if [ 0 != $? ] ; then + echo "Error: expected criteria pass from lcov --capture - but failed" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + + +echo $COVER $LCOV_TOOL $LCOV_OPTS -o aggregata.info -a callback.info $FILTER $IGNORE --criteria $SCRIPT_DIR/threshold.pm,--line,90,--branch,65,--function,100 +$COVER $LCOV_TOOL $LCOV_OPTS -o aggregata.info -a callback.info $FILTER $IGNORE --criteria $SCRIPT_DIR/threshold.pm,--line,90,--branch,65,--function,100 2>&1 | tee callback_fail2.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "Error: expected criteria fail from lcov --aggregate - but not found" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +grep -i 'failed coverage criteria' callback_fail2.log +if [ 0 != $? ] ; then + echo "Error: didn't find second expected criteria message" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +$COVER $LCOV_TOOL $LCOV_OPTS -o aggregate2.info -a callback.info $FILTER $IGNORE --criteria $SCRIPT_DIR/threshold.pm,--line,20 +if [ 0 != $? ] ; then + echo "Error: expected criteria pass from lcov --aggregate - but failed" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# error check for typo in command line - "--branchy" +echo $COVER $CAPTURE . $LCOV_OPTS -o callback.info $FILTER $IGNORE --criteria $SCRIPT_DIR/threshold.pm,--line,90,--branchy,65,--function,100 +$COVER $CAPTURE . 
$LCOV_OPTS -o callback.info $FILTER $IGNORE --criteria $SCRIPT_DIR/threshold.pm,--line,90,--branchy,65,--function,100 2>&1 | tee callback_err.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "Error: expected criteria config fail from lcov --capture" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +grep -i 'Error: unexpected option' callback_err.log +if [ 0 != $? ] ; then + echo "Error: didn't find expected criteria config message" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +#bad value - not numeric +echo $COVER $CAPTURE . $LCOV_OPTS -o callback.info $FILTER $IGNORE --criteria $SCRIPT_DIR/threshold.pm,--line,90,--branch,x,--function,100 +$COVER $CAPTURE . $LCOV_OPTS -o callback.info $FILTER $IGNORE --criteria $SCRIPT_DIR/threshold.pm,--line,90,--branch,x,--function,100 2>&1 | tee callback_err2.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "Error: expected another criteria config fail from lcov --capture" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +grep -i 'unexpected branch threshold' callback_err2.log +if [ 0 != $? ] ; then + echo "Error: didn't find expected criteria config message 2" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# context callbacks... +echo $CAPTURE . $LCOV_OPTS --all -o context.info $IGNORE $IGNORE_EMPTY $IGNORE_USAGE --context $SCRIPT_DIR/context.pm +$COVER $CAPTURE . $LCOV_OPTS --all -o context.info $IGNORE $IGNORE_EMPTY $IGNORE_USAGE --context $SCRIPT_DIR/context.pm +if [ 0 != $? ] ; then + echo "Error: unexpected error code from lcov --capture --context" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +grep -F "\"user\":\"$USER\"" context.info.json +if [ 0 != $? ] ; then + echo "Error: did not find expected context field" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +grep user: context.info +if [ 0 == $? ] ; then + echo "Error: did not expect to find context field in info" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +echo $CAPTURE . $LCOV_OPTS --all -o context_comment.info $IGNORE $IGNORE_EMPTY $IGNORE_USAGE --context $SCRIPT_DIR/context.pm,--comment +$COVER $CAPTURE . $LCOV_OPTS --all -o context_comment.info $IGNORE $IGNORE_EMPTY $IGNORE_USAGE --context $SCRIPT_DIR/context.pm,--comment +if [ 0 != $? ] ; then + echo "Error: unexpected error code from lcov --capture --context" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +grep -F "\"user\":\"$USER\"" context.info.json +if [ 0 != $? ] ; then + echo "Error: did not find expected context field" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +grep "#user: $USER" context_comment.info +if [ 0 != $? ] ; then + echo "Error: did not find context data in comment field" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + + +# check error... +$COVER $LCOV_TOOL -d . $LCOV_OPTS --all -o err.info $IGNORE $IGNORE_EMPTY $IGNORE_USAGE --context $SCRIPT_DIR/context.pm --context tooManyArgs +if [ 0 == $? ] ; then + echo "Error: expected error lcov --capture --context ..." + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# call a context shellscript... +echo $CAPTURE . $LCOV_OPTS --all -o context2.info $IGNORE $IGNORE_EMPTY $IGNORE_USAGE --context ./testContext.sh +$COVER $CAPTURE . $LCOV_OPTS --all -o context2.info $IGNORE $IGNORE_EMPTY $IGNORE_USAGE --context ./testContext.sh +if [ 0 != $? ] ; then + echo "Error: unexpected error code from lcov --capture --context shellscript" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# call a context shellscript which fails... +echo $CAPTURE . 
$LCOV_OPTS --all -o context3.info $IGNORE $IGNORE_EMPTY $IGNORE_USAGE --context ./testContext.sh --context die +$COVER $CAPTURE . $LCOV_OPTS --all -o context3.info $IGNORE $IGNORE_EMPTY $IGNORE_USAGE --context ./testContext.sh --context die +if [ 0 == $? ] ; then + echo "Error: expected error code from lcov --capture --context shellscript" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +echo $CAPTURE . $LCOV_OPTS --all -o context4.info $IGNORE $IGNORE_EMPTY $IGNORE_USAGE --context ./testContext.sh --context arg --ignore callback +$COVER $CAPTURE . $LCOV_OPTS --all -o context4.info $IGNORE $IGNORE_EMPTY $IGNORE_USAGE --context ./testContext.sh --context arg --ignore callback +if [ 0 != $? ] ; then + echo "Error: unexpected error code: ignore not applied" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# applying EXCLUDE directive - so we can test both EXCLUDE and UNREACHABLE +# without changing the test much +#CAPTURE="$GENINFO_TOOL --rc lcov_excl_start=LCOV_EXCL_START_1 --rc lcov_excl_stop=LCOV_EXCL_STOP_1" + +$COVER $CAPTURE . $LCOV_OPTS --no-external -o internal.info --rc lcov_excl_start=LCOV_EXCL_START_1 --rc lcov_excl_stop=LCOV_EXCL_STOP_1 +if [ 0 != $? ] ; then + echo "Error: unexpected error from capture-internal" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# substitute PWD so the test isn't dependent on directory layout. +# quiet, to suppress core count and (empty) message summary +$COVER $LCOV_TOOL $LCOV_OPTS --list internal.info --subst "s#$PWD#.#" -q -q --filter function > list.dat + +if [ "$ENABLE_MCDC" == 1 ] ; then + diff list.dat list_mcdc.gold +else + # substitute the actual numbers - to become insensitive to compiler version + # which produce different numbers of coverpoints + sed -E 's/[1-9][0-9]*\b/N/g' list.dat > munged.dat + diff munged.dat list.gold +fi +if [ 0 != $? ] ; then + echo "Error: unexpected list difference" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +COUNT=`grep -c SF: internal.info` +if [ $COUNT != '1' ] ; then + echo "expected 1 file in internal.info - found $COUNT" + exit 1 +fi +INITIAL_COUNT=`grep -c BRDA internal.info` + +# capture again, using --all - should pick up 'unused.c' +$COVER $CAPTURE . $LCOV_OPTS --all -o all_internal.info --no-external $FILTER $IGNORE --rc lcov_excl_start=LCOV_EXCL_START_1 --rc lcov_excl_stop=LCOV_EXCL_STOP_1 +if [ 0 != $? ] ; then + echo "Error: unexpected error code from lcov --capture --all" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +if [ "$NO_INITIAL_CAPTURE" != 1 ] ; then + # does the result contain file 'uused' + grep -E "SF:.+unused.c$" all_internal.info + if [ $? != 0 ] ; then + echo "Error: did not find 'unused' 2" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + if [ "${VER[0]}" -gt 7 ] ; then + # should have found the branch in 'unused.c' + C=`grep -c BRDA: all_internal.info` + let DIFF=$C-$INITIAL_COUNT + if [ "$DIFF" != 2 ] ; then + echo "Error: unexpected branch count $C in 'unused' - expected $INITIAL_COUNT + 2" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + fi +fi + +# test some config file options + +# error message for missing env var in RC file +$COVER $LCOV_TOOL $IGNORE --capture -d . $LCOV_OPTS -o err1.info --config-file envVar.rc 2>&1 | tee err1.log +if [ ${PIPESTATUS[0]} == 0 ] ; then + echo "expected 'ERROR_USAGE' - did not find" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# skip ignore error +$COVER $LCOV_TOOL $IGNORE --capture -d . $LCOV_OPTS -o ignore1.info --config-file envVar.rc --ignore usage +if [ 0 != $? 
] ; then + echo "expected to ignore 'ERROR_USAGE'" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +export ENV_IGNORE='empty' +# error message for missing env var in RC file +$COVER $LCOV_TOOL $IGNORE --capture -d . $LCOV_OPTS -o setVar.info --config-file envVar.rc +if [ 0 != $? ] ; then + echo "expected to set var from env - but didn't" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# error message for missing env var in RC file +$COVER $LCOV_TOOL $IGNORE --capture -d . $LCOV_OPTS -o err2.info --config-file envErr.rc 2>&1 | tee err2.log +if [ ${PIPESTATUS[0]} == 0 ] ; then + echo "expected missing value error - not found" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# ignore the error +$COVER $LCOV_TOOL $IGNORE --capture -d . $LCOV_OPTS -o ignore2.info --config-file envErr.rc --ignore format +if [ 0 != $? ] ; then + echo "expected to ignore error - but didn't" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + + +# use legacy RC 'geninfo_adjust_src_path option (had been a bug) +$COVER $CAPTURE . $LCOV_OPTS --no-external -o rcOptBug $PARALLEL $PROFILE --rc "geninfo_adjust_src_path='/tmp/foo => /build/bar'" --ignore unused 2>&1 | tee rcOptBug.log +if [ 0 != $? ] ; then + echo "Error: extract with RC option failed" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +grep -E "'substitute' pattern .+ is unused" rcOptBug.log +if [ 0 != $? ] ; then + echo "Error: missing RC pattern unused message" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +grep -E "RC option 'geninfo_adjust_src_path' is deprecated" rcOptBug.log +if [ 0 != $? ] ; then + echo "Error: missing RC pattern unused message" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + + +# workaround: depending on compiler version, we see a coverpoint on the +# close brace line (gcc/6 for example) or we don't (gcc/10 for example) +BRACE_LINE='^DA:36' +MARKER_LINES=`grep -v $BRACE_LINE internal.info | grep -c "^DA:"` + +# check 'no-markers': is the excluded line back? +$COVER $CAPTURE . $LCOV_OPTS --no-external -o nomarkers.info --no-markers +if [ $? != 0 ] ; then + echo "error return from extract no-markers" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +NOMARKER_LINES=`grep -v $BRACE_LINE nomarkers.info | grep -c "^DA:"` +NOMARKER_BRANCHES=`grep -c "^BRDA:" nomarkers.info` +if [ $NOMARKER_LINES != '13' ] ; then + echo "did not honor --no-markers expected 13 found $NOMARKER_LINES" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# check overlap detection for both exclude and unreachable attributes +for attrib in "excl" "unreachable" ; do + # override excl region start/stop and look for error + $COVER $CAPTURE . $LCOV_OPTS --no-external -o regionErr1.info --rc lcov_${attrib}_start=TEST_OVERLAP_START --rc lcov_${attrib}_stop=TEST_OVERLAP_END --msg-log + if [ $? == 0 ] ; then + echo "error expected overlap $attrib fail" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + + grep -E 'overlapping exclude directives. Found TEST_OVERLAP_START at .+ but no matching TEST_OVERLAP_END for TEST_OVERLAP_START at line ' regionErr1.msg + if [ 0 != $? ] ; then + echo "error expected overlap message but didn't find" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + + $COVER $CAPTURE . $LCOV_OPTS --no-external -o regionErr2.info --rc lcov_${attrib}_start=TEST_DANGLING_START --rc lcov_${attrib}_stop=TEST_DANGLING_END --msg-log + if [ $? 
== 0 ] ; then + echo "error expected dangling fail" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + + grep -E 'unmatched TEST_DANGLING_START at line .+ saw EOF while looking for matching TEST_DANGLING_END' regionErr2.msg + if [ 0 != $? ] ; then + echo "error expected dangling message but didn't find" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + + $COVER $CAPTURE . $LCOV_OPTS --no-external -o regionErr3.info --rc lcov_${attrib}_start=TEST_UNMATCHED_START --rc lcov_${attrib}_stop=TEST_UNMATCHED_END --msg-log + if [ $? == 0 ] ; then + echo "error expected unmatched fail" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + + grep -E 'found TEST_UNMATCHED_END directive at line .+ without matching TEST_UNMATCHED_START' regionErr3.msg + if [ 0 != $? ] ; then + echo "error expected unmatched message but didn't find" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + + # override excl_line start/stop - and make sure we didn't match + $COVER $CAPTURE . $LCOV_OPTS --no-external -o ${attrib}.info --rc lcov_${attrib}_start=nomatch_start --rc lcov_${attrib}_stop=nomatch_end + if [ $? != 0 ] ; then + echo "error return from marker override" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + EXCL_LINES=`grep -v $BRACE_LINE ${attrib}.info | grep -c "^DA:"` + if [ $EXCL_LINES != $NOMARKER_LINES ] ; then + echo "did not honor marker override: expected $NOMARKER_LINES found $EXCL_LINES" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi +done + +# override the excl_br start/stop markers - and make sure they do match +$COVER $CAPTURE . $LCOV_OPTS --no-external -o exclbr.info --rc lcov_excl_br_start=TEST_BRANCH_START --rc lcov_excl_br_stop=TEST_BRANCH_STOP +if [ $? != 0 ] ; then + echo "error return from branch marker override" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +EXCL_BRANCHES=`grep -c "^BRDA:" exclbr.info` + +if [ $EXCL_BRANCHES -ge $NOMARKER_BRANCHES ] ; then + echo "did not honor br marker override: expected $NOMARKER_BRANCHES to be larger than $EXCL_BRANCHES" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# override the excl_br_line marker - and make sure it excludes the same branches +$COVER $CAPTURE . $LCOV_OPTS --no-external -o exclbrline.info --rc lcov_excl_br_line=TEST_BRANCH_LINE +if [ $? != 0 ] ; then + echo "error return from branch line marker override" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +EXCL_LINE_BRANCHES=`grep -c "^BRDA:" exclbrline.info` + +if [ $EXCL_LINE_BRANCHES != $EXCL_BRANCHES ] ; then + echo "did not honor br line marker override: expected $EXCL_BRANCHES found $EXCL_LINE_BRANCHES" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# check to see if "--omit-lines" works properly... +$COVER $CAPTURE . $LCOV_OPTS --no-external --omit-lines '\s+std::string str.+' -o omit.info --rc lcov_excl_start=LCOV_EXCL_START_1 --rc lcov_excl_stop=LCOV_EXCL_STOP_1 2>&1 | tee omitLines.log + +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "Error: unexpected error code from lcov --omit" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +BRACE_LINE="DA:36" +# a bit of a hack: gcc/10 doesn't put a DA entry on the closing brace +COUNT=`grep -v $BRACE_LINE omit.info | grep -c ^DA:` +if [ $COUNT != '11' ] ; then + echo "expected 11 DA entries in 'omit.info' - found $COUNT" + exit 1 +fi + +# check that "--omit-lines" fails if there is no match +$COVER $CAPTURE . $LCOV_OPTS --no-external --omit-lines 'xyz\s+std::string str.+' -o omitErr.info + +if [ 0 == $? 
] ; then + echo "Error: did not see expected error code from lcov --omit" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +$COVER $CAPTURE . $LCOV_OPTS --no-external --omit-lines 'xyz\s+std::string str.+' -o omitWarn.info --ignore unused --rc lcov_excl_start=LCOV_EXCL_START_1 --rc lcov_excl_stop=LCOV_EXCL_STOP_1 + +if [ 0 != $? ] ; then + echo "Error: unexpected expected error code from lcov --omit --ignore.." + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +COUNT=`grep -v $BRACE_LINE omitWarn.info | grep -c ^DA:` +if [ $COUNT != '12' ] ; then + echo "expected 12 DA entries in 'omitWarn.info' - found $COUNT" + exit 1 +fi + +# try again, with rc file instead +echo "omit_lines = ^std::string str.+\$" > testRC # no space at start ofline +echo "omit_lines = ^\\s+std::string str.+\$" >> testRC +#should fail due to no match... +$COVER $CAPTURE . $LCOV_OPTS --no-external --config-file testRC -o rc_omitErr.info + +if [ 0 == $? ] ; then + echo "Error: did not see expected error code from lcov --config with bad omit" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +echo "ignore_errors = unused" >> testRC +echo "ignore_errors = empty" >> testRC + +$COVER $CAPTURE . $LCOV_OPTS --no-external --config-file testRC -o rc_omitWarn.info --rc lcov_excl_start=LCOV_EXCL_START_1 --rc lcov_excl_stop=LCOV_EXCL_STOP_1 + +if [ 0 != $? ] ; then + echo "Error: saw unexpected error code from lcov --config with ignored bad omit" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +COUNT=`grep -v $BRACE_LINE rc_omitWarn.info | grep -c ^DA:` +if [ $COUNT != '11' ] ; then + echo "expected 11 DA entries in 'rc_omitWarn.info' - found $COUNT" + exit 1 +fi + +# test with checksum.. +$COVER $CAPTURE . $LCOV_OPTS --no-external -o checksum.info --checksum +if [ $? != 0 ] ; then + echo "capture with checksum failed" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +# read file with matching checksum... +$COVER $LCOV_TOOL $LCOV_OPTS --summary checksum.info --checksum +if [ $? != 0 ] ; then + echo "summary with checksum failed" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +#munge the checksum in the output file +perl -i -pe 's/DA:6,1.+/DA:6,1,abcde/g' < checksum.info > mismatch.info +$COVER $LCOV_TOOL $LCOV_OPTS --summary mismatch.info --checksum +if [ $? == 0 ] ; then + echo "summary with mismatched checksum expected to fail" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +perl -i -pe 's/DA:6,1.+/DA:6,1/g' < checksum.info > missing.info +$COVER $LCOV_TOOL $LCOV_OPTS --summary missing.info --checksum +if [ $? == 0 ] ; then + echo "summary with missing checksum expected to fail" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# some tests for 'unreachable' implementation +$COVER $CAPTURE . $LCOV_OPTS --no-external -o unreachable.info --rc lcov_unreachable_start=LCOV_EXCL_START_1 --rc lcov_unreachable_stop=LCOV_EXCL_STOP_1 2>&1 | tee unreachableErr1.txt +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "Error: unexpected error from capture-internal" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +grep -E "\(unreachable\) .+ BRDA record in 'unreachable' region has non-zero hit count" unreachableErr1.txt +if [ 0 != $? ] ; then + echo "Error: didn't find expected unreachable DA record" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# exclude branch coverage and run again - to get to unreachable line error +NOBRANCH_OPT=${LCOV_OPTS/--branch-coverage} +$COVER $CAPTURE . 
$NOBRANCH_OPT --no-external -o unreachable.info --rc lcov_unreachable_start=LCOV_EXCL_START_1 --rc lcov_unreachable_stop=LCOV_EXCL_STOP_1 2>&1 | tee unreachableErr2.txt +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "Error: unexpected error from capture-internal" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +grep -E "\(unreachable\) .+ 'unreachable' line has non-zero hit count" unreachableErr2.txt +if [ 0 != $? ] ; then + echo "Error: didn't find expected unreachable DA record" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +$COVER $CAPTURE . $LCOV_OPTS --no-external -o unreachable.info --rc lcov_unreachable_start=LCOV_EXCL_START_1 --rc lcov_unreachable_stop=LCOV_EXCL_STOP_1 --ignore unreachable 2>&1 | tee unreachableWarn1.txt +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "Error: unexpected error from capture-internal" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +COUNT=`grep -c -E "\(unreachable\) .+ 'unreachable' .+ has non-zero hit count" unreachableWarn1.txt` +if [ $COUNT != 2 ] ; then + echo "Error: didn't find expected 'unreachable warnings" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + +fi + +$COVER $CAPTURE . $LCOV_OPTS --no-external -o exclLine.info --rc lcov_excl_line=TEST_UNREACHABLE_LINE +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "Error: unexpected error from exclude line" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +grep DA:30 exclLine.info +if [ 0 == $? ] ; then + echo "Error: line exclusion didn't exclude" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + + +$COVER $CAPTURE . $LCOV_OPTS --no-external -o unreachLine.info --rc lcov_unreachable_line=TEST_UNREACHABLE_LINE --ignore unreachable +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "Error: unexpected error from unreachable_line" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +grep DA:30 unreachLine.info +if [ 0 != $? ] ; then + echo "Error: unreached line dropped by default" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +$COVER $LCOV_TOOL $LCOV_OPTS -a unreachLine.info --rc lcov_unreachable_line=TEST_UNREACHABLE_LINE --filter region --ignore unreachable --rc retain_unreachable_coverpoints_if_executed=0 -o removeUnreach.info +if [ 0 != $? ] ; then + echo "Error: lcov unreached failed" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +grep DA:30 removeUnreach.info +if [ 0 == $? ] ; then + echo "Error: unreached line not removed" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +$COVER $CAPTURE . $LCOV_OPTS --no-external -o unreachable.info --rc lcov_unreachable_line=TEST_UNREACH_FUNCTION 2>&1 | tee unreachableErr3.txt +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "Error: unexpected error from unreach function" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +grep -E '\(unreachable\) .+ function main is executed but was marked unreachable' unreachableErr3.txt +if [ 0 != $? ] ; then + echo "Error: didn't find expected unreachable function record" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +grep -E "\(unreachable\) .+ 'unreachable' line has non-zero hit count" unreachableErr3.txt +if [ 0 == $? 
] ; then + echo "Error: found unexpected unreachable DA record (should have stopped at function)" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +$COVER $LCOV_TOOL $LCOV_OPTS -a unreachLine.info --rc lcov_unreachable_line=TEST_UNREACH_FUNCTION --filter region --ignore unreachable --rc retain_unreachable_coverpoints_if_executed=0 -o removeUnreachFunc.info 2>&1 | tee unreachFunc.txt +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "Error: lcov unreached failed" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +grep -E '\(unreachable\) .+ function main is executed but was marked unreachable' unreachFunc.txt +if [ 0 != $? ] ; then + echo "Error: expected unreached function message" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +grep -E "\(unreachable\) .+ 'unreachable' line has non-zero hit count" unreachFunc.txt +if [ 0 != $? ] ; then + echo "Error: didn't find expected unreachable DA warning" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +grep FNA:0,1,main removeUnreachFunc.info +if [ 0 == $? ] ; then + echo "Error: expected function record to be removed" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# check case when build dir and GCOV_PREFIX directory are not the same - +# so .gcno and .gcda files are in different places +export DEPTH=0 +BASE=`pwd` +while [ $BASE != '/' ] ; do + echo $BASE + BASE=`dirname $BASE` + let DEPTH=$DEPTH+1 +done +echo "found depth $DEPTH" +let STRIP=$DEPTH+2 + +mkdir -p separate/build +mkdir -p separate/run +mkdir -p separate/copy +( cd separate/build ; ${CXX} -std=c++1y $COMPILE_OPTS ../../extract.cpp ) +cp separate/build/*.gcno separate/copy +# make unwritable - so we don't allow lcov to write temporaries +# this emulates what happens when the build job is owned by one user, +# the test job by another, and a third person is trying to create coverage reports +chmod ugo-w separate/build +chmod ugo-w separate/copy +if [ 0 != $? ] ; then + echo "Error: no .gcno files to copy" + exit 1 +fi + +( cd separate/run ; GCOV_PREFIX=my/test GCOV_PREFIX_STRIP=$STRIP ../build/a.out 1 ) +if [ 0 != $? ] ; then + echo "Error: execution failed" + exit 1 +fi +mkdir separate/run/my/test/no_read +chmod ugo-w separate/run +$COVER $CAPTURE separate/run/my/test $LCOV_OPTS --build-directory separate/build -o separate.info $FILTER $IGNORE +if [ 0 != $? ] ; then + echo "Error: extract failed" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +$COVER $CAPTURE separate/run/my/test $LCOV_OPTS --build-directory separate/copy -o copy.info $FILTER $IGNORE +if [ 0 != $? ] ; then + echo "Error: extract from copy failed" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# use --resolve-script instead - simply echo the right value of the gcno file +$COVER $CAPTURE separate/run/my/test $LCOV_OPTS --resolve-script ./fakeResolve.sh --resolve-script separate/copy/*extract.gcno -o resolve.info $FILTER $IGNORE +if [ 0 != $? ] ; then + echo "Error: extract with resolve-script failed" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# captured data from GCOV_PREFIX result should be identical to vanilla build +for d in separate.info copy.info resolve.info ; do + diff external.info $d + if [ $? != 0 ] ; then + echo "Error: unexpected GCOV_PREFIX result '$d'" + exit 1 + fi +done + + +# trigger an error from an unreadable directory.. 
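+# the no_read subdirectory created above is made unsearchable so that the 'find' call used to locate +# coverage data files fails; the capture is expected to report this (see the grep for "error in 'find" below) +# unless the error is downgraded with '--ignore utility'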
+chmod ugo-rx separate/run/my/test/no_read
+$COVER $CAPTURE separate/run/my/test $LCOV_OPTS --build-directory separate/copy -o unreadable.info $FILTER $IGNORE 2>&1 | tee err.log
+if [ 0 == ${PIPESTATUS[0]} ] ; then
+    echo "Error: expected fail from unreadable dir"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+
+grep "error in 'find" err.log
+if [ 0 != $? ] ; then
+    echo "expected error not found"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+
+$COVER $CAPTURE separate/run/my/test $LCOV_OPTS --build-directory separate/copy -o unreadable.info $FILTER $IGNORE --ignore utility 2>&1 | tee warn.log
+if [ 0 != ${PIPESTATUS[0]} ] ; then
+    echo "Error: extract from unreadable failed"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+grep "error in 'find" warn.log
+if [ 0 != $? ] ; then
+    echo "expected warning not found"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+
+chmod -R ug+rxw separate
+
+# try filtering missing files
+sed -e s/extract.cpp/notfound.cpp/ external.info > missing_file.info
+$COVER $LCOV_TOOL $LCOV_OPTS -o removeMissing.info -a missing_file.info --filter missing $DERIVE_END
+if [ 0 != $? ] ; then
+    echo "filter missing failed"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+grep -E 'SF:.*notfound.cpp' removeMissing.info
+if [ 0 == $? ] ; then
+    echo "expected to remove missing file"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+
+$COVER $LCOV_TOOL $LCOV_OPTS -o removeMissing_cb.info -a missing_file.info --filter missing --resolve-script brokenCallback.pm,live,missing $DERIVE_END
+if [ 0 != $? ] ; then
+    echo "filter missing callback failed"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+grep -E 'SF:.*notfound.cpp' removeMissing_cb.info
+if [ 0 == $? ] ; then
+    echo "expected to remove missing file"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+
+$COVER $LCOV_TOOL $LCOV_OPTS -o removeMissing_cb2.info -a missing_file.info --filter missing --resolve-script brokenCallback.pm,live,present --ignore source $DERIVE_END
+if [ 0 != $? ] ; then
+    echo "filter missing callback failed"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+grep -E 'SF:.*notfound.cpp' removeMissing_cb2.info
+if [ 0 != $? ] ; then
+    echo "expected to keep file"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+
+$COVER $LCOV_TOOL $LCOV_OPTS -o removeMissing_cb3.info -a missing_file.info --filter missing --resolve-script brokenCallback.pm,die --ignore callback $DERIVE_END 2>&1 | tee removeMissing.log
+if [ 0 != ${PIPESTATUS[0]} ] ; then
+    echo "filter missing callback failed"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+grep -E 'SF:.*notfound.cpp' removeMissing_cb3.info
+if [ 0 == $? ] ; then
+    echo "expected to remove file"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+grep -E 'resolve.*failed' removeMissing.log
+if [ 0 != $? ] ; then
+    echo "expected to find messages"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+
+# try to produce some errors that were hit by user :-(
+mkdir -p errs
+rm -f errs/*
+( cd errs ; ln -s ../*extract.gcda ; ln -s ../missing.gcno *extract.gcno )
+$COVER $CAPTURE errs $LCOV_OPTS -o err1.info $FILTER $IGNORE --msg-log
+if [ 0 == $? ] ; then
+    echo "Error: expected error code from lcov --capture"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+
+grep ERROR: err1.msg
+if [ 0 != $? ] ; then
+    echo "Error: expected error message not found"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+
+$COVER $CAPTURE errs $LCOV_OPTS -o err2.info $FILTER $IGNORE --initial --msg-log
+if [ 0 == $?
] ; then + echo "Error: expected error code from lcov --capture --initial" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +grep ERROR: err2.msg +if [ 0 != $? ] ; then + echo "Error: expected error message 2 not foune" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +$COVER $CAPTURE errs $LCOV_OPTS -o err3.info $FILTER $IGNORE --initial --ignore path --msg-log err.3.msg +if [ 0 == $? ] ; then + echo "Error: expected error code from lcov --capture --initial --ignore" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +grep ERROR: err.3.msg +if [ 0 != $? ] ; then + echo "Error: expected error message 3 not foune" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +$COVER $CAPTURE errs $LCOV_OPTS -o err4.info $FILTER $IGNORE --initial --keep-going --msg-log +if [ 0 == $? ] ; then + echo "Error: expected error code from lcov --capture --initial --keep-going" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# test filename containing spaces +rm -rf ./mytest +mkdir -pv ./mytest +echo "int main(){}" > './mytest/main space.cpp' +( cd ./mytest ; ${CXX} -c 'main space.cpp' --coverage ) + +if [ 1 != "$NO_INITIAL_CAPTURE" ] ; then + $COVER $CAPTURE mytest -i -o spaces.info + if [ 0 != $? ] ; then + echo "Error: unexpected error from filename containing space" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + + $COVER $LCOV_TOOL --list spaces.info + if [ 0 != $? ] ; then + echo "Error: unable to list filename containing space" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + + $COVER $GENHTML_TOOL -o spaces spaces.info + if [ 0 != $? ] ; then + echo "Error: unable to generate HTML for filename containing space" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi +fi + +echo "Tests passed" + +if [ "x$COVER" != "x" ] && [ $LOCAL_COVERAGE == 1 ]; then + cover +fi diff --git a/tests/lcov/extract/fakeResolve.sh b/tests/lcov/extract/fakeResolve.sh new file mode 100755 index 00000000..31337785 --- /dev/null +++ b/tests/lcov/extract/fakeResolve.sh @@ -0,0 +1,2 @@ +#!/bin/sh +echo $1 diff --git a/tests/lcov/extract/list.gold b/tests/lcov/extract/list.gold new file mode 100644 index 00000000..4437f2af --- /dev/null +++ b/tests/lcov/extract/list.gold @@ -0,0 +1,7 @@ + |Lines |Functions |Branches +Filename |Rate Num|Rate Num|Rate Num +====================================================== +[./] +extract.cpp | N% N| N% N|N.0% N +====================================================== + Total:| N% N| N% N|N.0% N diff --git a/tests/lcov/extract/list_mcdc.gold b/tests/lcov/extract/list_mcdc.gold new file mode 100644 index 00000000..10aba46b --- /dev/null +++ b/tests/lcov/extract/list_mcdc.gold @@ -0,0 +1,7 @@ + |Lines |Functions |Branches |MC/DC +Filename |Rate Num|Rate Num|Rate Num|Rate Num +=================================================================== +[./] +extract.cpp | 100% 13| 100% 1|50.0% 16|50.0% 6 +=================================================================== + Total:| 100% 13| 100% 1|50.0% 16|50.0% 6 diff --git a/tests/lcov/extract/testContext.sh b/tests/lcov/extract/testContext.sh new file mode 100755 index 00000000..9de72fc7 --- /dev/null +++ b/tests/lcov/extract/testContext.sh @@ -0,0 +1,12 @@ +#!/bin/sh + +if [ 'die' = "$1" ] ; then + echo "dying" + exit 1 +fi + +echo USERNAME `whoami` +echo MULTILINE line1 +echo MULTILINE line2 +echo MULTILINE line3 +exit 0 diff --git a/tests/lcov/extract/unused.c b/tests/lcov/extract/unused.c new file mode 100644 index 00000000..b4262b7f --- /dev/null +++ b/tests/lcov/extract/unused.c @@ -0,0 +1,9 @@ +/* this code is not linked into 
the exe/not called.
+   Testing 'initial' capture and combined capture */
+#include <stdio.h>
+
+void f(int y)
+{
+    if (y)
+        printf("y == %d\n", y);
+}
diff --git a/tests/lcov/follow/Makefile b/tests/lcov/follow/Makefile
new file mode 100644
index 00000000..a41b5a44
--- /dev/null
+++ b/tests/lcov/follow/Makefile
@@ -0,0 +1,6 @@
+include ../../common.mak
+
+TESTS := follow.sh
+
+clean:
+	$(shell ./follow.sh --clean)
diff --git a/tests/lcov/follow/follow.sh b/tests/lcov/follow/follow.sh
new file mode 100755
index 00000000..b3f0a8e2
--- /dev/null
+++ b/tests/lcov/follow/follow.sh
@@ -0,0 +1,127 @@
+#!/bin/bash
+set +x
+
+source ../../common.tst
+
+rm -rf rundir *.info
+
+clean_cover
+
+if [[ 1 == $CLEAN_ONLY ]] ; then
+    exit 0
+fi
+
+LCOV_OPTS="$PARALLEL $PROFILE"
+# gcc/4.8.5 (and possibly other old versions) generate inconsistent line/function data
+IFS='.' read -r -a VER <<< `${CC} -dumpversion`
+if [ "${VER[0]}" -lt 5 ] ; then
+    IGNORE="--ignore inconsistent"
+fi
+NO_INITIAL_CAPTURE=0
+if [[ "${VER[0]}" -gt 4 && "${VER[0]}" -lt 7 ]] ; then
+    # no data generated by initial capture
+    IGNORE_EMPTY="--ignore empty"
+    NO_INITIAL_CAPTURE=1
+fi
+
+if [ 1 == $NO_INITIAL_CAPTURE ] ; then
+    # all tests use --initial
+    echo 'all tests skipped'
+    exit 0
+fi
+
+
+mkdir -p rundir
+cd rundir
+
+rm -Rf src src2
+
+mkdir src
+ln -s src src2
+
+echo 'int a (int x) { return x + 1; }' > src/a.c
+echo 'int b (int x) { return x + 2; }' > src/b.c
+
+${CC} -c --coverage src/a.c -o src/a.o
+${CC} -c --coverage src2/b.c -o src/b.o
+
+$COVER $LCOV_TOOL -o out2.info --capture --initial --no-external -d src --follow --rc geninfo_follow_file_links=1
+if [ 0 != $? ] ; then
+    echo "Error: unexpected error code from lcov --initial --follow"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+
+COUNT2=`grep -c SF: out2.info`
+if [ 2 != $COUNT2 ] ; then
+    echo "Error: expected COUNT==2, found $COUNT2"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+
+$COVER $GENINFO_TOOL -o out3.info --initial --no-external src --follow --rc geninfo_follow_file_links=1
+if [ 0 != $? ] ; then
+    echo "Error: unexpected error code from geninfo --initial --follow"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+
+diff out3.info out2.info
+if [ 0 != $? ] ; then
+    echo "Error: expected identical geninfo output"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+
+$COVER $GENINFO_TOOL -o out4.info --initial --no-external src2 --follow
+if [ 0 != $? ] ; then
+    echo "Error: unexpected error code from lcov src2"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+
+diff out4.info out2.info
+# should not be identical as the 'src2/b.c' path should be in 'out4.info'
+if [ 0 == $? ] ; then
+    echo "Error: expected not identical geninfo output"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+cat out4.info | sed -e s/src2/src/g > out5.info
+diff out5.info out2.info
+# should be identical now
+if [ 0 != $? ] ; then
+    echo "Error: expected identical geninfo output after substitution"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+
+cd ..
+$COVER $GENINFO_TOOL -o top.info --initial --no-external rundir --follow
+if [ 0 != $? ] ; then
+    echo "Error: unexpected error code from geninfo rundir"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+diff top.info rundir/out4.info
+# should be identical now
+if [ 0 != $?
] ; then + echo "Error: expected identical geninfo output after substitution" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +echo "Tests passed" + +if [ "x$COVER" != "x" ] && [ $LOCAL_COVERAGE == 1 ]; then + cover +fi diff --git a/tests/lcov/format/Makefile b/tests/lcov/format/Makefile new file mode 100644 index 00000000..7ebfbb52 --- /dev/null +++ b/tests/lcov/format/Makefile @@ -0,0 +1,6 @@ +include ../../common.mak + +TESTS := format.sh + +clean: + $(shell ./format.sh --clean) diff --git a/tests/lcov/format/format.info b/tests/lcov/format/format.info new file mode 100644 index 00000000..67e77862 --- /dev/null +++ b/tests/lcov/format/format.info @@ -0,0 +1,43 @@ +TN: +SF:a.cpp +DA:1,1 +#common line: my count is zero and yours is nonzero +DA:2,1 +DA:3,1 +DA:4,-3 +DA:10,1.a0e+19 +DA:11,0 +DA:12,1.0e+19 +LF:4 +LH:3 +FN:1,2,fcn +FN:1,2,alias +FN:1,2,alias2 +FN:1,2,alias3 +FN:3,3,noCommonAlias +FN:11,11,onlyA +FNF:4 +FNH:3 +# my count is zero yours is nonzero +FNDA:0,fcn +FNDA:-2,alias +FNDA:1.5eb+20,alias2 +FNDA:1.5e+20,alias3 +FNDA:-0,onlyA +FNDA:1,noCommonAlias + +BRDA:1,1,0,1 +BRDA:1,1,1,-1 +BRDA:1,1,2,- +BRDA:1,2,0,1.67+20 +BRDA:1,2,1,1.67e+20 +# common branch expr count zero in my, nonzero in you +BRDA:1,2,1,0 + +# branch in A only +BRDA:11,0,0,0 +BRDA:11,0,1,-0 + +BRF:7 +BRH:4 +end_of_record diff --git a/tests/lcov/format/format.sh b/tests/lcov/format/format.sh new file mode 100755 index 00000000..960aa429 --- /dev/null +++ b/tests/lcov/format/format.sh @@ -0,0 +1,167 @@ +#!/bin/bash +set +x + +# test various errors in .info data + +source ../../common.tst + +LCOV_OPTS="--branch $PARALLEL $PROFILE" +# gcc/4.8.5 (and possibly other old versions) generate inconsistent line/function data +IFS='.' read -r -a VER <<< `${CC} -dumpversion` +if [ "${VER[0]}" -lt 5 ] ; then + IGNORE="--ignore inconsistent" + # and filter exception branches to avoid spurious differences for old compiler + FILTER='--filter branch' +fi + +rm -rf *.gcda *.gcno a.out out.info out2.info *.txt* *.json dumper* testRC *.gcov *.gcov.* *.log + +clean_cover + +if [[ 1 == $CLEAN_ONLY ]] ; then + exit 0 +fi + +if ! 
type ${CXX} >/dev/null 2>&1 ; then + echo "Missing tool: ${CXX}" >&2 + exit 2 +fi + +$COVER $LCOV_TOOL $LCOV_OPTS --summary format.info 2>&1 | tee err1.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "Error: expected error from lcov --summary but didn't see it" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +ERRS=`grep -c 'ERROR: (negative)' err1.log` +if [ "$ERRS" != 1 ] ; then + echo "didn't see expected 'negative' error" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +$COVER $LCOV_TOOL $LCOV_OPTS --summary format.info --ignore negative 2>&1 | tee err2.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "Error: expected error from lcov --summary negative but didn't see it" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +ERRS=`grep -c 'ERROR: (format)' err2.log` +if [ "$ERRS" != 1 ] ; then + echo "didn't see expected 'format' error" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +$COVER $LCOV_TOOL $LCOV_OPTS -o out.info -a format.info --ignore format,negative 2>&1 | tee warn.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "Error: unexpected error from lcov -add" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +for type in format negative ; do + COUNT=`grep -c "WARNING: ($type)" warn.log` + if [ "$COUNT" != 3 ] ; then + echo "didn't see expected '$type' warnings: $COUNT" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + # and look for the summary count: + grep "$type: 3" warn.log + if [ 0 != $? ] ; then + echo "didn't see Type summary count" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi +done + + +# the file we wrote should be clean +$COVER $LCOV_TOOL $LCOV_OPTS --summary out.info +if [ 0 != $? ] ; then + echo "Error: unexpected error from lcov --summary" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +rm -f out2.info +# test excessive count messages +$COVER $LCOV_TOOL $LCOV_OPTS -o out2.info -a format.info --ignore format,format,negative,negative --rc excessive_count_threshold=1000000 2>&1 | tee excessive.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "Error: expected excessive hit count message" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +grep "ERROR: (excessive) Unexpected excessive hit count" excessive.log +if [ 0 != $? ] ; then + echo "Error: expected excessive hit count message but didn't find it" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +if [ -e out2.info ] ; then + echo "Error: expected error to terminate processing - but out2.info generated" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# check that --keep-going works as expected +$COVER $LCOV_TOOL $LCOV_OPTS -o out2.info -a format.info --ignore format,format,negative,negative --rc excessive_count_threshold=1000000 --keep-going 2>&1 | tee keepGoing.log +if [ 0 == ${PIPESTATUS[0]} ] ; then + echo "Error: expected excessive hit count message" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +grep "ERROR: (excessive) Unexpected excessive hit count" excessive.log +if [ 0 != $? ] ; then + echo "Error: expected excessive hit count message but didn't find it" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +if [ ! -e out2.info ] ; then + echo "Error: expected --keep-going to continue execution - but out2.info not found" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +diff out.info out2.info +if [ 0 != $? 
] ; then + echo "Error: mismatched output generated" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +$COVER $LCOV_TOOL $LCOV_OPTS -o out.info -a format.info --ignore format,format,negative,negative,excessive --rc excessive_count_threshold=1000000 2>&1 | tee warnExcessive.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "Error: expected to warn" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +COUNT=`grep -c -E 'WARNING: \(excessive\) Unexpected excessive .+ count' warnExcessive.log` +if [ $COUNT -lt 3 ] ; then + echo "Error: unexpectedly found only $COUNT messages" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + + +echo "Tests passed" + +if [ "x$COVER" != "x" ] && [ $LOCAL_COVERAGE == 1 ]; then + cover +fi diff --git a/tests/lcov/gcov-tool/Makefile b/tests/lcov/gcov-tool/Makefile new file mode 100644 index 00000000..dc7d19bf --- /dev/null +++ b/tests/lcov/gcov-tool/Makefile @@ -0,0 +1,6 @@ +include ../../common.mak + +TESTS := path.sh + +clean: + rm -f test *.info *.gcda *.gcno diff --git a/tests/lcov/gcov-tool/mygcov.sh b/tests/lcov/gcov-tool/mygcov.sh new file mode 100755 index 00000000..e8445fdb --- /dev/null +++ b/tests/lcov/gcov-tool/mygcov.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +exec gcov "$@" diff --git a/tests/lcov/gcov-tool/path.sh b/tests/lcov/gcov-tool/path.sh new file mode 100755 index 00000000..53123f4d --- /dev/null +++ b/tests/lcov/gcov-tool/path.sh @@ -0,0 +1,148 @@ +#!/bin/bash +# +# Check if --gcov-tool works with relative path specifications. +# + +export CC="${CC:-gcc}" + +TOOLS=( "$CC" "gcov" ) + +function check_tools() { + local tool + + for tool in "${TOOLS[@]}" ; do + if ! type -P "$tool" >/dev/null ; then + echo "Error: Missing tool '$tool'" + exit 2 + fi + done +} + +set +x + +source ../../common.tst + +rm -f test *.gcno *.gcda + +clean_cover + +if [[ 1 == $CLEAN_ONLY ]] ; then + exit 0 +fi + +check_tools + + +echo "Build test program" +"$CC" test.c -o test --coverage +if [ 0 != $? ] ; then + echo "compile failed" + exit 1 +fi + +echo "Run test program" +./test +if [ 0 != $? ] ; then + echo "test execution failed" + exit 1 +fi + +status=0 +for TOOL in "$LCOV_TOOL --capture -d" "$GENINFO_TOOL" ; do + + : "-----------------------------" + : "No gcov-tool option" + : "-----------------------------" + $COVER $TOOL . -o test.info --verbose + if [ 0 != $? ] ; then + echo "failed vanilla" + status=1 + if [ $KEEP_GOING == 0 ] ; then + exit $status + fi + fi + + : "-----------------------------" + : "gcov-tool option without path" + : "-----------------------------" + $COVER $TOOL . -o test.info --verbose --gcov-tool "gcov" + if [ 0 != $? ] ; then + echo "failed gcov" + status=1 + if [ $KEEP_GOING == 0 ] ; then + exit $status + fi + fi + + : "-----------------------------" + : "gcov-tool option with absolute path" + : "-----------------------------" + $COVER $TOOL . -o test.info --verbose --gcov-tool "$PWD/mygcov.sh" + if [ 0 != $? ] ; then + echo "failed script" + status=1 + if [ $KEEP_GOING == 0 ] ; then + exit $status + fi + fi + + : "-----------------------------" + : "gcov-tool option with relative path" + : "-----------------------------" + $COVER $TOOL . -o test.info --verbose --gcov-tool "./mygcov.sh" + if [ 0 != $? ] ; then + echo "failed relative script" + status=1 + if [ $KEEP_GOING == 0 ] ; then + exit $status + fi + fi + + : "-----------------------------" + : "gcov-tool option specifying nonexistent tool without path" + : "-----------------------------" + $COVER $TOOL . -o test.info --verbose --gcov-tool gcov.nonexistent + if [ 0 == $? 
] ; then
+        echo "missing tool: should have failed"
+        status=1
+        if [ $KEEP_GOING == 0 ] ; then
+            exit $status
+        fi
+    fi
+
+    : "-----------------------------"
+    : "gcov-tool option specifying nonexistent tool with absolute path"
+    : "-----------------------------"
+    $COVER $TOOL . -o test.info --verbose --gcov-tool "/gcov.nonexistent"
+    if [ 0 == $? ] ; then
+        echo "should have failed absolute path"
+        status=1
+        if [ $KEEP_GOING == 0 ] ; then
+            exit $status
+        fi
+    fi
+
+    : "-----------------------------"
+    : "gcov-tool option specifying nonexistent tool with relative path"
+    : "-----------------------------"
+    $COVER $TOOL . -o test.info --verbose --gcov-tool "./gcov.nonexistent"
+    if [ 0 == $? ] ; then
+        echo "should have failed relative nonexistent"
+        status=1
+        if [ $KEEP_GOING == 0 ] ; then
+            exit $status
+        fi
+    fi
+done
+
+if [ 0 == $status ] ; then
+    echo "Tests passed"
+else
+    echo "Tests failed"
+fi
+
+if [ "x$COVER" != "x" ] && [ $LOCAL_COVERAGE == 1 ]; then
+    cover
+fi
+
+exit $status
diff --git a/tests/lcov/gcov-tool/test.c b/tests/lcov/gcov-tool/test.c
new file mode 100644
index 00000000..4a7c3ee3
--- /dev/null
+++ b/tests/lcov/gcov-tool/test.c
@@ -0,0 +1,4 @@
+int main(int argc, char *argv[])
+{
+    return 0;
+}
diff --git a/tests/lcov/initializer/Makefile b/tests/lcov/initializer/Makefile
new file mode 100644
index 00000000..c2c7baac
--- /dev/null
+++ b/tests/lcov/initializer/Makefile
@@ -0,0 +1,6 @@
+include ../../common.mak
+
+TESTS := initializer.sh
+
+clean:
+	$(shell ./initializer.sh --clean)
diff --git a/tests/lcov/initializer/initializer.cpp b/tests/lcov/initializer/initializer.cpp
new file mode 100644
index 00000000..6de4a60b
--- /dev/null
+++ b/tests/lcov/initializer/initializer.cpp
@@ -0,0 +1,17 @@
+#include <iostream>
+#include <string>
+#include <unordered_map>
+
+int main()
+{
+    const std::unordered_map<std::string, double> quotes{
+        { "a", 0.011},
+        { "b", 0.022},
+        { "c", 0.033}
+    };
+
+    for (const auto& [s, v] : quotes)
+        std::cout << " >> " << s << ": " << v << "\n";
+
+    return 0;
+}
diff --git a/tests/lcov/initializer/initializer.sh b/tests/lcov/initializer/initializer.sh
new file mode 100755
index 00000000..88096cae
--- /dev/null
+++ b/tests/lcov/initializer/initializer.sh
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+# lambda function extents
+set +x
+
+source ../../common.tst
+
+rm -rf *.log *.json report initializer *.gcda *.gcno *.info
+
+clean_cover
+
+if [[ 1 == "$CLEAN_ONLY" ]] ; then
+    exit 0
+fi
+
+LCOV_OPTS="--branch $PARALLEL $PROFILE"
+# gcc/4.8.5 (and possibly other old versions) generate inconsistent line/function data
+IFS='.' read -r -a VER <<< `${CC} -dumpversion`
+if [ "${VER[0]}" -lt 5 ] ; then
+    IGNORE="--ignore inconsistent"
+    # and filter exception branches to avoid spurious differences for old compiler
+    FILTER='--filter branch'
+
+    # gcc older than 5 doesn't support lambda
+    echo "Compiler version is too old - skipping lambda test"
+    exit 0
+fi
+
+if [[ "${VER[0]}" -lt 8 ]] ; then
+    # c++17 not supported
+    echo "no c++ support"
+    exit 0
+fi
+
+if ! type ${CXX} >/dev/null 2>&1 ; then
+    echo "Missing tool: ${CXX}" >&2
+    exit 2
+fi
+
+#use geninfo for capture - so we can collect coverage info
+CAPTURE=$GENINFO_TOOL
+#CAPTURE="$LCOV_TOOL --capture --directory"
+
+${CXX} -o initializer --coverage initializer.cpp -std=c++17
+
+./initializer
+if [ 0 != $? ] ; then
+    echo "Error: 'initializer' returned error code"
+    if [ $KEEP_GOING == 0 ] ; then
+        exit 1
+    fi
+fi
+
+$COVER $CAPTURE . 
$LCOV_OPTS -o initializer.info --demangle --rc derive_function_end_line=0 --filter line,branch --include '*/initializer.cpp' +if [ 0 != $? ] ; then + echo "Error: unexpected error code from capture" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +COUNT=`grep -c "^DA:" initializer.info` + +$COVER $CAPTURE . $LCOV_OPTS -o filtered.info --demangle --rc derive_function_end_line=0 --filter line,branch,initializer --include '*/initializer.cpp' 2>&1 | tee filt.log +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "Error: unexpected error code from capture2" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# did the non-filtered capture command find linecov entried on line 8, 9, or 10? +grep -E '^DA:\(8|9|10\)' initializer.info +if [ $? == 0 ] ; then + COUNT2=`grep -c "^DA:" filtered.info` + if [ "$COUNT" -le $COUNT2 ] ; then + echo "ERROR: expected to filter out 3 initializer-list lines" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + + DIFF=`expr $COUNT - $COUNT2` + if [ "$DIFF" != 3 ] ; then + echo "ERROR: expected to filter out 3 initializer-list lines" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi +else + echo "no linecov points on std::initializer lines" +fi + +echo "Tests passed" + +if [ "x$COVER" != "x" ] && [ $LOCAL_COVERAGE == 1 ]; then + cover +fi diff --git a/tests/lcov/lambda/Makefile b/tests/lcov/lambda/Makefile new file mode 100644 index 00000000..24f5514d --- /dev/null +++ b/tests/lcov/lambda/Makefile @@ -0,0 +1,6 @@ +include ../../common.mak + +TESTS := lambda.sh + +clean: + $(shell ./lambda.sh --clean) diff --git a/tests/lcov/lambda/lambda.dat b/tests/lcov/lambda/lambda.dat new file mode 100644 index 00000000..ef42c34e --- /dev/null +++ b/tests/lcov/lambda/lambda.dat @@ -0,0 +1,2655 @@ +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/Jacoco2LCOV.java +FN:51,com.mediatek.jacoco2lcov.Jacoco2LCOV.() +FN:63,com.mediatek.jacoco2lcov.Jacoco2LCOV.main([Ljava/lang/String;) +FN:78,com.mediatek.jacoco2lcov.Jacoco2LCOV.([Ljava/lang/String;) +FN:116,com.mediatek.jacoco2lcov.Jacoco2LCOV.showUsage() +FN:147,com.mediatek.jacoco2lcov.Jacoco2LCOV.parseArgs([Ljava/lang/String;)I +FN:213,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$parseArgs$0(Lcom/mediatek/jacoco2lcov/Plugin;) +FN:226,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$parseArgs$1(Ljava/lang/String;) +FN:248,com.mediatek.jacoco2lcov.Jacoco2LCOV.isValid(Lcom/mediatek/jacoco2lcov/util/Either;) +FN:337,com.mediatek.jacoco2lcov.Jacoco2LCOV.getAbsolutePath(Ljava/lang/String;)Ljava/lang/String; +FN:347,com.mediatek.jacoco2lcov.Jacoco2LCOV.getRelativePath(Ljava/lang/String;)Ljava/lang/String; +FN:362,com.mediatek.jacoco2lcov.Jacoco2LCOV.run(I) +FN:372,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$run$2(Ljava/lang/String;) +FN:375,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$run$3(Ljava/lang/String;) +FN:390,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$run$4(Ljava/lang/String;) +FN:395,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$run$5(Ljava/lang/String;) +FNDA:1,com.mediatek.jacoco2lcov.Jacoco2LCOV.() +FNDA:1,com.mediatek.jacoco2lcov.Jacoco2LCOV.main([Ljava/lang/String;) +FNDA:1,com.mediatek.jacoco2lcov.Jacoco2LCOV.([Ljava/lang/String;) +FNDA:1,com.mediatek.jacoco2lcov.Jacoco2LCOV.showUsage() +FNDA:2,com.mediatek.jacoco2lcov.Jacoco2LCOV.parseArgs([Ljava/lang/String;)I +FNDA:0,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$parseArgs$0(Lcom/mediatek/jacoco2lcov/Plugin;) +FNDA:0,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$parseArgs$1(Ljava/lang/String;) 
+FNDA:0,com.mediatek.jacoco2lcov.Jacoco2LCOV.isValid(Lcom/mediatek/jacoco2lcov/util/Either;) +FNDA:0,com.mediatek.jacoco2lcov.Jacoco2LCOV.getAbsolutePath(Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.Jacoco2LCOV.getRelativePath(Ljava/lang/String;)Ljava/lang/String; +FNDA:1,com.mediatek.jacoco2lcov.Jacoco2LCOV.run(I) +FNDA:0,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$run$2(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$run$3(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$run$4(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$run$5(Ljava/lang/String;) +FNF:15 +FNH:6 +BRDA:68,0,0,1 +BRDA:68,0,1,0 +BRDA:151,0,0,1 +BRDA:151,0,1,1 +BRDA:151,0,2,1 +BRDA:151,0,3,1 +BRDA:151,0,4,0 +BRDA:151,0,5,0 +BRDA:154,0,0,1 +BRDA:154,0,1,1 +BRDA:154,0,2,0 +BRDA:154,0,3,0 +BRDA:158,0,0,0 +BRDA:158,0,1,0 +BRDA:158,0,2,0 +BRDA:158,0,3,0 +BRDA:162,0,0,0 +BRDA:162,0,1,0 +BRDA:162,0,2,0 +BRDA:162,0,3,0 +BRDA:166,0,0,0 +BRDA:166,0,1,0 +BRDA:166,0,2,0 +BRDA:166,0,3,0 +BRDA:170,0,0,0 +BRDA:170,0,1,0 +BRDA:170,0,2,0 +BRDA:170,0,3,0 +BRDA:174,0,0,0 +BRDA:174,0,1,0 +BRDA:174,0,2,0 +BRDA:174,0,3,0 +BRDA:179,0,0,0 +BRDA:179,0,1,0 +BRDA:179,0,2,0 +BRDA:179,0,3,0 +BRDA:184,0,0,0 +BRDA:184,0,1,0 +BRDA:184,0,2,0 +BRDA:184,0,3,0 +BRDA:203,0,0,1 +BRDA:203,0,1,0 +BRDA:207,0,0,0 +BRDA:207,0,1,0 +BRDA:227,0,0,0 +BRDA:227,0,1,0 +BRDA:228,0,0,0 +BRDA:228,0,1,0 +BRDA:231,0,0,1 +BRDA:231,0,1,0 +BRDA:234,0,0,1 +BRDA:234,0,1,0 +BRDA:236,0,0,1 +BRDA:236,0,1,0 +BRDA:248,0,0,0 +BRDA:248,0,1,0 +BRDA:253,0,0,0 +BRDA:253,0,1,0 +BRDA:256,0,0,0 +BRDA:256,0,1,0 +BRDA:262,0,0,0 +BRDA:262,0,1,0 +BRDA:267,0,0,0 +BRDA:267,0,1,0 +BRDA:268,0,0,0 +BRDA:268,0,1,0 +BRDA:273,0,0,0 +BRDA:273,0,1,0 +BRDA:280,0,0,0 +BRDA:280,0,1,0 +BRDA:290,0,0,0 +BRDA:290,0,1,0 +BRDA:292,0,0,0 +BRDA:292,0,1,0 +BRDA:300,0,0,0 +BRDA:300,0,1,0 +BRDA:305,0,0,0 +BRDA:305,0,1,0 +BRDA:307,0,0,0 +BRDA:307,0,1,0 +BRDA:315,0,0,0 +BRDA:315,0,1,0 +BRDA:320,0,0,0 +BRDA:320,0,1,0 +BRDA:322,0,0,0 +BRDA:322,0,1,0 +BRDA:337,0,0,0 +BRDA:337,0,1,0 +BRDA:347,0,0,0 +BRDA:347,0,1,0 +BRDA:350,0,0,0 +BRDA:350,0,1,0 +BRDA:362,0,0,1 +BRDA:362,0,1,0 +BRDA:365,0,0,0 +BRDA:365,0,1,0 +BRF:96 +BRH:12 +DA:51,1 +DA:52,1 +DA:63,1 +DA:68,1 +DA:72,0 +DA:73,0 +DA:78,1 +DA:81,1 +DA:84,1 +DA:85,1 +DA:88,1 +DA:91,1 +DA:94,1 +DA:97,1 +DA:100,1 +DA:106,1 +DA:107,1 +DA:108,1 +DA:116,1 +DA:137,1 +DA:138,1 +DA:147,1 +DA:148,1 +DA:150,1 +DA:151,4 +DA:152,1 +DA:154,2 +DA:155,1 +DA:158,0 +DA:159,0 +DA:162,0 +DA:163,0 +DA:166,0 +DA:167,0 +DA:170,0 +DA:171,0 +DA:174,0 +DA:175,0 +DA:176,0 +DA:179,0 +DA:180,0 +DA:181,0 +DA:184,0 +DA:185,0 +DA:186,0 +DA:191,0 +DA:192,0 +DA:193,0 +DA:199,1 +DA:200,1 +DA:201,1 +DA:203,1 +DA:207,0 +DA:208,0 +DA:210,0 +DA:212,0 +DA:213,0 +DA:214,0 +DA:215,0 +DA:216,0 +DA:217,0 +DA:218,0 +DA:219,0 +DA:220,0 +DA:221,0 +DA:225,0 +DA:226,0 +DA:227,0 +DA:228,0 +DA:231,1 +DA:232,1 +DA:234,1 +DA:235,0 +DA:236,1 +DA:237,1 +DA:239,0 +DA:248,0 +DA:249,0 +DA:250,0 +DA:253,0 +DA:254,0 +DA:255,0 +DA:256,0 +DA:257,0 +DA:258,0 +DA:261,0 +DA:262,0 +DA:263,0 +DA:264,0 +DA:266,0 +DA:267,0 +DA:268,0 +DA:269,0 +DA:270,0 +DA:273,0 +DA:274,0 +DA:275,0 +DA:279,0 +DA:280,0 +DA:281,0 +DA:282,0 +DA:290,0 +DA:291,0 +DA:292,0 +DA:293,0 +DA:294,0 +DA:298,0 +DA:300,0 +DA:301,0 +DA:302,0 +DA:305,0 +DA:306,0 +DA:307,0 +DA:308,0 +DA:309,0 +DA:313,0 +DA:315,0 +DA:316,0 +DA:317,0 +DA:320,0 +DA:321,0 +DA:322,0 +DA:323,0 +DA:324,0 +DA:328,0 +DA:330,0 +DA:337,0 +DA:338,0 +DA:340,0 +DA:347,0 +DA:348,0 +DA:350,0 +DA:351,0 +DA:352,0 
+DA:362,1 +DA:364,0 +DA:365,0 +DA:366,0 +DA:367,0 +DA:368,0 +DA:369,0 +DA:370,0 +DA:371,0 +DA:372,0 +DA:373,0 +DA:374,0 +DA:375,0 +DA:376,0 +DA:381,0 +DA:388,0 +DA:390,0 +DA:392,0 +DA:393,0 +DA:395,0 +DA:397,0 +DA:398,0 +DA:400,0 +DA:401,0 +DA:403,0 +DA:404,0 +DA:406,0 +LH:36 +LF:161 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/NoCoverageGenerated.java +FNF:0 +FNH:0 +BRF:0 +BRH:0 +LH:0 +LF:0 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/Plugin.java +FN:35,com.mediatek.jacoco2lcov.Plugin.getPlugins(Ljava/lang/String;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:45,com.mediatek.jacoco2lcov.Plugin.getPlugins(Ljava/io/File;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:57,com.mediatek.jacoco2lcov.Plugin.isSourcePlugin(Ljava/io/File;)Z +FN:65,com.mediatek.jacoco2lcov.Plugin.findSourceDirs(Ljava/io/File;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:79,com.mediatek.jacoco2lcov.Plugin.findClassDirs(Ljava/io/File;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:103,com.mediatek.jacoco2lcov.Plugin.(Ljava/io/File;) +FN:115,com.mediatek.jacoco2lcov.Plugin.toString(Ljava/lang/String;) +FN:127,com.mediatek.jacoco2lcov.Plugin.addSourceDirectory(Ljava/lang/String;) +FN:137,com.mediatek.jacoco2lcov.Plugin.getSourceDirectories(Lcom/mediatek/jacoco2lcov/util/ListF;) +FN:146,com.mediatek.jacoco2lcov.Plugin.addClassDirectory(Ljava/lang/String;) +FN:156,com.mediatek.jacoco2lcov.Plugin.getClassDirectories(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.Plugin.getPlugins(Ljava/lang/String;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.Plugin.getPlugins(Ljava/io/File;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.Plugin.isSourcePlugin(Ljava/io/File;)Z +FNDA:0,com.mediatek.jacoco2lcov.Plugin.findSourceDirs(Ljava/io/File;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.Plugin.findClassDirs(Ljava/io/File;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.Plugin.(Ljava/io/File;) +FNDA:0,com.mediatek.jacoco2lcov.Plugin.toString(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.Plugin.addSourceDirectory(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.Plugin.getSourceDirectories(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.Plugin.addClassDirectory(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.Plugin.getClassDirectories(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNF:11 +FNH:0 +BRDA:35,0,0,0 +BRDA:35,0,1,0 +BRDA:45,0,0,0 +BRDA:45,0,1,0 +BRDA:46,0,0,0 +BRDA:46,0,1,0 +BRDA:57,0,0,0 +BRDA:57,0,1,0 +BRDA:127,0,0,0 +BRDA:127,0,1,0 +BRDA:128,0,0,0 +BRDA:128,0,1,0 +BRDA:146,0,0,0 +BRDA:146,0,1,0 +BRDA:147,0,0,0 +BRDA:147,0,1,0 +BRF:16 +BRH:0 +DA:35,0 +DA:36,0 +DA:45,0 +DA:46,0 +DA:47,0 +DA:48,0 +DA:50,0 +DA:51,0 +DA:52,0 +DA:57,0 +DA:65,0 +DA:66,0 +DA:67,0 +DA:68,0 +DA:69,0 +DA:70,0 +DA:71,0 +DA:79,0 +DA:80,0 +DA:81,0 +DA:82,0 +DA:83,0 +DA:84,0 +DA:103,0 +DA:104,0 +DA:105,0 +DA:106,0 +DA:107,0 +DA:115,0 +DA:127,0 +DA:128,0 +DA:129,0 +DA:130,0 +DA:131,0 +DA:137,0 +DA:146,0 +DA:147,0 +DA:148,0 +DA:149,0 +DA:150,0 +DA:156,0 +LH:0 +LF:41 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/jacoco/JacocoData.java +FN:46,com.mediatek.jacoco2lcov.jacoco.JacocoData.(Ljava/lang/String;Lorg/jacoco/core/tools/ExecFileLoader;Lorg/jacoco/core/analysis/CoverageBuilder;) +FN:57,com.mediatek.jacoco2lcov.jacoco.JacocoData.getSourceFileData(Lcom/mediatek/jacoco2lcov/util/ListF;) 
+FN:62,com.mediatek.jacoco2lcov.jacoco.JacocoData.getPackageData(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoData.(Ljava/lang/String;Lorg/jacoco/core/tools/ExecFileLoader;Lorg/jacoco/core/analysis/CoverageBuilder;) +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoData.getSourceFileData(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoData.getPackageData(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNF:3 +FNH:0 +BRF:0 +BRH:0 +DA:46,0 +DA:47,0 +DA:48,0 +DA:49,0 +DA:50,0 +DA:57,0 +DA:62,0 +LH:0 +LF:7 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/jacoco/JacocoUtils.java +FN:126,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.loadJacocoData(Ljava/lang/String;Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:135,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$loadJacocoData$1(Lcom/mediatek/jacoco2lcov/util/ListF;Ljava/lang/String;Lorg/jacoco/core/tools/ExecFileLoader;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:138,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$loadJacocoData$0(Ljava/lang/String;Lorg/jacoco/core/tools/ExecFileLoader;Lorg/jacoco/core/analysis/CoverageBuilder;)Lcom/mediatek/jacoco2lcov/jacoco/JacocoData; +FN:146,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.loadCoverageData(Ljava/lang/String;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:175,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.analyzeData(Lorg/jacoco/core/tools/ExecFileLoader;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:181,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$analyzeData$2(Lorg/jacoco/core/analysis/Analyzer;Ljava/io/File;)Ljava/lang/Throwable; +FN:195,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$analyzeData$4(Lcom/mediatek/jacoco2lcov/util/ListF;)Ljava/lang/String; +FN:196,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$analyzeData$3(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FN:206,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.getSourceLineData(Lorg/jacoco/core/analysis/ISourceFileCoverage;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:209,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$getSourceLineData$6(Lorg/jacoco/core/analysis/ISourceFileCoverage;Ljava/lang/Integer;)Lcom/mediatek/jacoco2lcov/util/Pair; +FN:210,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$getSourceLineData$5(Ljava/lang/Integer;Lorg/jacoco/core/analysis/ILine;)Lcom/mediatek/jacoco2lcov/util/Pair; +FN:255,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$getSourceLocator$7(Lorg/jacoco/report/MultiSourceFileLocator;ILjava/io/File;) +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.loadJacocoData(Ljava/lang/String;Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$loadJacocoData$1(Lcom/mediatek/jacoco2lcov/util/ListF;Ljava/lang/String;Lorg/jacoco/core/tools/ExecFileLoader;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$loadJacocoData$0(Ljava/lang/String;Lorg/jacoco/core/tools/ExecFileLoader;Lorg/jacoco/core/analysis/CoverageBuilder;)Lcom/mediatek/jacoco2lcov/jacoco/JacocoData; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.loadCoverageData(Ljava/lang/String;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.analyzeData(Lorg/jacoco/core/tools/ExecFileLoader;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/Either; 
+FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$analyzeData$2(Lorg/jacoco/core/analysis/Analyzer;Ljava/io/File;)Ljava/lang/Throwable; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$analyzeData$4(Lcom/mediatek/jacoco2lcov/util/ListF;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$analyzeData$3(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.getSourceLineData(Lorg/jacoco/core/analysis/ISourceFileCoverage;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$getSourceLineData$6(Lorg/jacoco/core/analysis/ISourceFileCoverage;Ljava/lang/Integer;)Lcom/mediatek/jacoco2lcov/util/Pair; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$getSourceLineData$5(Ljava/lang/Integer;Lorg/jacoco/core/analysis/ILine;)Lcom/mediatek/jacoco2lcov/util/Pair; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$getSourceLocator$7(Lorg/jacoco/report/MultiSourceFileLocator;ILjava/io/File;) +FNF:12 +FNH:0 +BRDA:126,0,0,0 +BRDA:126,0,1,0 +BRDA:152,0,0,0 +BRDA:152,0,1,0 +BRDA:152,0,2,0 +BRDA:152,0,3,0 +BRDA:156,0,0,0 +BRDA:156,0,1,0 +BRDA:166,0,0,0 +BRDA:166,0,1,0 +BRF:10 +BRH:0 +DA:126,0 +DA:127,0 +DA:128,0 +DA:132,0 +DA:135,0 +DA:138,0 +DA:146,0 +DA:147,0 +DA:149,0 +DA:150,0 +DA:151,0 +DA:152,0 +DA:153,0 +DA:156,0 +DA:157,0 +DA:161,0 +DA:165,0 +DA:166,0 +DA:175,0 +DA:177,0 +DA:178,0 +DA:180,0 +DA:181,0 +DA:183,0 +DA:184,0 +DA:185,0 +DA:186,0 +DA:187,0 +DA:191,0 +DA:193,0 +DA:194,0 +DA:195,0 +DA:196,0 +DA:206,0 +DA:207,0 +DA:208,0 +DA:209,0 +DA:210,0 +DA:211,0 +DA:212,0 +DA:255,0 +LH:0 +LF:41 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/lcov/ClassData.java +FN:33,com.mediatek.jacoco2lcov.lcov.ClassData.() +FN:61,com.mediatek.jacoco2lcov.lcov.ClassData.(Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;Lorg/jacoco/core/analysis/IClassCoverage;) +FN:76,com.mediatek.jacoco2lcov.lcov.ClassData.sort() +FN:96,com.mediatek.jacoco2lcov.lcov.ClassData.getSourceFileData(Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;) +FN:103,com.mediatek.jacoco2lcov.lcov.ClassData.getPackageName(Ljava/lang/String;) +FN:110,com.mediatek.jacoco2lcov.lcov.ClassData.getClassName(Ljava/lang/String;) +FN:117,com.mediatek.jacoco2lcov.lcov.ClassData.getFirstLine(I) +FN:124,com.mediatek.jacoco2lcov.lcov.ClassData.getLastLine(I) +FN:131,com.mediatek.jacoco2lcov.lcov.ClassData.getMethods(Ljava/util/List;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.ClassData.() +FNDA:0,com.mediatek.jacoco2lcov.lcov.ClassData.(Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;Lorg/jacoco/core/analysis/IClassCoverage;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.ClassData.sort() +FNDA:0,com.mediatek.jacoco2lcov.lcov.ClassData.getSourceFileData(Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.ClassData.getPackageName(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.ClassData.getClassName(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.ClassData.getFirstLine(I) +FNDA:0,com.mediatek.jacoco2lcov.lcov.ClassData.getLastLine(I) +FNDA:0,com.mediatek.jacoco2lcov.lcov.ClassData.getMethods(Ljava/util/List;) +FN:34,com.mediatek.jacoco2lcov.lcov.ClassData$1.() +FNDA:0,com.mediatek.jacoco2lcov.lcov.ClassData$1.() +FNF:10 +FNH:0 +BRDA:65,0,0,0 +BRDA:65,0,1,0 +BRDA:67,0,0,0 +BRDA:67,0,1,0 +BRF:4 +BRH:0 +DA:33,0 +DA:34,0 +DA:61,0 +DA:62,0 +DA:63,0 +DA:64,0 +DA:65,0 +DA:66,0 +DA:67,0 +DA:68,0 +DA:69,0 +DA:76,0 +DA:77,0 +DA:96,0 +DA:103,0 +DA:110,0 +DA:117,0 
+DA:124,0 +DA:131,0 +LH:0 +LF:19 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/lcov/LCOVUtils.java +FN:72,com.mediatek.jacoco2lcov.lcov.LCOVUtils.() +FN:77,com.mediatek.jacoco2lcov.lcov.LCOVUtils.dispose() +FN:93,com.mediatek.jacoco2lcov.lcov.LCOVUtils.generateLCOVReport(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:104,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$0(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FN:108,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$1(Ljava/lang/String;) +FN:112,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$2(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FN:116,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$3(Ljava/lang/String;) +FN:129,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$4(Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;Lcom/mediatek/jacoco2lcov/jacoco/JacocoData;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:133,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$5(Ljava/lang/String;Ljava/lang/String;Ljava/util/List;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:144,com.mediatek.jacoco2lcov.lcov.LCOVUtils.getSourcePathname(Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;Ljava/lang/String;)Ljava/lang/String; +FN:145,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$getSourcePathname$6(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FN:147,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$getSourcePathname$7(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FN:163,com.mediatek.jacoco2lcov.lcov.LCOVUtils.constructLCOVData(Lcom/mediatek/jacoco2lcov/jacoco/JacocoData;Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:172,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$constructLCOVData$8(Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;Ljava/util/Map;Lorg/jacoco/core/analysis/ISourceFileCoverage;) +FN:193,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$constructLCOVData$9(Ljava/util/Map;Lorg/jacoco/core/analysis/IPackageCoverage;) +FN:227,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$constructLCOVData$10(Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;) +FN:239,com.mediatek.jacoco2lcov.lcov.LCOVUtils.writeLCOVData(Ljava/lang/String;Ljava/util/List;Ljava/lang/String;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:262,com.mediatek.jacoco2lcov.lcov.LCOVUtils.generateLCOVData(Ljava/util/List;Ljava/lang/String;Ljava/io/PrintWriter;Ljava/lang/String;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.() +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.dispose() +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.generateLCOVReport(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$0(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$1(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$2(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$3(Ljava/lang/String;) 
+FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$4(Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;Lcom/mediatek/jacoco2lcov/jacoco/JacocoData;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$5(Ljava/lang/String;Ljava/lang/String;Ljava/util/List;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.getSourcePathname(Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$getSourcePathname$6(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$getSourcePathname$7(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.constructLCOVData(Lcom/mediatek/jacoco2lcov/jacoco/JacocoData;Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$constructLCOVData$8(Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;Ljava/util/Map;Lorg/jacoco/core/analysis/ISourceFileCoverage;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$constructLCOVData$9(Ljava/util/Map;Lorg/jacoco/core/analysis/IPackageCoverage;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$constructLCOVData$10(Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.writeLCOVData(Ljava/lang/String;Ljava/util/List;Ljava/lang/String;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.generateLCOVData(Ljava/util/List;Ljava/lang/String;Ljava/io/PrintWriter;Ljava/lang/String;)Lcom/mediatek/jacoco2lcov/util/Either; +FNF:18 +FNH:0 +BRDA:95,0,0,0 +BRDA:95,0,1,0 +BRDA:100,0,0,0 +BRDA:100,0,1,0 +BRDA:105,0,0,0 +BRDA:105,0,1,0 +BRDA:113,0,0,0 +BRDA:113,0,1,0 +BRDA:120,0,0,0 +BRDA:120,0,1,0 +BRDA:181,0,0,0 +BRDA:181,0,1,0 +BRDA:182,0,0,0 +BRDA:182,0,1,0 +BRDA:194,0,0,0 +BRDA:194,0,1,0 +BRDA:200,0,0,0 +BRDA:200,0,1,0 +BRDA:202,0,0,0 +BRDA:202,0,1,0 +BRDA:203,0,0,0 +BRDA:203,0,1,0 +BRDA:221,0,0,0 +BRDA:221,0,1,0 +BRDA:224,0,0,0 +BRDA:224,0,1,0 +BRDA:248,0,0,0 +BRDA:248,0,1,0 +BRDA:262,0,0,0 +BRDA:262,0,1,0 +BRDA:269,0,0,0 +BRDA:269,0,1,0 +BRDA:282,0,0,0 +BRDA:282,0,1,0 +BRDA:284,0,0,0 +BRDA:284,0,1,0 +BRDA:286,0,0,0 +BRDA:286,0,1,0 +BRDA:293,0,0,0 +BRDA:293,0,1,0 +BRDA:297,0,0,0 +BRDA:297,0,1,0 +BRDA:299,0,0,0 +BRDA:299,0,1,0 +BRDA:321,0,0,0 +BRDA:321,0,1,0 +BRDA:323,0,0,0 +BRDA:323,0,1,0 +BRDA:328,0,0,0 +BRDA:328,0,1,0 +BRDA:329,0,0,0 +BRDA:329,0,1,0 +BRDA:331,0,0,0 +BRDA:331,0,1,0 +BRDA:345,0,0,0 +BRDA:345,0,1,0 +BRDA:347,0,0,0 +BRDA:347,0,1,0 +BRDA:352,0,0,0 +BRDA:352,0,1,0 +BRF:60 +BRH:0 +DA:72,0 +DA:73,0 +DA:77,0 +DA:93,0 +DA:94,0 +DA:95,0 +DA:96,0 +DA:98,0 +DA:99,0 +DA:100,0 +DA:101,0 +DA:103,0 +DA:104,0 +DA:105,0 +DA:106,0 +DA:107,0 +DA:108,0 +DA:111,0 +DA:112,0 +DA:113,0 +DA:114,0 +DA:115,0 +DA:116,0 +DA:120,0 +DA:121,0 +DA:124,0 +DA:128,0 +DA:129,0 +DA:132,0 +DA:133,0 +DA:144,0 +DA:145,0 +DA:146,0 +DA:147,0 +DA:148,0 +DA:163,0 +DA:164,0 +DA:167,0 +DA:168,0 +DA:169,0 +DA:172,0 +DA:173,0 +DA:174,0 +DA:176,0 +DA:179,0 +DA:181,0 +DA:182,0 +DA:183,0 +DA:187,0 +DA:191,0 +DA:192,0 +DA:193,0 +DA:194,0 +DA:195,0 +DA:196,0 +DA:197,0 +DA:200,0 +DA:201,0 +DA:202,0 +DA:203,0 +DA:204,0 +DA:206,0 +DA:207,0 +DA:211,0 +DA:212,0 +DA:215,0 +DA:216,0 +DA:220,0 +DA:221,0 +DA:222,0 +DA:224,0 +DA:225,0 +DA:226,0 +DA:227,0 +DA:230,0 +DA:239,0 +DA:240,0 +DA:242,0 
+DA:244,0 +DA:245,0 +DA:248,0 +DA:251,0 +DA:262,0 +DA:264,0 +DA:269,0 +DA:272,0 +DA:275,0 +DA:277,0 +DA:278,0 +DA:282,0 +DA:284,0 +DA:286,0 +DA:288,0 +DA:289,0 +DA:291,0 +DA:292,0 +DA:293,0 +DA:294,0 +DA:295,0 +DA:297,0 +DA:299,0 +DA:301,0 +DA:302,0 +DA:303,0 +DA:304,0 +DA:305,0 +DA:306,0 +DA:310,0 +DA:311,0 +DA:318,0 +DA:319,0 +DA:321,0 +DA:323,0 +DA:325,0 +DA:326,0 +DA:328,0 +DA:329,0 +DA:331,0 +DA:335,0 +DA:336,0 +DA:337,0 +DA:339,0 +DA:340,0 +DA:343,0 +DA:344,0 +DA:345,0 +DA:347,0 +DA:349,0 +DA:351,0 +DA:352,0 +DA:354,0 +DA:355,0 +DA:357,0 +DA:361,0 +DA:362,0 +DA:365,0 +DA:366,0 +DA:368,0 +LH:0 +LF:138 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/lcov/LineData.java +FN:51,com.mediatek.jacoco2lcov.lcov.LineData.(Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;ILorg/jacoco/core/analysis/ILine;) +FN:75,com.mediatek.jacoco2lcov.lcov.LineData.getLineNumber(I) +FN:80,com.mediatek.jacoco2lcov.lcov.LineData.hasCode(Z) +FN:85,com.mediatek.jacoco2lcov.lcov.LineData.isCovered(Z) +FN:90,com.mediatek.jacoco2lcov.lcov.LineData.isPartlyCovered(Z) +FN:95,com.mediatek.jacoco2lcov.lcov.LineData.isFullyCovered(Z) +FN:100,com.mediatek.jacoco2lcov.lcov.LineData.getBranchCount(I) +FN:105,com.mediatek.jacoco2lcov.lcov.LineData.getBranchesHit(I) +FN:116,com.mediatek.jacoco2lcov.lcov.LineData.getExecutionCount(I) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LineData.(Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;ILorg/jacoco/core/analysis/ILine;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LineData.getLineNumber(I) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LineData.hasCode(Z) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LineData.isCovered(Z) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LineData.isPartlyCovered(Z) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LineData.isFullyCovered(Z) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LineData.getBranchCount(I) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LineData.getBranchesHit(I) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LineData.getExecutionCount(I) +FNF:9 +FNH:0 +BRDA:80,0,0,0 +BRDA:80,0,1,0 +BRDA:85,0,0,0 +BRDA:85,0,1,0 +BRDA:85,0,2,0 +BRDA:85,0,3,0 +BRDA:90,0,0,0 +BRDA:90,0,1,0 +BRDA:95,0,0,0 +BRDA:95,0,1,0 +BRDA:119,0,0,0 +BRDA:119,0,1,0 +BRDA:119,0,2,0 +BRDA:122,0,0,0 +BRDA:122,0,1,0 +BRDA:127,0,0,0 +BRDA:127,0,1,0 +BRF:17 +BRH:0 +DA:51,0 +DA:52,0 +DA:53,0 +DA:54,0 +DA:55,0 +DA:75,0 +DA:80,0 +DA:85,0 +DA:90,0 +DA:95,0 +DA:100,0 +DA:105,0 +DA:116,0 +DA:117,0 +DA:118,0 +DA:119,0 +DA:122,0 +DA:123,0 +DA:124,0 +DA:127,0 +DA:128,0 +DA:131,0 +LH:0 +LF:22 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/lcov/MethodData.java +FN:29,com.mediatek.jacoco2lcov.lcov.MethodData.() +FN:60,com.mediatek.jacoco2lcov.lcov.MethodData.(Lcom/mediatek/jacoco2lcov/lcov/ClassData;Lorg/jacoco/core/analysis/IMethodCoverage;) +FN:75,com.mediatek.jacoco2lcov.lcov.MethodData.dispose() +FN:94,com.mediatek.jacoco2lcov.lcov.MethodData.getClassData(Lcom/mediatek/jacoco2lcov/lcov/ClassData;) +FN:101,com.mediatek.jacoco2lcov.lcov.MethodData.getName(Ljava/lang/String;) +FN:106,com.mediatek.jacoco2lcov.lcov.MethodData.hasDebugInfo(Z) +FN:111,com.mediatek.jacoco2lcov.lcov.MethodData.getFirstLine(I) +FN:116,com.mediatek.jacoco2lcov.lcov.MethodData.getLastLine(I) +FN:125,com.mediatek.jacoco2lcov.lcov.MethodData.getExecutionCount(I) +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData.() +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData.(Lcom/mediatek/jacoco2lcov/lcov/ClassData;Lorg/jacoco/core/analysis/IMethodCoverage;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData.dispose() 
+FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData.getClassData(Lcom/mediatek/jacoco2lcov/lcov/ClassData;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData.getName(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData.hasDebugInfo(Z) +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData.getFirstLine(I) +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData.getLastLine(I) +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData.getExecutionCount(I) +FN:30,com.mediatek.jacoco2lcov.lcov.MethodData$1.() +FN:33,com.mediatek.jacoco2lcov.lcov.MethodData$1.compare(Lcom/mediatek/jacoco2lcov/lcov/MethodData;Lcom/mediatek/jacoco2lcov/lcov/MethodData;)I +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData$1.() +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData$1.compare(Lcom/mediatek/jacoco2lcov/lcov/MethodData;Lcom/mediatek/jacoco2lcov/lcov/MethodData;)I +FNF:11 +FNH:0 +BRDA:36,0,0,0 +BRDA:36,0,1,0 +BRDA:37,0,0,0 +BRDA:37,0,1,0 +BRDA:65,0,0,0 +BRDA:65,0,1,0 +BRDA:66,0,0,0 +BRDA:66,0,1,0 +BRDA:67,0,0,0 +BRDA:67,0,1,0 +BRDA:68,0,0,0 +BRDA:68,0,1,0 +BRDA:106,0,0,0 +BRDA:106,0,1,0 +BRF:14 +BRH:0 +DA:29,0 +DA:30,0 +DA:33,0 +DA:34,0 +DA:36,0 +DA:37,0 +DA:38,0 +DA:60,0 +DA:61,0 +DA:62,0 +DA:64,0 +DA:65,0 +DA:66,0 +DA:67,0 +DA:68,0 +DA:70,0 +DA:71,0 +DA:75,0 +DA:94,0 +DA:101,0 +DA:106,0 +DA:111,0 +DA:116,0 +DA:125,0 +LH:0 +LF:24 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/lcov/SourceFileData.java +FN:33,com.mediatek.jacoco2lcov.lcov.SourceFileData.() +FN:59,com.mediatek.jacoco2lcov.lcov.SourceFileData.(Ljava/lang/String;Lorg/jacoco/core/analysis/ISourceFileCoverage;) +FN:66,com.mediatek.jacoco2lcov.lcov.SourceFileData.lambda$new$0(Lcom/mediatek/jacoco2lcov/util/Pair;)Lcom/mediatek/jacoco2lcov/lcov/LineData; +FN:75,com.mediatek.jacoco2lcov.lcov.SourceFileData.addClass(Lorg/jacoco/core/analysis/IClassCoverage;) +FN:83,com.mediatek.jacoco2lcov.lcov.SourceFileData.sort() +FN:104,com.mediatek.jacoco2lcov.lcov.SourceFileData.getPathname(Ljava/lang/String;) +FN:109,com.mediatek.jacoco2lcov.lcov.SourceFileData.getPackageName(Ljava/lang/String;) +FN:114,com.mediatek.jacoco2lcov.lcov.SourceFileData.getFilePathname(Ljava/lang/String;) +FN:119,com.mediatek.jacoco2lcov.lcov.SourceFileData.getClasses(Ljava/util/List;) +FN:124,com.mediatek.jacoco2lcov.lcov.SourceFileData.getLines(Ljava/util/List;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData.() +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData.(Ljava/lang/String;Lorg/jacoco/core/analysis/ISourceFileCoverage;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData.lambda$new$0(Lcom/mediatek/jacoco2lcov/util/Pair;)Lcom/mediatek/jacoco2lcov/lcov/LineData; +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData.addClass(Lorg/jacoco/core/analysis/IClassCoverage;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData.sort() +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData.getPathname(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData.getPackageName(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData.getFilePathname(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData.getClasses(Ljava/util/List;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData.getLines(Ljava/util/List;) +FN:34,com.mediatek.jacoco2lcov.lcov.SourceFileData$1.() +FN:37,com.mediatek.jacoco2lcov.lcov.SourceFileData$1.compare(Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;)I +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData$1.() 
+FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData$1.compare(Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;)I +FNF:12 +FNH:0 +BRDA:84,0,0,0 +BRDA:84,0,1,0 +BRF:2 +BRH:0 +DA:33,0 +DA:34,0 +DA:37,0 +DA:59,0 +DA:60,0 +DA:61,0 +DA:63,0 +DA:65,0 +DA:66,0 +DA:67,0 +DA:68,0 +DA:75,0 +DA:76,0 +DA:83,0 +DA:84,0 +DA:85,0 +DA:86,0 +DA:104,0 +DA:109,0 +DA:114,0 +DA:119,0 +DA:124,0 +LH:0 +LF:22 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/util/Either.java +FN:30,com.mediatek.jacoco2lcov.util.Either.left(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:35,com.mediatek.jacoco2lcov.util.Either.right(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:43,com.mediatek.jacoco2lcov.util.Either.from(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:51,com.mediatek.jacoco2lcov.util.Either.from(Ljava/lang/Object;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:59,com.mediatek.jacoco2lcov.util.Either.from(Ljava/util/function/Supplier;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:70,com.mediatek.jacoco2lcov.util.Either.when(ZLjava/lang/Object;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:79,com.mediatek.jacoco2lcov.util.Either.when(ZLjava/util/function/Supplier;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:88,com.mediatek.jacoco2lcov.util.Either.when(ZLjava/lang/Object;Ljava/util/function/Supplier;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:97,com.mediatek.jacoco2lcov.util.Either.when(ZLjava/util/function/Supplier;Ljava/util/function/Supplier;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:106,com.mediatek.jacoco2lcov.util.Either.unless(ZLjava/lang/Object;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:116,com.mediatek.jacoco2lcov.util.Either.unless(ZLjava/util/function/Supplier;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:125,com.mediatek.jacoco2lcov.util.Either.unless(ZLjava/lang/Object;Ljava/util/function/Supplier;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:135,com.mediatek.jacoco2lcov.util.Either.unless(ZLjava/util/function/Supplier;Ljava/util/function/Supplier;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:142,com.mediatek.jacoco2lcov.util.Either.(ZLjava/lang/Object;Ljava/lang/Object;) +FN:166,com.mediatek.jacoco2lcov.util.Either.toString(Ljava/lang/String;) +FN:180,com.mediatek.jacoco2lcov.util.Either.debug(Lcom/mediatek/jacoco2lcov/util/Either;) +FN:185,com.mediatek.jacoco2lcov.util.Either.isLeft(Z) +FN:190,com.mediatek.jacoco2lcov.util.Either.getLeft(Ljava/lang/Object;) +FN:198,com.mediatek.jacoco2lcov.util.Either.isRight(Z) +FN:203,com.mediatek.jacoco2lcov.util.Either.get(Ljava/lang/Object;) +FN:215,com.mediatek.jacoco2lcov.util.Either.getOrElse(Ljava/lang/Object;)Ljava/lang/Object; +FN:220,com.mediatek.jacoco2lcov.util.Either.getOrElse(Ljava/util/function/Supplier;)Ljava/lang/Object; +FN:229,com.mediatek.jacoco2lcov.util.Either.leftCast(Lcom/mediatek/jacoco2lcov/util/Either;) +FN:239,com.mediatek.jacoco2lcov.util.Either.rightCast(Lcom/mediatek/jacoco2lcov/util/Either;) +FN:250,com.mediatek.jacoco2lcov.util.Either.removeNulls(Lcom/mediatek/jacoco2lcov/util/Either;) +FN:250,com.mediatek.jacoco2lcov.util.Either.lambda$removeNulls$0(Ljava/lang/Object;)Z +FN:258,com.mediatek.jacoco2lcov.util.Either.keep(Ljava/util/function/Predicate;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:266,com.mediatek.jacoco2lcov.util.Either.map(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/Either; 
+FN:275,com.mediatek.jacoco2lcov.util.Either.flatMap(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:284,com.mediatek.jacoco2lcov.util.Either.ifLeftDo(Ljava/util/function/Consumer;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:294,com.mediatek.jacoco2lcov.util.Either.andEitherDo(Ljava/util/function/Consumer;Ljava/util/function/Consumer;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.left(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.right(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.from(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.from(Ljava/lang/Object;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.from(Ljava/util/function/Supplier;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.when(ZLjava/lang/Object;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.when(ZLjava/util/function/Supplier;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.when(ZLjava/lang/Object;Ljava/util/function/Supplier;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.when(ZLjava/util/function/Supplier;Ljava/util/function/Supplier;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.unless(ZLjava/lang/Object;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.unless(ZLjava/util/function/Supplier;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.unless(ZLjava/lang/Object;Ljava/util/function/Supplier;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.unless(ZLjava/util/function/Supplier;Ljava/util/function/Supplier;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.(ZLjava/lang/Object;Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Either.toString(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.util.Either.debug(Lcom/mediatek/jacoco2lcov/util/Either;) +FNDA:0,com.mediatek.jacoco2lcov.util.Either.isLeft(Z) +FNDA:0,com.mediatek.jacoco2lcov.util.Either.getLeft(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Either.isRight(Z) +FNDA:0,com.mediatek.jacoco2lcov.util.Either.get(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Either.getOrElse(Ljava/lang/Object;)Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.getOrElse(Ljava/util/function/Supplier;)Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.leftCast(Lcom/mediatek/jacoco2lcov/util/Either;) +FNDA:0,com.mediatek.jacoco2lcov.util.Either.rightCast(Lcom/mediatek/jacoco2lcov/util/Either;) +FNDA:0,com.mediatek.jacoco2lcov.util.Either.removeNulls(Lcom/mediatek/jacoco2lcov/util/Either;) +FNDA:0,com.mediatek.jacoco2lcov.util.Either.lambda$removeNulls$0(Ljava/lang/Object;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Either.keep(Ljava/util/function/Predicate;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.map(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.flatMap(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/Either; 
+FNDA:0,com.mediatek.jacoco2lcov.util.Either.ifLeftDo(Ljava/util/function/Consumer;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.andEitherDo(Ljava/util/function/Consumer;Ljava/util/function/Consumer;)Lcom/mediatek/jacoco2lcov/util/Either; +FNF:31 +FNH:0 +BRDA:51,0,0,0 +BRDA:51,0,1,0 +BRDA:59,0,0,0 +BRDA:59,0,1,0 +BRDA:60,0,0,0 +BRDA:60,0,1,0 +BRDA:70,0,0,0 +BRDA:70,0,1,0 +BRDA:79,0,0,0 +BRDA:79,0,1,0 +BRDA:88,0,0,0 +BRDA:88,0,1,0 +BRDA:97,0,0,0 +BRDA:97,0,1,0 +BRDA:106,0,0,0 +BRDA:106,0,1,0 +BRDA:116,0,0,0 +BRDA:116,0,1,0 +BRDA:125,0,0,0 +BRDA:125,0,1,0 +BRDA:135,0,0,0 +BRDA:135,0,1,0 +BRDA:166,0,0,0 +BRDA:166,0,1,0 +BRDA:185,0,0,0 +BRDA:185,0,1,0 +BRDA:190,0,0,0 +BRDA:190,0,1,0 +BRDA:203,0,0,0 +BRDA:203,0,1,0 +BRDA:215,0,0,0 +BRDA:215,0,1,0 +BRDA:220,0,0,0 +BRDA:220,0,1,0 +BRDA:229,0,0,0 +BRDA:229,0,1,0 +BRDA:239,0,0,0 +BRDA:239,0,1,0 +BRDA:250,0,0,0 +BRDA:250,0,1,0 +BRDA:258,0,0,0 +BRDA:258,0,1,0 +BRDA:258,0,2,0 +BRDA:258,0,3,0 +BRDA:266,0,0,0 +BRDA:266,0,1,0 +BRDA:275,0,0,0 +BRDA:275,0,1,0 +BRDA:284,0,0,0 +BRDA:284,0,1,0 +BRDA:294,0,0,0 +BRDA:294,0,1,0 +BRF:52 +BRH:0 +DA:30,0 +DA:35,0 +DA:43,0 +DA:51,0 +DA:59,0 +DA:60,0 +DA:61,0 +DA:70,0 +DA:79,0 +DA:88,0 +DA:97,0 +DA:106,0 +DA:116,0 +DA:125,0 +DA:135,0 +DA:142,0 +DA:154,0 +DA:155,0 +DA:156,0 +DA:157,0 +DA:158,0 +DA:166,0 +DA:167,0 +DA:169,0 +DA:180,0 +DA:185,0 +DA:190,0 +DA:191,0 +DA:193,0 +DA:198,0 +DA:203,0 +DA:204,0 +DA:206,0 +DA:215,0 +DA:220,0 +DA:229,0 +DA:239,0 +DA:250,0 +DA:258,0 +DA:266,0 +DA:275,0 +DA:284,0 +DA:285,0 +DA:294,0 +DA:295,0 +DA:297,0 +DA:298,0 +LH:0 +LF:47 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/util/ListF.java +FN:37,com.mediatek.jacoco2lcov.util.ListF.() +FN:50,com.mediatek.jacoco2lcov.util.ListF.nil(Lcom/mediatek/jacoco2lcov/util/ListF;) +FN:58,com.mediatek.jacoco2lcov.util.ListF.nil(Ljava/lang/Class;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:66,com.mediatek.jacoco2lcov.util.ListF.cons(Ljava/lang/Object;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:75,com.mediatek.jacoco2lcov.util.ListF.cons(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:81,com.mediatek.jacoco2lcov.util.ListF.listOf(Ljava/lang/Class;[Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:87,com.mediatek.jacoco2lcov.util.ListF.list([Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:98,com.mediatek.jacoco2lcov.util.ListF.of([Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:103,com.mediatek.jacoco2lcov.util.ListF.from(Ljava/util/List;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:112,com.mediatek.jacoco2lcov.util.ListF.from(Ljava/util/Collection;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:118,com.mediatek.jacoco2lcov.util.ListF.from([Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:130,com.mediatek.jacoco2lcov.util.ListF.range(II)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:154,com.mediatek.jacoco2lcov.util.ListF.() +FN:158,com.mediatek.jacoco2lcov.util.ListF.(Ljava/lang/Object;Lcom/mediatek/jacoco2lcov/util/ListF;) +FN:165,com.mediatek.jacoco2lcov.util.ListF.dispose() +FN:176,com.mediatek.jacoco2lcov.util.ListF.toString(Ljava/lang/String;) +FN:178,com.mediatek.jacoco2lcov.util.ListF.lambda$toString$0(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FN:184,com.mediatek.jacoco2lcov.util.ListF.equals(Ljava/lang/Object;)Z +FN:190,com.mediatek.jacoco2lcov.util.ListF.equals(Lcom/mediatek/jacoco2lcov/util/ListF;Lcom/mediatek/jacoco2lcov/util/ListF;)Z 
+FN:210,com.mediatek.jacoco2lcov.util.ListF.debug(Lcom/mediatek/jacoco2lcov/util/ListF;) +FN:218,com.mediatek.jacoco2lcov.util.ListF.isNull(Z) +FN:223,com.mediatek.jacoco2lcov.util.ListF.isEmpty(Z) +FN:228,com.mediatek.jacoco2lcov.util.ListF.isNotEmpty(Z) +FN:236,com.mediatek.jacoco2lcov.util.ListF.get(Ljava/lang/Object;) +FN:244,com.mediatek.jacoco2lcov.util.ListF.getOrElse(Ljava/util/function/Supplier;)Ljava/lang/Object; +FN:252,com.mediatek.jacoco2lcov.util.ListF.getOrElse(Ljava/lang/Object;)Ljava/lang/Object; +FN:260,com.mediatek.jacoco2lcov.util.ListF.getOrElseNull(Ljava/lang/Object;) +FN:268,com.mediatek.jacoco2lcov.util.ListF.head(Ljava/lang/Object;) +FN:276,com.mediatek.jacoco2lcov.util.ListF.tail(Lcom/mediatek/jacoco2lcov/util/ListF;) +FN:284,com.mediatek.jacoco2lcov.util.ListF.car(Ljava/lang/Object;) +FN:289,com.mediatek.jacoco2lcov.util.ListF.cdr(Lcom/mediatek/jacoco2lcov/util/ListF;) +FN:296,com.mediatek.jacoco2lcov.util.ListF.toList(Ljava/util/List;) +FN:297,com.mediatek.jacoco2lcov.util.ListF.lambda$toList$1(Ljava/util/ArrayList;Ljava/lang/Object;) +FN:303,com.mediatek.jacoco2lcov.util.ListF.toArrayOf(Ljava/lang/Class;)[Ljava/lang/Object; +#COM: lambda on same line as function +FN:319,com.mediatek.jacoco2lcov.util.ListF.removeNulls(Lcom/mediatek/jacoco2lcov/util/ListF;) +FN:319,com.mediatek.jacoco2lcov.util.ListF.lambda$removeNulls$2(Ljava/lang/Object;)Z +FN:326,com.mediatek.jacoco2lcov.util.ListF.length(I) +FN:340,com.mediatek.jacoco2lcov.util.ListF.forEach(Ljava/util/function/Consumer;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:352,com.mediatek.jacoco2lcov.util.ListF.reverse(Lcom/mediatek/jacoco2lcov/util/ListF;) +FN:366,com.mediatek.jacoco2lcov.util.ListF.concat(Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:380,com.mediatek.jacoco2lcov.util.ListF.map(Ljava/util/function/Function;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:387,com.mediatek.jacoco2lcov.util.ListF.map(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:400,com.mediatek.jacoco2lcov.util.ListF.keep(Ljava/util/function/Predicate;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:415,com.mediatek.jacoco2lcov.util.ListF.foldLeft(Ljava/util/function/BiFunction;Ljava/lang/Object;)Ljava/lang/Object; +FN:429,com.mediatek.jacoco2lcov.util.ListF.foldLeft1(Ljava/util/function/BiFunction;)Ljava/lang/Object; +FN:447,com.mediatek.jacoco2lcov.util.ListF.flatMap(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:456,com.mediatek.jacoco2lcov.util.ListF.flatten(Lcom/mediatek/jacoco2lcov/util/ListF;) +FN:458,com.mediatek.jacoco2lcov.util.ListF.lambda$flatten$3(Ljava/util/List;Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:1,com.mediatek.jacoco2lcov.util.ListF.() +FNDA:1,com.mediatek.jacoco2lcov.util.ListF.nil(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.nil(Ljava/lang/Class;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.cons(Ljava/lang/Object;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.cons(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.listOf(Ljava/lang/Class;[Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.list([Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.of([Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/ListF; 
+FNDA:0,com.mediatek.jacoco2lcov.util.ListF.from(Ljava/util/List;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.from(Ljava/util/Collection;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.from([Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.range(II)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:1,com.mediatek.jacoco2lcov.util.ListF.() +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.(Ljava/lang/Object;Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.dispose() +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.toString(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.lambda$toString$0(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.equals(Ljava/lang/Object;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.equals(Lcom/mediatek/jacoco2lcov/util/ListF;Lcom/mediatek/jacoco2lcov/util/ListF;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.debug(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:1,com.mediatek.jacoco2lcov.util.ListF.isNull(Z) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.isEmpty(Z) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.isNotEmpty(Z) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.get(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.getOrElse(Ljava/util/function/Supplier;)Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.getOrElse(Ljava/lang/Object;)Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.getOrElseNull(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.head(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.tail(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.car(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.cdr(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.toList(Ljava/util/List;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.lambda$toList$1(Ljava/util/ArrayList;Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.toArrayOf(Ljava/lang/Class;)[Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.removeNulls(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.lambda$removeNulls$2(Ljava/lang/Object;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.length(I) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.forEach(Ljava/util/function/Consumer;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:1,com.mediatek.jacoco2lcov.util.ListF.reverse(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.concat(Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.map(Ljava/util/function/Function;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.map(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.keep(Ljava/util/function/Predicate;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.foldLeft(Ljava/util/function/BiFunction;Ljava/lang/Object;)Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.foldLeft1(Ljava/util/function/BiFunction;)Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.flatMap(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/ListF; 
+FNDA:0,com.mediatek.jacoco2lcov.util.ListF.flatten(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.lambda$flatten$3(Ljava/util/List;Lcom/mediatek/jacoco2lcov/util/ListF;) +FNF:48 +FNH:5 +BRDA:88,0,0,0 +BRDA:88,0,1,0 +BRDA:89,0,0,0 +BRDA:89,0,1,0 +BRDA:105,0,0,0 +BRDA:105,0,1,0 +BRDA:112,0,0,0 +BRDA:112,0,1,0 +BRDA:119,0,0,0 +BRDA:119,0,1,0 +BRDA:120,0,0,0 +BRDA:120,0,1,0 +BRDA:131,0,0,0 +BRDA:131,0,1,0 +BRDA:132,0,0,0 +BRDA:132,0,1,0 +BRDA:135,0,0,0 +BRDA:135,0,1,0 +BRDA:165,0,0,0 +BRDA:165,0,1,0 +BRDA:184,0,0,0 +BRDA:184,0,1,0 +BRDA:190,0,0,0 +BRDA:190,0,1,0 +BRDA:190,0,2,0 +BRDA:190,0,3,0 +BRDA:191,0,0,0 +BRDA:191,0,1,0 +BRDA:191,0,2,0 +BRDA:191,0,3,0 +BRDA:194,0,0,0 +BRDA:194,0,1,0 +BRDA:194,0,2,0 +BRDA:194,0,3,0 +BRDA:195,0,0,0 +BRDA:195,0,1,0 +BRDA:195,0,2,0 +BRDA:195,0,3,0 +BRDA:198,0,0,0 +BRDA:198,0,1,0 +BRDA:218,0,0,1 +BRDA:218,0,1,0 +BRDA:228,0,0,0 +BRDA:228,0,1,0 +BRDA:244,0,0,0 +BRDA:244,0,1,0 +BRDA:252,0,0,0 +BRDA:252,0,1,0 +BRDA:268,0,0,0 +BRDA:268,0,1,0 +BRDA:276,0,0,0 +BRDA:276,0,1,0 +BRDA:306,0,0,0 +BRDA:306,0,1,0 +BRDA:319,0,0,0 +BRDA:319,0,1,0 +BRDA:328,0,0,0 +BRDA:328,0,1,0 +BRDA:341,0,0,0 +BRDA:341,0,1,0 +BRDA:354,0,0,1 +BRDA:354,0,1,0 +BRDA:368,0,0,0 +BRDA:368,0,1,0 +BRDA:389,0,0,0 +BRDA:389,0,1,0 +BRDA:402,0,0,0 +BRDA:402,0,1,0 +BRDA:403,0,0,0 +BRDA:403,0,1,0 +BRDA:417,0,0,0 +BRDA:417,0,1,0 +BRDA:429,0,0,0 +BRDA:429,0,1,0 +BRDA:434,0,0,0 +BRDA:434,0,1,0 +BRF:76 +BRH:2 +DA:37,1 +DA:50,1 +DA:58,0 +DA:66,0 +DA:67,0 +DA:75,0 +DA:81,0 +DA:87,0 +DA:88,0 +DA:89,0 +DA:90,0 +DA:92,0 +DA:98,0 +DA:103,0 +DA:104,0 +DA:105,0 +DA:106,0 +DA:107,0 +DA:112,0 +DA:118,0 +DA:119,0 +DA:120,0 +DA:121,0 +DA:122,0 +DA:130,0 +DA:131,0 +DA:132,0 +DA:133,0 +DA:135,0 +DA:136,0 +DA:137,0 +DA:154,1 +DA:155,1 +DA:158,0 +DA:159,0 +DA:160,0 +DA:161,0 +DA:165,0 +DA:166,0 +DA:167,0 +DA:168,0 +DA:176,0 +DA:177,0 +DA:178,0 +DA:184,0 +DA:185,0 +DA:190,0 +DA:191,0 +DA:192,0 +DA:193,0 +DA:194,0 +DA:195,0 +DA:196,0 +DA:197,0 +DA:198,0 +DA:199,0 +DA:210,0 +DA:218,1 +DA:223,0 +DA:228,0 +DA:236,0 +DA:244,0 +DA:252,0 +DA:260,0 +DA:268,0 +DA:269,0 +DA:271,0 +DA:276,0 +DA:277,0 +DA:279,0 +DA:284,0 +DA:289,0 +DA:296,0 +DA:297,0 +DA:298,0 +DA:303,0 +DA:304,0 +DA:305,0 +DA:306,0 +DA:307,0 +DA:309,0 +DA:319,0 +DA:326,0 +DA:327,0 +DA:328,0 +DA:329,0 +DA:330,0 +DA:332,0 +DA:340,0 +DA:341,0 +DA:342,0 +DA:343,0 +DA:345,0 +DA:352,1 +DA:353,1 +DA:354,1 +DA:355,0 +DA:356,0 +DA:358,1 +DA:366,0 +DA:367,0 +DA:368,0 +DA:369,0 +DA:370,0 +DA:372,0 +DA:380,0 +DA:387,0 +DA:388,0 +DA:389,0 +DA:390,0 +DA:391,0 +DA:393,0 +DA:400,0 +DA:401,0 +DA:402,0 +DA:403,0 +DA:404,0 +DA:405,0 +DA:407,0 +DA:415,0 +DA:416,0 +DA:417,0 +DA:418,0 +DA:419,0 +DA:421,0 +DA:429,0 +DA:430,0 +DA:432,0 +DA:433,0 +DA:434,0 +DA:435,0 +DA:436,0 +DA:438,0 +DA:447,0 +DA:456,0 +DA:457,0 +DA:458,0 +DA:459,0 +LH:9 +LF:138 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/util/Maybe.java +FN:29,com.mediatek.jacoco2lcov.util.Maybe.() +FN:41,com.mediatek.jacoco2lcov.util.Maybe.nothing(Lcom/mediatek/jacoco2lcov/util/Maybe;) +FN:49,com.mediatek.jacoco2lcov.util.Maybe.nothing(Ljava/lang/Class;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FN:57,com.mediatek.jacoco2lcov.util.Maybe.just(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FN:62,com.mediatek.jacoco2lcov.util.Maybe.from(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FN:67,com.mediatek.jacoco2lcov.util.Maybe.from(Ljava/lang/Class;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FN:86,com.mediatek.jacoco2lcov.util.Maybe.() 
+FN:90,com.mediatek.jacoco2lcov.util.Maybe.(Ljava/lang/Object;) +FN:97,com.mediatek.jacoco2lcov.util.Maybe.dispose() +FN:107,com.mediatek.jacoco2lcov.util.Maybe.toString(Ljava/lang/String;) +FN:113,com.mediatek.jacoco2lcov.util.Maybe.equals(Ljava/lang/Object;)Z +FN:127,com.mediatek.jacoco2lcov.util.Maybe.hashCode(I) +FN:142,com.mediatek.jacoco2lcov.util.Maybe.debug(Lcom/mediatek/jacoco2lcov/util/Maybe;) +FN:149,com.mediatek.jacoco2lcov.util.Maybe.isNothing(Z) +FN:156,com.mediatek.jacoco2lcov.util.Maybe.exists(Z) +FN:164,com.mediatek.jacoco2lcov.util.Maybe.get(Ljava/lang/Object;) +FN:176,com.mediatek.jacoco2lcov.util.Maybe.getOrElseNull(Ljava/lang/Object;) +FN:186,com.mediatek.jacoco2lcov.util.Maybe.getOrElse(Ljava/lang/Object;)Ljava/lang/Object; +FN:194,com.mediatek.jacoco2lcov.util.Maybe.getOrElse(Ljava/util/function/Supplier;)Ljava/lang/Object; +FN:205,com.mediatek.jacoco2lcov.util.Maybe.removeNulls(Lcom/mediatek/jacoco2lcov/util/Maybe;) +FN:205,com.mediatek.jacoco2lcov.util.Maybe.lambda$removeNulls$0(Ljava/lang/Object;)Z +FN:213,com.mediatek.jacoco2lcov.util.Maybe.keep(Ljava/util/function/Predicate;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FN:223,com.mediatek.jacoco2lcov.util.Maybe.map(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FN:231,com.mediatek.jacoco2lcov.util.Maybe.mapN(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FN:239,com.mediatek.jacoco2lcov.util.Maybe.flatMap(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.() +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.nothing(Lcom/mediatek/jacoco2lcov/util/Maybe;) +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.nothing(Ljava/lang/Class;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.just(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.from(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.from(Ljava/lang/Class;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.() +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.dispose() +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.toString(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.equals(Ljava/lang/Object;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.hashCode(I) +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.debug(Lcom/mediatek/jacoco2lcov/util/Maybe;) +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.isNothing(Z) +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.exists(Z) +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.get(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.getOrElseNull(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.getOrElse(Ljava/lang/Object;)Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.getOrElse(Ljava/util/function/Supplier;)Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.removeNulls(Lcom/mediatek/jacoco2lcov/util/Maybe;) +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.lambda$removeNulls$0(Ljava/lang/Object;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.keep(Ljava/util/function/Predicate;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.map(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.mapN(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/Maybe; 
+FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.flatMap(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FNF:25 +FNH:0 +BRDA:62,0,0,0 +BRDA:62,0,1,0 +BRDA:67,0,0,0 +BRDA:67,0,1,0 +BRDA:97,0,0,0 +BRDA:97,0,1,0 +BRDA:107,0,0,0 +BRDA:107,0,1,0 +BRDA:113,0,0,0 +BRDA:113,0,1,0 +BRDA:115,0,0,0 +BRDA:115,0,1,0 +BRDA:117,0,0,0 +BRDA:117,0,1,0 +BRDA:127,0,0,0 +BRDA:127,0,1,0 +BRDA:130,0,0,0 +BRDA:130,0,1,0 +BRDA:149,0,0,0 +BRDA:149,0,1,0 +BRDA:156,0,0,0 +BRDA:156,0,1,0 +BRDA:164,0,0,0 +BRDA:164,0,1,0 +BRDA:186,0,0,0 +BRDA:186,0,1,0 +BRDA:194,0,0,0 +BRDA:194,0,1,0 +BRDA:205,0,0,0 +BRDA:205,0,1,0 +BRDA:213,0,0,0 +BRDA:213,0,1,0 +BRDA:213,0,2,0 +BRDA:213,0,3,0 +BRDA:223,0,0,0 +BRDA:223,0,1,0 +BRDA:231,0,0,0 +BRDA:231,0,1,0 +BRDA:239,0,0,0 +BRDA:239,0,1,0 +BRF:40 +BRH:0 +DA:29,0 +DA:41,0 +DA:49,0 +DA:57,0 +DA:62,0 +DA:67,0 +DA:86,0 +DA:87,0 +DA:90,0 +DA:91,0 +DA:92,0 +DA:97,0 +DA:98,0 +DA:99,0 +DA:107,0 +DA:113,0 +DA:114,0 +DA:115,0 +DA:116,0 +DA:117,0 +DA:118,0 +DA:120,0 +DA:127,0 +DA:128,0 +DA:130,0 +DA:142,0 +DA:149,0 +DA:156,0 +DA:164,0 +DA:165,0 +DA:167,0 +DA:176,0 +DA:186,0 +DA:194,0 +DA:205,0 +DA:213,0 +DA:223,0 +DA:231,0 +DA:239,0 +LH:0 +LF:39 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/util/Pair.java +FN:25,com.mediatek.jacoco2lcov.util.Pair.pair(Ljava/lang/Object;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Pair; +FN:32,com.mediatek.jacoco2lcov.util.Pair.(Ljava/lang/Object;Ljava/lang/Object;) +FN:48,com.mediatek.jacoco2lcov.util.Pair.dispose() +FN:58,com.mediatek.jacoco2lcov.util.Pair.toString(Ljava/lang/String;) +FN:66,com.mediatek.jacoco2lcov.util.Pair.hashCode(I) +FN:73,com.mediatek.jacoco2lcov.util.Pair.equals(Ljava/lang/Object;)Z +FN:85,com.mediatek.jacoco2lcov.util.Pair.clone(Ljava/lang/Object;) +FN:93,com.mediatek.jacoco2lcov.util.Pair.first(Ljava/lang/Object;) +FN:101,com.mediatek.jacoco2lcov.util.Pair.setFirst(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Pair; +FN:107,com.mediatek.jacoco2lcov.util.Pair.second(Ljava/lang/Object;) +FN:115,com.mediatek.jacoco2lcov.util.Pair.setSecond(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Pair; +FN:124,com.mediatek.jacoco2lcov.util.Pair.set(Ljava/lang/Object;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Pair; +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.pair(Ljava/lang/Object;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Pair; +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.(Ljava/lang/Object;Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.dispose() +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.toString(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.hashCode(I) +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.equals(Ljava/lang/Object;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.clone(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.first(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.setFirst(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Pair; +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.second(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.setSecond(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Pair; +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.set(Ljava/lang/Object;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Pair; +FNF:12 +FNH:0 +BRDA:66,0,0,0 +BRDA:66,0,1,0 +BRDA:67,0,0,0 +BRDA:67,0,1,0 +BRDA:74,0,0,0 +BRDA:74,0,1,0 +BRDA:76,0,0,0 +BRDA:76,0,1,0 +BRDA:77,0,0,0 +BRDA:77,0,1,0 +BRF:10 +BRH:0 +DA:25,0 +DA:32,0 +DA:35,0 +DA:41,0 +DA:42,0 +DA:43,0 +DA:44,0 +DA:48,0 +DA:49,0 +DA:50,0 +DA:58,0 +DA:59,0 +DA:60,0 
+DA:66,0 +DA:67,0 +DA:73,0 +DA:74,0 +DA:75,0 +DA:76,0 +DA:77,0 +DA:79,0 +DA:85,0 +DA:93,0 +DA:101,0 +DA:102,0 +DA:107,0 +DA:115,0 +DA:116,0 +DA:124,0 +DA:125,0 +DA:126,0 +LH:0 +LF:31 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/util/Utils.java +FN:54,com.mediatek.jacoco2lcov.util.Utils.getCurrentWorkingDirectory(Ljava/io/File;) +FN:59,com.mediatek.jacoco2lcov.util.Utils.getCurrentWorkingDirectoryPathname(Ljava/lang/String;) +FN:67,com.mediatek.jacoco2lcov.util.Utils.getenv(Ljava/lang/String;)Ljava/lang/String; +FN:75,com.mediatek.jacoco2lcov.util.Utils.getenv(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FN:83,com.mediatek.jacoco2lcov.util.Utils.getenvBoolean(Ljava/lang/String;Z)Z +FN:91,com.mediatek.jacoco2lcov.util.Utils.getenvInteger(Ljava/lang/String;I)I +FN:99,com.mediatek.jacoco2lcov.util.Utils.isNull(Ljava/lang/Object;)Z +FN:104,com.mediatek.jacoco2lcov.util.Utils.isNotNull(Ljava/lang/Object;)Z +FN:109,com.mediatek.jacoco2lcov.util.Utils.ifNull(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object; +FN:119,com.mediatek.jacoco2lcov.util.Utils.ifNull(Ljava/lang/Object;Ljava/lang/Object;Ljava/util/function/Function;)Ljava/lang/Object; +FN:130,com.mediatek.jacoco2lcov.util.Utils.not(Z)Z +FN:135,com.mediatek.jacoco2lcov.util.Utils.clampMin(II)I +FN:146,com.mediatek.jacoco2lcov.util.Utils.sleep(I)Z +FN:159,com.mediatek.jacoco2lcov.util.Utils.exceptionToString(Ljava/lang/Throwable;)Ljava/lang/String; +FN:174,com.mediatek.jacoco2lcov.util.Utils.toString(Ljava/lang/Object;)Ljava/lang/String; +FN:182,com.mediatek.jacoco2lcov.util.Utils.toString(Ljava/lang/Object;Ljava/lang/String;)Ljava/lang/String; +FN:187,com.mediatek.jacoco2lcov.util.Utils.equals(Ljava/lang/String;Ljava/lang/String;)Z +FN:193,com.mediatek.jacoco2lcov.util.Utils.isEmpty(Ljava/lang/String;)Z +FN:198,com.mediatek.jacoco2lcov.util.Utils.isNotEmpty(Ljava/lang/String;)Z +FN:203,com.mediatek.jacoco2lcov.util.Utils.concatenate(Ljava/lang/String;[Ljava/lang/String;)Ljava/lang/String; +FN:211,com.mediatek.jacoco2lcov.util.Utils.concatenate([Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FN:222,com.mediatek.jacoco2lcov.util.Utils.concatenate(Ljava/lang/String;Ljava/util/List;)Ljava/lang/String; +FN:230,com.mediatek.jacoco2lcov.util.Utils.concatenate(Ljava/util/List;Ljava/lang/String;)Ljava/lang/String; +FN:247,com.mediatek.jacoco2lcov.util.Utils.isBoolean(Ljava/lang/String;)Z +FN:266,com.mediatek.jacoco2lcov.util.Utils.toBoolean(Ljava/lang/String;)Z +FN:271,com.mediatek.jacoco2lcov.util.Utils.toBoolean(Ljava/lang/String;Z)Z +FN:294,com.mediatek.jacoco2lcov.util.Utils.toInt(Ljava/lang/String;)I +FN:302,com.mediatek.jacoco2lcov.util.Utils.toInt(Ljava/lang/String;I)I +FN:311,com.mediatek.jacoco2lcov.util.Utils.toInt(Ljava/lang/String;II)I +FN:335,com.mediatek.jacoco2lcov.util.Utils.equals(Ljava/lang/Object;Ljava/lang/Object;)Z +FN:343,com.mediatek.jacoco2lcov.util.Utils.compare(II)I +FN:352,com.mediatek.jacoco2lcov.util.Utils.compare(Ljava/lang/String;Ljava/lang/String;)I +FN:377,com.mediatek.jacoco2lcov.util.Utils.toListOf(Ljava/lang/Class;Ljava/util/List;)Ljava/util/List; +FN:407,com.mediatek.jacoco2lcov.util.Utils.toListOf(Ljava/lang/Class;[Ljava/lang/Object;)Ljava/util/List; +FN:421,com.mediatek.jacoco2lcov.util.Utils.toList([Ljava/lang/Object;)Ljava/util/List; +FN:430,com.mediatek.jacoco2lcov.util.Utils.createArrayOf(Ljava/lang/Class;I)[Ljava/lang/Object; +FN:447,com.mediatek.jacoco2lcov.util.Utils.toArrayOf(Ljava/lang/Class;[Ljava/lang/Object;)[Ljava/lang/Object; 
+FN:465,com.mediatek.jacoco2lcov.util.Utils.listify(Ljava/util/Collection;)Ljava/util/List; +FN:475,com.mediatek.jacoco2lcov.util.Utils.format(Ljava/lang/String;[Ljava/lang/Object;)Ljava/lang/String; +FN:481,com.mediatek.jacoco2lcov.util.Utils.append(Ljava/lang/StringBuffer;Ljava/lang/String;[Ljava/lang/Object;) +FN:487,com.mediatek.jacoco2lcov.util.Utils.append(Ljava/lang/StringBuilder;Ljava/lang/String;[Ljava/lang/Object;) +FN:493,com.mediatek.jacoco2lcov.util.Utils.println() +FN:498,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/io/PrintStream;) +FN:503,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/io/PrintWriter;) +FN:508,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/lang/StringBuffer;) +FN:513,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/lang/StringBuilder;) +FN:519,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/lang/String;[Ljava/lang/Object;) +FN:524,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/io/PrintStream;Ljava/lang/String;[Ljava/lang/Object;) +FN:530,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/io/PrintWriter;Ljava/lang/String;[Ljava/lang/Object;) +FN:540,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/lang/StringBuffer;Ljava/lang/String;[Ljava/lang/Object;) +FN:550,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/lang/StringBuilder;Ljava/lang/String;[Ljava/lang/Object;) +FN:557,com.mediatek.jacoco2lcov.util.Utils.print(Ljava/lang/String;[Ljava/lang/Object;) +FN:562,com.mediatek.jacoco2lcov.util.Utils.print(Ljava/io/PrintStream;Ljava/lang/String;[Ljava/lang/Object;) +FN:567,com.mediatek.jacoco2lcov.util.Utils.print(Ljava/io/PrintWriter;Ljava/lang/String;[Ljava/lang/Object;) +FN:573,com.mediatek.jacoco2lcov.util.Utils.print(Ljava/lang/StringBuffer;Ljava/lang/String;[Ljava/lang/Object;) +FN:579,com.mediatek.jacoco2lcov.util.Utils.print(Ljava/lang/StringBuilder;Ljava/lang/String;[Ljava/lang/Object;) +FN:585,com.mediatek.jacoco2lcov.util.Utils.flush() +FN:591,com.mediatek.jacoco2lcov.util.Utils.flush(Ljava/io/Writer;) +FN:599,com.mediatek.jacoco2lcov.util.Utils.flush(Ljava/io/OutputStream;) +FN:609,com.mediatek.jacoco2lcov.util.Utils.exists(Ljava/lang/String;)Z +FN:614,com.mediatek.jacoco2lcov.util.Utils.exists(Ljava/io/File;)Z +FN:619,com.mediatek.jacoco2lcov.util.Utils.isFile(Ljava/lang/String;)Z +FN:628,com.mediatek.jacoco2lcov.util.Utils.isFile(Ljava/io/File;)Z +FN:634,com.mediatek.jacoco2lcov.util.Utils.isAbsolute(Ljava/lang/String;)Z +FN:639,com.mediatek.jacoco2lcov.util.Utils.isAbsolute(Ljava/io/File;)Z +FN:644,com.mediatek.jacoco2lcov.util.Utils.isRelative(Ljava/lang/String;)Z +FN:649,com.mediatek.jacoco2lcov.util.Utils.isRelative(Ljava/io/File;)Z +FN:655,com.mediatek.jacoco2lcov.util.Utils.isDirectory(Ljava/lang/String;)Z +FN:661,com.mediatek.jacoco2lcov.util.Utils.isDirectory(Ljava/io/File;)Z +FN:666,com.mediatek.jacoco2lcov.util.Utils.isReadable(Ljava/lang/String;)Z +FN:671,com.mediatek.jacoco2lcov.util.Utils.isReadable(Ljava/io/File;)Z +FN:676,com.mediatek.jacoco2lcov.util.Utils.isWritable(Ljava/lang/String;)Z +FN:681,com.mediatek.jacoco2lcov.util.Utils.isWritable(Ljava/io/File;)Z +FN:686,com.mediatek.jacoco2lcov.util.Utils.getDirectory(Ljava/io/File;)Ljava/lang/String; +FN:692,com.mediatek.jacoco2lcov.util.Utils.getFilename(Ljava/io/File;)Ljava/lang/String; +FN:698,com.mediatek.jacoco2lcov.util.Utils.getFilename(Ljava/lang/String;)Ljava/lang/String; +FN:705,com.mediatek.jacoco2lcov.util.Utils.getDirectory(Ljava/lang/String;)Ljava/lang/String; 
+FN:716,com.mediatek.jacoco2lcov.util.Utils.dropLeadingSeparator(Ljava/lang/String;)Ljava/lang/String; +FN:730,com.mediatek.jacoco2lcov.util.Utils.dropTrailingSeparator(Ljava/lang/String;)Ljava/lang/String; +FN:744,com.mediatek.jacoco2lcov.util.Utils.getAbsolutePathname(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FN:756,com.mediatek.jacoco2lcov.util.Utils.getAbsolutePathname(Ljava/io/File;Ljava/lang/String;)Ljava/lang/String; +FN:768,com.mediatek.jacoco2lcov.util.Utils.getAbsolutePathname(Ljava/lang/String;Ljava/io/File;)Ljava/lang/String; +FN:780,com.mediatek.jacoco2lcov.util.Utils.getAbsolutePathname(Ljava/io/File;Ljava/io/File;)Ljava/lang/String; +FN:795,com.mediatek.jacoco2lcov.util.Utils.getRelativePathname(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FN:809,com.mediatek.jacoco2lcov.util.Utils.getRelativePathname(Ljava/io/File;Ljava/lang/String;)Ljava/lang/String; +FN:822,com.mediatek.jacoco2lcov.util.Utils.getRelativePathname(Ljava/lang/String;Ljava/io/File;)Ljava/lang/String; +FN:835,com.mediatek.jacoco2lcov.util.Utils.getRelativePathname(Ljava/io/File;Ljava/io/File;)Ljava/lang/String; +FN:848,com.mediatek.jacoco2lcov.util.Utils.getRelativeFile(Ljava/lang/String;Ljava/lang/String;)Ljava/io/File; +FN:861,com.mediatek.jacoco2lcov.util.Utils.getRelativeFile(Ljava/io/File;Ljava/lang/String;)Ljava/io/File; +FN:874,com.mediatek.jacoco2lcov.util.Utils.getRelativeFile(Ljava/lang/String;Ljava/io/File;)Ljava/io/File; +FN:887,com.mediatek.jacoco2lcov.util.Utils.getRelativeFile(Ljava/io/File;Ljava/io/File;)Ljava/io/File; +FN:899,com.mediatek.jacoco2lcov.util.Utils.getCanonicalPathname(Ljava/lang/String;)Ljava/lang/String; +FN:911,com.mediatek.jacoco2lcov.util.Utils.getCanonicalPathname(Ljava/io/File;)Ljava/lang/String; +FN:923,com.mediatek.jacoco2lcov.util.Utils.getCanonicalFile(Ljava/lang/String;)Ljava/io/File; +FN:935,com.mediatek.jacoco2lcov.util.Utils.getCanonicalFile(Ljava/io/File;)Ljava/io/File; +FN:947,com.mediatek.jacoco2lcov.util.Utils.getCanonicalFile(Ljava/lang/String;Ljava/io/File;)Ljava/io/File; +FN:973,com.mediatek.jacoco2lcov.util.Utils.getCanonicalFile(Ljava/lang/String;Ljava/lang/String;)Ljava/io/File; +FN:982,com.mediatek.jacoco2lcov.util.Utils.constructPathname(Ljava/lang/String;[Ljava/lang/String;)Ljava/lang/String; +FN:991,com.mediatek.jacoco2lcov.util.Utils.constructPathname(Ljava/io/File;[Ljava/lang/String;)Ljava/lang/String; +FN:999,com.mediatek.jacoco2lcov.util.Utils.constructPathnameFile(Ljava/lang/String;[Ljava/lang/String;)Ljava/io/File; +FN:1008,com.mediatek.jacoco2lcov.util.Utils.constructPathnameFile(Ljava/io/File;[Ljava/lang/String;)Ljava/io/File; +FN:1030,com.mediatek.jacoco2lcov.util.Utils.findFiles(Ljava/lang/String;Ljava/util/function/Predicate;)Ljava/util/List; +FN:1041,com.mediatek.jacoco2lcov.util.Utils.findFiles(Ljava/io/File;Ljava/util/function/Predicate;)Ljava/util/List; +FN:1052,com.mediatek.jacoco2lcov.util.Utils.findDirectories(Ljava/lang/String;Ljava/util/function/Predicate;)Ljava/util/List; +FN:1063,com.mediatek.jacoco2lcov.util.Utils.findDirectories(Ljava/io/File;Ljava/util/function/Predicate;)Ljava/util/List; +FN:1075,com.mediatek.jacoco2lcov.util.Utils.findPathnames(Ljava/lang/String;Ljava/util/function/Predicate;ZZ)Ljava/util/List; +FN:1088,com.mediatek.jacoco2lcov.util.Utils.findPathnames(Ljava/io/File;Ljava/util/function/Predicate;ZZ)Ljava/util/List; +FN:1092,com.mediatek.jacoco2lcov.util.Utils.lambda$findPathnames$0(ZZLjava/util/function/Predicate;Ljava/io/File;)Z 
+FN:1106,com.mediatek.jacoco2lcov.util.Utils.openInputFile(Ljava/lang/String;)Ljava/io/BufferedReader; +FN:1114,com.mediatek.jacoco2lcov.util.Utils.openInputFile(Ljava/io/File;)Ljava/io/BufferedReader; +FN:1130,com.mediatek.jacoco2lcov.util.Utils.openOutputFile(Ljava/lang/String;)Ljava/io/PrintWriter; +FN:1139,com.mediatek.jacoco2lcov.util.Utils.openOutputFile(Ljava/io/File;)Ljava/io/PrintWriter; +FN:1149,com.mediatek.jacoco2lcov.util.Utils.openOutputFile(Ljava/lang/String;Z)Ljava/io/PrintWriter; +FN:1159,com.mediatek.jacoco2lcov.util.Utils.openOutputFile(Ljava/io/File;Z)Ljava/io/PrintWriter; +FN:1175,com.mediatek.jacoco2lcov.util.Utils.removeFile(Ljava/io/File;) +FN:1180,com.mediatek.jacoco2lcov.util.Utils.removeFile(Ljava/lang/String;) +FN:1196,com.mediatek.jacoco2lcov.util.Utils.map(Ljava/util/List;Ljava/util/function/Function;)Ljava/util/List; +FN:1209,com.mediatek.jacoco2lcov.util.Utils.forEach(Ljava/util/List;Ljava/util/function/Consumer;) +FN:1222,com.mediatek.jacoco2lcov.util.Utils.keep(Ljava/util/List;Ljava/util/function/Predicate;)Ljava/util/List; +FN:1224,com.mediatek.jacoco2lcov.util.Utils.lambda$keep$1(Ljava/util/function/Predicate;Ljava/util/ArrayList;Ljava/lang/Object;) +FN:1237,com.mediatek.jacoco2lcov.util.Utils.flatMap(Ljava/util/List;Ljava/util/function/Function;)Ljava/util/List; +FN:1238,com.mediatek.jacoco2lcov.util.Utils.lambda$flatMap$2(Ljava/util/List;Ljava/util/function/Function;Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getCurrentWorkingDirectory(Ljava/io/File;) +FNDA:1,com.mediatek.jacoco2lcov.util.Utils.getCurrentWorkingDirectoryPathname(Ljava/lang/String;) +FNDA:1,com.mediatek.jacoco2lcov.util.Utils.getenv(Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getenv(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FNDA:1,com.mediatek.jacoco2lcov.util.Utils.getenvBoolean(Ljava/lang/String;Z)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getenvInteger(Ljava/lang/String;I)I +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isNull(Ljava/lang/Object;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isNotNull(Ljava/lang/Object;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.ifNull(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.ifNull(Ljava/lang/Object;Ljava/lang/Object;Ljava/util/function/Function;)Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.not(Z)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.clampMin(II)I +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.sleep(I)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.exceptionToString(Ljava/lang/Throwable;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.toString(Ljava/lang/Object;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.toString(Ljava/lang/Object;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.equals(Ljava/lang/String;Ljava/lang/String;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isEmpty(Ljava/lang/String;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isNotEmpty(Ljava/lang/String;)Z +FNDA:1,com.mediatek.jacoco2lcov.util.Utils.concatenate(Ljava/lang/String;[Ljava/lang/String;)Ljava/lang/String; +FNDA:3,com.mediatek.jacoco2lcov.util.Utils.concatenate([Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.concatenate(Ljava/lang/String;Ljava/util/List;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.concatenate(Ljava/util/List;Ljava/lang/String;)Ljava/lang/String; 
+FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isBoolean(Ljava/lang/String;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.toBoolean(Ljava/lang/String;)Z +FNDA:1,com.mediatek.jacoco2lcov.util.Utils.toBoolean(Ljava/lang/String;Z)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.toInt(Ljava/lang/String;)I +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.toInt(Ljava/lang/String;I)I +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.toInt(Ljava/lang/String;II)I +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.equals(Ljava/lang/Object;Ljava/lang/Object;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.compare(II)I +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.compare(Ljava/lang/String;Ljava/lang/String;)I +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.toListOf(Ljava/lang/Class;Ljava/util/List;)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.toListOf(Ljava/lang/Class;[Ljava/lang/Object;)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.toList([Ljava/lang/Object;)Ljava/util/List; +FNDA:1,com.mediatek.jacoco2lcov.util.Utils.createArrayOf(Ljava/lang/Class;I)[Ljava/lang/Object; +FNDA:2,com.mediatek.jacoco2lcov.util.Utils.toArrayOf(Ljava/lang/Class;[Ljava/lang/Object;)[Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.listify(Ljava/util/Collection;)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.format(Ljava/lang/String;[Ljava/lang/Object;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.append(Ljava/lang/StringBuffer;Ljava/lang/String;[Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.append(Ljava/lang/StringBuilder;Ljava/lang/String;[Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.println() +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/io/PrintStream;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/io/PrintWriter;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/lang/StringBuffer;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/lang/StringBuilder;) +FNDA:1,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/lang/String;[Ljava/lang/Object;) +FNDA:1,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/io/PrintStream;Ljava/lang/String;[Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/io/PrintWriter;Ljava/lang/String;[Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/lang/StringBuffer;Ljava/lang/String;[Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/lang/StringBuilder;Ljava/lang/String;[Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.print(Ljava/lang/String;[Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.print(Ljava/io/PrintStream;Ljava/lang/String;[Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.print(Ljava/io/PrintWriter;Ljava/lang/String;[Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.print(Ljava/lang/StringBuffer;Ljava/lang/String;[Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.print(Ljava/lang/StringBuilder;Ljava/lang/String;[Ljava/lang/Object;) +FNDA:1,com.mediatek.jacoco2lcov.util.Utils.flush() +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.flush(Ljava/io/Writer;) +FNDA:1,com.mediatek.jacoco2lcov.util.Utils.flush(Ljava/io/OutputStream;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.exists(Ljava/lang/String;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.exists(Ljava/io/File;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isFile(Ljava/lang/String;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isFile(Ljava/io/File;)Z 
+FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isAbsolute(Ljava/lang/String;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isAbsolute(Ljava/io/File;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isRelative(Ljava/lang/String;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isRelative(Ljava/io/File;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isDirectory(Ljava/lang/String;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isDirectory(Ljava/io/File;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isReadable(Ljava/lang/String;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isReadable(Ljava/io/File;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isWritable(Ljava/lang/String;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isWritable(Ljava/io/File;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getDirectory(Ljava/io/File;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getFilename(Ljava/io/File;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getFilename(Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getDirectory(Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.dropLeadingSeparator(Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.dropTrailingSeparator(Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getAbsolutePathname(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getAbsolutePathname(Ljava/io/File;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getAbsolutePathname(Ljava/lang/String;Ljava/io/File;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getAbsolutePathname(Ljava/io/File;Ljava/io/File;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getRelativePathname(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getRelativePathname(Ljava/io/File;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getRelativePathname(Ljava/lang/String;Ljava/io/File;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getRelativePathname(Ljava/io/File;Ljava/io/File;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getRelativeFile(Ljava/lang/String;Ljava/lang/String;)Ljava/io/File; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getRelativeFile(Ljava/io/File;Ljava/lang/String;)Ljava/io/File; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getRelativeFile(Ljava/lang/String;Ljava/io/File;)Ljava/io/File; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getRelativeFile(Ljava/io/File;Ljava/io/File;)Ljava/io/File; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getCanonicalPathname(Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getCanonicalPathname(Ljava/io/File;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getCanonicalFile(Ljava/lang/String;)Ljava/io/File; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getCanonicalFile(Ljava/io/File;)Ljava/io/File; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getCanonicalFile(Ljava/lang/String;Ljava/io/File;)Ljava/io/File; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getCanonicalFile(Ljava/lang/String;Ljava/lang/String;)Ljava/io/File; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.constructPathname(Ljava/lang/String;[Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.constructPathname(Ljava/io/File;[Ljava/lang/String;)Ljava/lang/String; 
+FNDA:0,com.mediatek.jacoco2lcov.util.Utils.constructPathnameFile(Ljava/lang/String;[Ljava/lang/String;)Ljava/io/File; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.constructPathnameFile(Ljava/io/File;[Ljava/lang/String;)Ljava/io/File; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.findFiles(Ljava/lang/String;Ljava/util/function/Predicate;)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.findFiles(Ljava/io/File;Ljava/util/function/Predicate;)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.findDirectories(Ljava/lang/String;Ljava/util/function/Predicate;)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.findDirectories(Ljava/io/File;Ljava/util/function/Predicate;)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.findPathnames(Ljava/lang/String;Ljava/util/function/Predicate;ZZ)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.findPathnames(Ljava/io/File;Ljava/util/function/Predicate;ZZ)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.lambda$findPathnames$0(ZZLjava/util/function/Predicate;Ljava/io/File;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.openInputFile(Ljava/lang/String;)Ljava/io/BufferedReader; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.openInputFile(Ljava/io/File;)Ljava/io/BufferedReader; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.openOutputFile(Ljava/lang/String;)Ljava/io/PrintWriter; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.openOutputFile(Ljava/io/File;)Ljava/io/PrintWriter; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.openOutputFile(Ljava/lang/String;Z)Ljava/io/PrintWriter; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.openOutputFile(Ljava/io/File;Z)Ljava/io/PrintWriter; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.removeFile(Ljava/io/File;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.removeFile(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.map(Ljava/util/List;Ljava/util/function/Function;)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.forEach(Ljava/util/List;Ljava/util/function/Consumer;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.keep(Ljava/util/List;Ljava/util/function/Predicate;)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.lambda$keep$1(Ljava/util/function/Predicate;Ljava/util/ArrayList;Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.flatMap(Ljava/util/List;Ljava/util/function/Function;)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.lambda$flatMap$2(Ljava/util/List;Ljava/util/function/Function;Ljava/lang/Object;) +FNF:122 +FNH:12 +BRDA:99,0,0,0 +BRDA:99,0,1,0 +BRDA:104,0,0,0 +BRDA:104,0,1,0 +BRDA:109,0,0,0 +BRDA:109,0,1,0 +BRDA:119,0,0,0 +BRDA:119,0,1,0 +BRDA:119,0,2,0 +BRDA:119,0,3,0 +BRDA:130,0,0,0 +BRDA:130,0,1,0 +BRDA:135,0,0,0 +BRDA:135,0,1,0 +BRDA:160,0,0,0 +BRDA:160,0,1,0 +BRDA:163,0,0,0 +BRDA:163,0,1,0 +BRDA:182,0,0,0 +BRDA:182,0,1,0 +BRDA:187,0,0,0 +BRDA:187,0,1,0 +BRDA:187,0,2,0 +BRDA:187,0,3,0 +BRDA:187,0,4,0 +BRDA:187,0,5,0 +BRDA:187,0,6,0 +BRDA:187,0,7,0 +BRDA:193,0,0,0 +BRDA:193,0,1,0 +BRDA:193,0,2,0 +BRDA:193,0,3,0 +BRDA:198,0,0,0 +BRDA:198,0,1,0 +BRDA:198,0,2,0 +BRDA:198,0,3,0 +BRDA:212,0,0,1 +BRDA:212,0,1,0 +BRDA:213,0,0,1 +BRDA:213,0,1,1 +BRDA:214,0,0,1 +BRDA:214,0,1,1 +BRDA:214,0,2,1 +BRDA:214,0,3,0 +BRDA:232,0,0,0 +BRDA:232,0,1,0 +BRDA:233,0,0,0 +BRDA:233,0,1,0 +BRDA:234,0,0,0 +BRDA:234,0,1,0 +BRDA:234,0,2,0 +BRDA:234,0,3,0 +BRDA:247,0,0,0 +BRDA:247,0,1,0 +BRDA:250,0,0,0 +BRDA:250,0,1,0 +BRDA:251,0,0,0 +BRDA:251,0,1,0 +BRDA:252,0,0,0 +BRDA:252,0,1,0 +BRDA:253,0,0,0 +BRDA:253,0,1,0 +BRDA:254,0,0,0 +BRDA:254,0,1,0 
+BRDA:256,0,0,0 +BRDA:256,0,1,0 +BRDA:257,0,0,0 +BRDA:257,0,1,0 +BRDA:258,0,0,0 +BRDA:258,0,1,0 +BRDA:259,0,0,0 +BRDA:259,0,1,0 +BRDA:260,0,0,0 +BRDA:260,0,1,0 +BRDA:272,0,0,1 +BRDA:272,0,1,0 +BRDA:273,0,0,0 +BRDA:273,0,1,0 +BRDA:274,0,0,0 +BRDA:274,0,1,0 +BRDA:275,0,0,0 +BRDA:275,0,1,0 +BRDA:276,0,0,0 +BRDA:276,0,1,0 +BRDA:277,0,0,0 +BRDA:277,0,1,0 +BRDA:279,0,0,0 +BRDA:279,0,1,0 +BRDA:280,0,0,0 +BRDA:280,0,1,0 +BRDA:281,0,0,0 +BRDA:281,0,1,0 +BRDA:282,0,0,0 +BRDA:282,0,1,0 +BRDA:283,0,0,0 +BRDA:283,0,1,0 +BRDA:311,0,0,0 +BRDA:311,0,1,0 +BRDA:315,0,0,0 +BRDA:315,0,1,0 +BRDA:316,0,0,0 +BRDA:316,0,1,0 +BRDA:316,0,2,0 +BRDA:316,0,3,0 +BRDA:318,0,0,0 +BRDA:318,0,1,0 +BRDA:318,0,2,0 +BRDA:318,0,3,0 +BRDA:335,0,0,0 +BRDA:335,0,1,0 +BRDA:335,0,2,0 +BRDA:335,0,3,0 +BRDA:335,0,4,0 +BRDA:335,0,5,0 +BRDA:335,0,6,0 +BRDA:335,0,7,0 +BRDA:343,0,0,0 +BRDA:343,0,1,0 +BRDA:343,0,2,0 +BRDA:343,0,3,0 +BRDA:352,0,0,0 +BRDA:352,0,1,0 +BRDA:352,0,2,0 +BRDA:352,0,3,0 +BRDA:353,0,0,0 +BRDA:353,0,1,0 +BRDA:356,0,0,0 +BRDA:356,0,1,0 +BRDA:357,0,0,0 +BRDA:357,0,1,0 +BRDA:378,0,0,0 +BRDA:378,0,1,0 +BRDA:381,0,0,0 +BRDA:381,0,1,0 +BRDA:407,0,0,0 +BRDA:407,0,1,0 +BRDA:421,0,0,0 +BRDA:421,0,1,0 +BRDA:448,0,0,1 +BRDA:448,0,1,0 +BRDA:451,0,0,1 +BRDA:451,0,1,1 +BRDA:466,0,0,0 +BRDA:466,0,1,0 +BRDA:609,0,0,0 +BRDA:609,0,1,0 +BRDA:609,0,2,0 +BRDA:609,0,3,0 +BRDA:614,0,0,0 +BRDA:614,0,1,0 +BRDA:614,0,2,0 +BRDA:614,0,3,0 +BRDA:619,0,0,0 +BRDA:619,0,1,0 +BRDA:622,0,0,0 +BRDA:622,0,1,0 +BRDA:622,0,2,0 +BRDA:622,0,3,0 +BRDA:628,0,0,0 +BRDA:628,0,1,0 +BRDA:628,0,2,0 +BRDA:628,0,3,0 +BRDA:629,0,0,0 +BRDA:629,0,1,0 +BRDA:634,0,0,0 +BRDA:634,0,1,0 +BRDA:634,0,2,0 +BRDA:634,0,3,0 +BRDA:639,0,0,0 +BRDA:639,0,1,0 +BRDA:639,0,2,0 +BRDA:639,0,3,0 +BRDA:644,0,0,0 +BRDA:644,0,1,0 +BRDA:644,0,2,0 +BRDA:644,0,3,0 +BRDA:649,0,0,0 +BRDA:649,0,1,0 +BRDA:649,0,2,0 +BRDA:649,0,3,0 +BRDA:655,0,0,0 +BRDA:655,0,1,0 +BRDA:655,0,2,0 +BRDA:655,0,3,0 +BRDA:661,0,0,0 +BRDA:661,0,1,0 +BRDA:661,0,2,0 +BRDA:661,0,3,0 +BRDA:666,0,0,0 +BRDA:666,0,1,0 +BRDA:666,0,2,0 +BRDA:666,0,3,0 +BRDA:671,0,0,0 +BRDA:671,0,1,0 +BRDA:671,0,2,0 +BRDA:671,0,3,0 +BRDA:676,0,0,0 +BRDA:676,0,1,0 +BRDA:676,0,2,0 +BRDA:676,0,3,0 +BRDA:681,0,0,0 +BRDA:681,0,1,0 +BRDA:681,0,2,0 +BRDA:681,0,3,0 +BRDA:686,0,0,0 +BRDA:686,0,1,0 +BRDA:692,0,0,0 +BRDA:692,0,1,0 +BRDA:698,0,0,0 +BRDA:698,0,1,0 +BRDA:700,0,0,0 +BRDA:700,0,1,0 +BRDA:705,0,0,0 +BRDA:705,0,1,0 +BRDA:706,0,0,0 +BRDA:706,0,1,0 +BRDA:708,0,0,0 +BRDA:708,0,1,0 +BRDA:716,0,0,0 +BRDA:716,0,1,0 +BRDA:718,0,0,0 +BRDA:718,0,1,0 +BRDA:718,0,2,0 +BRDA:718,0,3,0 +BRDA:730,0,0,0 +BRDA:730,0,1,0 +BRDA:732,0,0,0 +BRDA:732,0,1,0 +BRDA:732,0,2,0 +BRDA:732,0,3,0 +BRDA:744,0,0,0 +BRDA:744,0,1,0 +BRDA:745,0,0,0 +BRDA:745,0,1,0 +BRDA:746,0,0,0 +BRDA:746,0,1,0 +BRDA:756,0,0,0 +BRDA:756,0,1,0 +BRDA:757,0,0,0 +BRDA:757,0,1,0 +BRDA:758,0,0,0 +BRDA:758,0,1,0 +BRDA:768,0,0,0 +BRDA:768,0,1,0 +BRDA:769,0,0,0 +BRDA:769,0,1,0 +BRDA:770,0,0,0 +BRDA:770,0,1,0 +BRDA:780,0,0,0 +BRDA:780,0,1,0 +BRDA:781,0,0,0 +BRDA:781,0,1,0 +BRDA:782,0,0,0 +BRDA:782,0,1,0 +BRDA:795,0,0,0 +BRDA:795,0,1,0 +BRDA:796,0,0,0 +BRDA:796,0,1,0 +BRDA:809,0,0,0 +BRDA:809,0,1,0 +BRDA:810,0,0,0 +BRDA:810,0,1,0 +BRDA:822,0,0,0 +BRDA:822,0,1,0 +BRDA:823,0,0,0 +BRDA:823,0,1,0 +BRDA:836,0,0,0 +BRDA:836,0,1,0 +BRDA:848,0,0,0 +BRDA:848,0,1,0 +BRDA:849,0,0,0 +BRDA:849,0,1,0 +BRDA:861,0,0,0 +BRDA:861,0,1,0 +BRDA:862,0,0,0 +BRDA:862,0,1,0 +BRDA:874,0,0,0 +BRDA:874,0,1,0 +BRDA:875,0,0,0 +BRDA:875,0,1,0 +BRDA:887,0,0,0 +BRDA:887,0,1,0 +BRDA:888,0,0,0 +BRDA:888,0,1,0 +BRDA:889,0,0,0 +BRDA:889,0,1,0 
+BRDA:891,0,0,0 +BRDA:891,0,1,0 +BRDA:899,0,0,0 +BRDA:899,0,1,0 +BRDA:911,0,0,0 +BRDA:911,0,1,0 +BRDA:923,0,0,0 +BRDA:923,0,1,0 +BRDA:935,0,0,0 +BRDA:935,0,1,0 +BRDA:950,0,0,0 +BRDA:950,0,1,0 +BRDA:952,0,0,0 +BRDA:952,0,1,0 +BRDA:955,0,0,0 +BRDA:955,0,1,0 +BRDA:973,0,0,0 +BRDA:973,0,1,0 +BRDA:982,0,0,0 +BRDA:982,0,1,0 +BRDA:999,0,0,0 +BRDA:999,0,1,0 +BRDA:1009,0,0,0 +BRDA:1009,0,1,0 +BRDA:1012,0,0,0 +BRDA:1012,0,1,0 +BRDA:1013,0,0,0 +BRDA:1013,0,1,0 +BRDA:1075,0,0,0 +BRDA:1075,0,1,0 +BRDA:1088,0,0,0 +BRDA:1088,0,1,0 +BRDA:1088,0,2,0 +BRDA:1088,0,3,0 +BRDA:1088,0,4,0 +BRDA:1088,0,5,0 +BRDA:1088,0,6,0 +BRDA:1088,0,7,0 +BRDA:1093,0,0,0 +BRDA:1093,0,1,0 +BRDA:1093,0,2,0 +BRDA:1093,0,3,0 +BRDA:1093,0,4,0 +BRDA:1093,0,5,0 +BRDA:1093,0,6,0 +BRDA:1093,0,7,0 +BRDA:1116,0,0,0 +BRDA:1116,0,1,0 +BRDA:1116,0,2,0 +BRDA:1116,0,3,0 +BRDA:1162,0,0,0 +BRDA:1162,0,1,0 +BRDA:1164,0,0,0 +BRDA:1164,0,1,0 +BRDA:1164,0,2,0 +BRDA:1164,0,3,0 +BRDA:1180,0,0,0 +BRDA:1180,0,1,0 +BRDA:1196,0,0,0 +BRDA:1196,0,1,0 +BRDA:1198,0,0,0 +BRDA:1198,0,1,0 +BRDA:1209,0,0,0 +BRDA:1209,0,1,0 +BRDA:1209,0,2,0 +BRDA:1209,0,3,0 +BRDA:1211,0,0,0 +BRDA:1211,0,1,0 +BRDA:1224,0,0,0 +BRDA:1224,0,1,0 +BRF:356 +BRH:10 +DA:54,0 +DA:59,1 +DA:67,1 +DA:75,0 +DA:83,1 +DA:91,0 +DA:99,0 +DA:104,0 +DA:109,0 +DA:119,0 +DA:120,0 +DA:130,0 +DA:135,0 +DA:146,0 +DA:148,0 +DA:149,0 +DA:150,0 +DA:151,0 +DA:152,0 +DA:159,0 +DA:160,0 +DA:161,0 +DA:162,0 +DA:163,0 +DA:164,0 +DA:166,0 +DA:174,0 +DA:182,0 +DA:187,0 +DA:193,0 +DA:198,0 +DA:203,1 +DA:211,1 +DA:212,1 +DA:213,2 +DA:214,3 +DA:215,1 +DA:217,1 +DA:222,0 +DA:230,0 +DA:231,0 +DA:232,0 +DA:233,0 +DA:234,0 +DA:235,0 +DA:236,0 +DA:237,0 +DA:239,0 +DA:247,0 +DA:248,0 +DA:250,0 +DA:251,0 +DA:252,0 +DA:253,0 +DA:254,0 +DA:256,0 +DA:257,0 +DA:258,0 +DA:259,0 +DA:260,0 +DA:266,0 +DA:271,1 +DA:272,1 +DA:273,0 +DA:274,0 +DA:275,0 +DA:276,0 +DA:277,0 +DA:278,0 +DA:279,0 +DA:280,0 +DA:281,0 +DA:282,0 +DA:283,0 +DA:284,0 +DA:286,1 +DA:294,0 +DA:302,0 +DA:311,0 +DA:314,0 +DA:315,0 +DA:316,0 +DA:317,0 +DA:318,0 +DA:319,0 +DA:321,0 +DA:322,0 +DA:323,0 +DA:335,0 +DA:343,0 +DA:352,0 +DA:353,0 +DA:354,0 +DA:356,0 +DA:357,0 +DA:358,0 +DA:377,0 +DA:378,0 +DA:379,0 +DA:380,0 +DA:381,0 +DA:384,0 +DA:385,0 +DA:388,0 +DA:407,0 +DA:408,0 +DA:411,0 +DA:421,0 +DA:430,1 +DA:447,1 +DA:448,1 +DA:449,1 +DA:450,1 +DA:451,2 +DA:454,1 +DA:455,1 +DA:458,1 +DA:465,0 +DA:466,0 +DA:467,0 +DA:475,0 +DA:481,0 +DA:482,0 +DA:487,0 +DA:488,0 +DA:493,0 +DA:494,0 +DA:498,0 +DA:499,0 +DA:503,0 +DA:504,0 +DA:508,0 +DA:509,0 +DA:513,0 +DA:514,0 +DA:519,1 +DA:520,1 +DA:524,1 +DA:525,1 +DA:526,1 +DA:530,0 +DA:531,0 +DA:532,0 +DA:540,0 +DA:541,0 +DA:542,0 +DA:550,0 +DA:551,0 +DA:552,0 +DA:557,0 +DA:558,0 +DA:562,0 +DA:563,0 +DA:567,0 +DA:568,0 +DA:573,0 +DA:574,0 +DA:579,0 +DA:580,0 +DA:585,1 +DA:586,1 +DA:591,0 +DA:592,0 +DA:593,0 +DA:594,0 +DA:599,1 +DA:600,0 +DA:601,1 +DA:602,1 +DA:609,0 +DA:614,0 +DA:619,0 +DA:620,0 +DA:622,0 +DA:628,0 +DA:629,0 +DA:634,0 +DA:639,0 +DA:644,0 +DA:649,0 +DA:655,0 +DA:661,0 +DA:666,0 +DA:671,0 +DA:676,0 +DA:681,0 +DA:686,0 +DA:687,0 +DA:692,0 +DA:693,0 +DA:698,0 +DA:699,0 +DA:700,0 +DA:705,0 +DA:706,0 +DA:707,0 +DA:708,0 +DA:716,0 +DA:717,0 +DA:718,0 +DA:719,0 +DA:720,0 +DA:722,0 +DA:730,0 +DA:731,0 +DA:732,0 +DA:733,0 +DA:734,0 +DA:736,0 +DA:744,0 +DA:745,0 +DA:746,0 +DA:747,0 +DA:748,0 +DA:756,0 +DA:757,0 +DA:758,0 +DA:759,0 +DA:760,0 +DA:768,0 +DA:769,0 +DA:770,0 +DA:771,0 +DA:772,0 +DA:780,0 +DA:781,0 +DA:782,0 +DA:783,0 +DA:784,0 +DA:795,0 +DA:796,0 +DA:797,0 +DA:809,0 +DA:810,0 +DA:811,0 +DA:822,0 +DA:823,0 
+DA:824,0 +DA:835,0 +DA:836,0 +DA:848,0 +DA:849,0 +DA:850,0 +DA:861,0 +DA:862,0 +DA:863,0 +DA:874,0 +DA:875,0 +DA:876,0 +DA:887,0 +DA:888,0 +DA:889,0 +DA:890,0 +DA:891,0 +DA:899,0 +DA:900,0 +DA:901,0 +DA:902,0 +DA:911,0 +DA:912,0 +DA:913,0 +DA:914,0 +DA:923,0 +DA:924,0 +DA:925,0 +DA:926,0 +DA:935,0 +DA:936,0 +DA:937,0 +DA:938,0 +DA:947,0 +DA:950,0 +DA:952,0 +DA:953,0 +DA:954,0 +DA:955,0 +DA:958,0 +DA:959,0 +DA:960,0 +DA:961,0 +DA:962,0 +DA:965,0 +DA:973,0 +DA:974,0 +DA:982,0 +DA:983,0 +DA:991,0 +DA:999,0 +DA:1000,0 +DA:1008,0 +DA:1009,0 +DA:1011,0 +DA:1012,0 +DA:1013,0 +DA:1014,0 +DA:1017,0 +DA:1018,0 +DA:1019,0 +DA:1030,0 +DA:1041,0 +DA:1052,0 +DA:1063,0 +DA:1075,0 +DA:1076,0 +DA:1088,0 +DA:1090,0 +DA:1091,0 +DA:1092,0 +DA:1093,0 +DA:1094,0 +DA:1096,0 +DA:1098,0 +DA:1106,0 +DA:1114,0 +DA:1116,0 +DA:1118,0 +DA:1119,0 +DA:1120,0 +DA:1121,0 +DA:1130,0 +DA:1139,0 +DA:1149,0 +DA:1159,0 +DA:1161,0 +DA:1162,0 +DA:1163,0 +DA:1164,0 +DA:1165,0 +DA:1167,0 +DA:1168,0 +DA:1169,0 +DA:1170,0 +DA:1175,0 +DA:1176,0 +DA:1180,0 +DA:1181,0 +DA:1183,0 +DA:1184,0 +DA:1185,0 +DA:1186,0 +DA:1196,0 +DA:1197,0 +DA:1198,0 +DA:1199,0 +DA:1201,0 +DA:1209,0 +DA:1210,0 +DA:1211,0 +DA:1212,0 +DA:1215,0 +DA:1222,0 +DA:1223,0 +DA:1224,0 +DA:1225,0 +DA:1226,0 +DA:1237,0 +DA:1238,0 +DA:1239,0 +LH:32 +LF:358 +end_of_record diff --git a/tests/lcov/lambda/lambda.sh b/tests/lcov/lambda/lambda.sh new file mode 100755 index 00000000..f24c1462 --- /dev/null +++ b/tests/lcov/lambda/lambda.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +# lambda function filtering, in java +set +x + +source ../../common.tst + +rm -rf *.txt* *.json dumper* report lambda *.gcda *.gcno *.info + +clean_cover + +if [[ 1 == "$CLEAN_ONLY" ]] ; then + exit 0 +fi + +LCOV_OPTS="--branch $PARALLEL $PROFILE" + +# lambda function on same line as function decl +$COVER $LCOV_TOOL $LCOV_OPTS -o filter.info -a 'lambda*.dat' +if [ 0 != $? ] ; then + echo "Error: unexpected error code from lcov" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# did the two get merged? +COUNT=`grep -c -E 'FNL:.+,319' filter.info` +if [ "$COUNT" != 1 ] ; then + echo "ERROR: did not merge the lambda" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +#did we get the right end line for toArrayOf? +grep -c -E 'FNL:.+,303,309' filter.info +if [ 0 != $? 
] ; then + echo "ERROR: computed wrong end line" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + + +echo "Tests passed" + +if [ "x$COVER" != "x" ] && [ $LOCAL_COVERAGE == 1 ]; then + cover +fi diff --git a/tests/lcov/lambda/lambda2.dat b/tests/lcov/lambda/lambda2.dat new file mode 100644 index 00000000..40cbd8d2 --- /dev/null +++ b/tests/lcov/lambda/lambda2.dat @@ -0,0 +1,2654 @@ +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/Jacoco2LCOV.java +FN:51,com.mediatek.jacoco2lcov.Jacoco2LCOV.() +FN:63,com.mediatek.jacoco2lcov.Jacoco2LCOV.main([Ljava/lang/String;) +FN:78,com.mediatek.jacoco2lcov.Jacoco2LCOV.([Ljava/lang/String;) +FN:116,com.mediatek.jacoco2lcov.Jacoco2LCOV.showUsage() +FN:147,com.mediatek.jacoco2lcov.Jacoco2LCOV.parseArgs([Ljava/lang/String;)I +FN:213,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$parseArgs$0(Lcom/mediatek/jacoco2lcov/Plugin;) +FN:226,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$parseArgs$1(Ljava/lang/String;) +FN:248,com.mediatek.jacoco2lcov.Jacoco2LCOV.isValid(Lcom/mediatek/jacoco2lcov/util/Either;) +FN:337,com.mediatek.jacoco2lcov.Jacoco2LCOV.getAbsolutePath(Ljava/lang/String;)Ljava/lang/String; +FN:347,com.mediatek.jacoco2lcov.Jacoco2LCOV.getRelativePath(Ljava/lang/String;)Ljava/lang/String; +FN:362,com.mediatek.jacoco2lcov.Jacoco2LCOV.run(I) +FN:372,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$run$2(Ljava/lang/String;) +FN:375,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$run$3(Ljava/lang/String;) +FN:390,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$run$4(Ljava/lang/String;) +FN:395,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$run$5(Ljava/lang/String;) +FNDA:1,com.mediatek.jacoco2lcov.Jacoco2LCOV.() +FNDA:1,com.mediatek.jacoco2lcov.Jacoco2LCOV.main([Ljava/lang/String;) +FNDA:1,com.mediatek.jacoco2lcov.Jacoco2LCOV.([Ljava/lang/String;) +FNDA:1,com.mediatek.jacoco2lcov.Jacoco2LCOV.showUsage() +FNDA:2,com.mediatek.jacoco2lcov.Jacoco2LCOV.parseArgs([Ljava/lang/String;)I +FNDA:0,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$parseArgs$0(Lcom/mediatek/jacoco2lcov/Plugin;) +FNDA:0,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$parseArgs$1(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.Jacoco2LCOV.isValid(Lcom/mediatek/jacoco2lcov/util/Either;) +FNDA:0,com.mediatek.jacoco2lcov.Jacoco2LCOV.getAbsolutePath(Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.Jacoco2LCOV.getRelativePath(Ljava/lang/String;)Ljava/lang/String; +FNDA:1,com.mediatek.jacoco2lcov.Jacoco2LCOV.run(I) +FNDA:0,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$run$2(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$run$3(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$run$4(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.Jacoco2LCOV.lambda$run$5(Ljava/lang/String;) +FNF:15 +FNH:6 +BRDA:68,0,0,1 +BRDA:68,0,1,0 +BRDA:151,0,0,1 +BRDA:151,0,1,1 +BRDA:151,0,2,1 +BRDA:151,0,3,1 +BRDA:151,0,4,0 +BRDA:151,0,5,0 +BRDA:154,0,0,1 +BRDA:154,0,1,1 +BRDA:154,0,2,0 +BRDA:154,0,3,0 +BRDA:158,0,0,0 +BRDA:158,0,1,0 +BRDA:158,0,2,0 +BRDA:158,0,3,0 +BRDA:162,0,0,0 +BRDA:162,0,1,0 +BRDA:162,0,2,0 +BRDA:162,0,3,0 +BRDA:166,0,0,0 +BRDA:166,0,1,0 +BRDA:166,0,2,0 +BRDA:166,0,3,0 +BRDA:170,0,0,0 +BRDA:170,0,1,0 +BRDA:170,0,2,0 +BRDA:170,0,3,0 +BRDA:174,0,0,0 +BRDA:174,0,1,0 +BRDA:174,0,2,0 +BRDA:174,0,3,0 +BRDA:179,0,0,0 +BRDA:179,0,1,0 +BRDA:179,0,2,0 +BRDA:179,0,3,0 +BRDA:184,0,0,0 +BRDA:184,0,1,0 +BRDA:184,0,2,0 +BRDA:184,0,3,0 +BRDA:203,0,0,1 +BRDA:203,0,1,0 +BRDA:207,0,0,0 +BRDA:207,0,1,0 +BRDA:227,0,0,0 +BRDA:227,0,1,0 +BRDA:228,0,0,0 
+BRDA:228,0,1,0 +BRDA:231,0,0,1 +BRDA:231,0,1,0 +BRDA:234,0,0,1 +BRDA:234,0,1,0 +BRDA:236,0,0,1 +BRDA:236,0,1,0 +BRDA:248,0,0,0 +BRDA:248,0,1,0 +BRDA:253,0,0,0 +BRDA:253,0,1,0 +BRDA:256,0,0,0 +BRDA:256,0,1,0 +BRDA:262,0,0,0 +BRDA:262,0,1,0 +BRDA:267,0,0,0 +BRDA:267,0,1,0 +BRDA:268,0,0,0 +BRDA:268,0,1,0 +BRDA:273,0,0,0 +BRDA:273,0,1,0 +BRDA:280,0,0,0 +BRDA:280,0,1,0 +BRDA:290,0,0,0 +BRDA:290,0,1,0 +BRDA:292,0,0,0 +BRDA:292,0,1,0 +BRDA:300,0,0,0 +BRDA:300,0,1,0 +BRDA:305,0,0,0 +BRDA:305,0,1,0 +BRDA:307,0,0,0 +BRDA:307,0,1,0 +BRDA:315,0,0,0 +BRDA:315,0,1,0 +BRDA:320,0,0,0 +BRDA:320,0,1,0 +BRDA:322,0,0,0 +BRDA:322,0,1,0 +BRDA:337,0,0,0 +BRDA:337,0,1,0 +BRDA:347,0,0,0 +BRDA:347,0,1,0 +BRDA:350,0,0,0 +BRDA:350,0,1,0 +BRDA:362,0,0,1 +BRDA:362,0,1,0 +BRDA:365,0,0,0 +BRDA:365,0,1,0 +BRF:96 +BRH:12 +DA:51,1 +DA:52,1 +DA:63,1 +DA:68,1 +DA:72,0 +DA:73,0 +DA:78,1 +DA:81,1 +DA:84,1 +DA:85,1 +DA:88,1 +DA:91,1 +DA:94,1 +DA:97,1 +DA:100,1 +DA:106,1 +DA:107,1 +DA:108,1 +DA:116,1 +DA:137,1 +DA:138,1 +DA:147,1 +DA:148,1 +DA:150,1 +DA:151,4 +DA:152,1 +DA:154,2 +DA:155,1 +DA:158,0 +DA:159,0 +DA:162,0 +DA:163,0 +DA:166,0 +DA:167,0 +DA:170,0 +DA:171,0 +DA:174,0 +DA:175,0 +DA:176,0 +DA:179,0 +DA:180,0 +DA:181,0 +DA:184,0 +DA:185,0 +DA:186,0 +DA:191,0 +DA:192,0 +DA:193,0 +DA:199,1 +DA:200,1 +DA:201,1 +DA:203,1 +DA:207,0 +DA:208,0 +DA:210,0 +DA:212,0 +DA:213,0 +DA:214,0 +DA:215,0 +DA:216,0 +DA:217,0 +DA:218,0 +DA:219,0 +DA:220,0 +DA:221,0 +DA:225,0 +DA:226,0 +DA:227,0 +DA:228,0 +DA:231,1 +DA:232,1 +DA:234,1 +DA:235,0 +DA:236,1 +DA:237,1 +DA:239,0 +DA:248,0 +DA:249,0 +DA:250,0 +DA:253,0 +DA:254,0 +DA:255,0 +DA:256,0 +DA:257,0 +DA:258,0 +DA:261,0 +DA:262,0 +DA:263,0 +DA:264,0 +DA:266,0 +DA:267,0 +DA:268,0 +DA:269,0 +DA:270,0 +DA:273,0 +DA:274,0 +DA:275,0 +DA:279,0 +DA:280,0 +DA:281,0 +DA:282,0 +DA:290,0 +DA:291,0 +DA:292,0 +DA:293,0 +DA:294,0 +DA:298,0 +DA:300,0 +DA:301,0 +DA:302,0 +DA:305,0 +DA:306,0 +DA:307,0 +DA:308,0 +DA:309,0 +DA:313,0 +DA:315,0 +DA:316,0 +DA:317,0 +DA:320,0 +DA:321,0 +DA:322,0 +DA:323,0 +DA:324,0 +DA:328,0 +DA:330,0 +DA:337,0 +DA:338,0 +DA:340,0 +DA:347,0 +DA:348,0 +DA:350,0 +DA:351,0 +DA:352,0 +DA:362,1 +DA:364,0 +DA:365,0 +DA:366,0 +DA:367,0 +DA:368,0 +DA:369,0 +DA:370,0 +DA:371,0 +DA:372,0 +DA:373,0 +DA:374,0 +DA:375,0 +DA:376,0 +DA:381,0 +DA:388,0 +DA:390,0 +DA:392,0 +DA:393,0 +DA:395,0 +DA:397,0 +DA:398,0 +DA:400,0 +DA:401,0 +DA:403,0 +DA:404,0 +DA:406,0 +LH:36 +LF:161 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/NoCoverageGenerated.java +FNF:0 +FNH:0 +BRF:0 +BRH:0 +LH:0 +LF:0 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/Plugin.java +FN:35,com.mediatek.jacoco2lcov.Plugin.getPlugins(Ljava/lang/String;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:45,com.mediatek.jacoco2lcov.Plugin.getPlugins(Ljava/io/File;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:57,com.mediatek.jacoco2lcov.Plugin.isSourcePlugin(Ljava/io/File;)Z +FN:65,com.mediatek.jacoco2lcov.Plugin.findSourceDirs(Ljava/io/File;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:79,com.mediatek.jacoco2lcov.Plugin.findClassDirs(Ljava/io/File;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:103,com.mediatek.jacoco2lcov.Plugin.(Ljava/io/File;) +FN:115,com.mediatek.jacoco2lcov.Plugin.toString(Ljava/lang/String;) +FN:127,com.mediatek.jacoco2lcov.Plugin.addSourceDirectory(Ljava/lang/String;) +FN:137,com.mediatek.jacoco2lcov.Plugin.getSourceDirectories(Lcom/mediatek/jacoco2lcov/util/ListF;) +FN:146,com.mediatek.jacoco2lcov.Plugin.addClassDirectory(Ljava/lang/String;) 
+FN:156,com.mediatek.jacoco2lcov.Plugin.getClassDirectories(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.Plugin.getPlugins(Ljava/lang/String;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.Plugin.getPlugins(Ljava/io/File;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.Plugin.isSourcePlugin(Ljava/io/File;)Z +FNDA:0,com.mediatek.jacoco2lcov.Plugin.findSourceDirs(Ljava/io/File;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.Plugin.findClassDirs(Ljava/io/File;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.Plugin.(Ljava/io/File;) +FNDA:0,com.mediatek.jacoco2lcov.Plugin.toString(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.Plugin.addSourceDirectory(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.Plugin.getSourceDirectories(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.Plugin.addClassDirectory(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.Plugin.getClassDirectories(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNF:11 +FNH:0 +BRDA:35,0,0,0 +BRDA:35,0,1,0 +BRDA:45,0,0,0 +BRDA:45,0,1,0 +BRDA:46,0,0,0 +BRDA:46,0,1,0 +BRDA:57,0,0,0 +BRDA:57,0,1,0 +BRDA:127,0,0,0 +BRDA:127,0,1,0 +BRDA:128,0,0,0 +BRDA:128,0,1,0 +BRDA:146,0,0,0 +BRDA:146,0,1,0 +BRDA:147,0,0,0 +BRDA:147,0,1,0 +BRF:16 +BRH:0 +DA:35,0 +DA:36,0 +DA:45,0 +DA:46,0 +DA:47,0 +DA:48,0 +DA:50,0 +DA:51,0 +DA:52,0 +DA:57,0 +DA:65,0 +DA:66,0 +DA:67,0 +DA:68,0 +DA:69,0 +DA:70,0 +DA:71,0 +DA:79,0 +DA:80,0 +DA:81,0 +DA:82,0 +DA:83,0 +DA:84,0 +DA:103,0 +DA:104,0 +DA:105,0 +DA:106,0 +DA:107,0 +DA:115,0 +DA:127,0 +DA:128,0 +DA:129,0 +DA:130,0 +DA:131,0 +DA:137,0 +DA:146,0 +DA:147,0 +DA:148,0 +DA:149,0 +DA:150,0 +DA:156,0 +LH:0 +LF:41 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/jacoco/JacocoData.java +FN:46,com.mediatek.jacoco2lcov.jacoco.JacocoData.(Ljava/lang/String;Lorg/jacoco/core/tools/ExecFileLoader;Lorg/jacoco/core/analysis/CoverageBuilder;) +FN:57,com.mediatek.jacoco2lcov.jacoco.JacocoData.getSourceFileData(Lcom/mediatek/jacoco2lcov/util/ListF;) +FN:62,com.mediatek.jacoco2lcov.jacoco.JacocoData.getPackageData(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoData.(Ljava/lang/String;Lorg/jacoco/core/tools/ExecFileLoader;Lorg/jacoco/core/analysis/CoverageBuilder;) +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoData.getSourceFileData(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoData.getPackageData(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNF:3 +FNH:0 +BRF:0 +BRH:0 +DA:46,0 +DA:47,0 +DA:48,0 +DA:49,0 +DA:50,0 +DA:57,0 +DA:62,0 +LH:0 +LF:7 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/jacoco/JacocoUtils.java +FN:126,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.loadJacocoData(Ljava/lang/String;Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:135,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$loadJacocoData$1(Lcom/mediatek/jacoco2lcov/util/ListF;Ljava/lang/String;Lorg/jacoco/core/tools/ExecFileLoader;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:138,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$loadJacocoData$0(Ljava/lang/String;Lorg/jacoco/core/tools/ExecFileLoader;Lorg/jacoco/core/analysis/CoverageBuilder;)Lcom/mediatek/jacoco2lcov/jacoco/JacocoData; +FN:146,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.loadCoverageData(Ljava/lang/String;)Lcom/mediatek/jacoco2lcov/util/Either; 
+FN:175,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.analyzeData(Lorg/jacoco/core/tools/ExecFileLoader;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:181,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$analyzeData$2(Lorg/jacoco/core/analysis/Analyzer;Ljava/io/File;)Ljava/lang/Throwable; +FN:195,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$analyzeData$4(Lcom/mediatek/jacoco2lcov/util/ListF;)Ljava/lang/String; +FN:196,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$analyzeData$3(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FN:206,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.getSourceLineData(Lorg/jacoco/core/analysis/ISourceFileCoverage;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:209,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$getSourceLineData$6(Lorg/jacoco/core/analysis/ISourceFileCoverage;Ljava/lang/Integer;)Lcom/mediatek/jacoco2lcov/util/Pair; +FN:210,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$getSourceLineData$5(Ljava/lang/Integer;Lorg/jacoco/core/analysis/ILine;)Lcom/mediatek/jacoco2lcov/util/Pair; +FN:255,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$getSourceLocator$7(Lorg/jacoco/report/MultiSourceFileLocator;ILjava/io/File;) +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.loadJacocoData(Ljava/lang/String;Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$loadJacocoData$1(Lcom/mediatek/jacoco2lcov/util/ListF;Ljava/lang/String;Lorg/jacoco/core/tools/ExecFileLoader;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$loadJacocoData$0(Ljava/lang/String;Lorg/jacoco/core/tools/ExecFileLoader;Lorg/jacoco/core/analysis/CoverageBuilder;)Lcom/mediatek/jacoco2lcov/jacoco/JacocoData; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.loadCoverageData(Ljava/lang/String;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.analyzeData(Lorg/jacoco/core/tools/ExecFileLoader;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$analyzeData$2(Lorg/jacoco/core/analysis/Analyzer;Ljava/io/File;)Ljava/lang/Throwable; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$analyzeData$4(Lcom/mediatek/jacoco2lcov/util/ListF;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$analyzeData$3(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.getSourceLineData(Lorg/jacoco/core/analysis/ISourceFileCoverage;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$getSourceLineData$6(Lorg/jacoco/core/analysis/ISourceFileCoverage;Ljava/lang/Integer;)Lcom/mediatek/jacoco2lcov/util/Pair; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$getSourceLineData$5(Ljava/lang/Integer;Lorg/jacoco/core/analysis/ILine;)Lcom/mediatek/jacoco2lcov/util/Pair; +FNDA:0,com.mediatek.jacoco2lcov.jacoco.JacocoUtils.lambda$getSourceLocator$7(Lorg/jacoco/report/MultiSourceFileLocator;ILjava/io/File;) +FNF:12 +FNH:0 +BRDA:126,0,0,0 +BRDA:126,0,1,0 +BRDA:152,0,0,0 +BRDA:152,0,1,0 +BRDA:152,0,2,0 +BRDA:152,0,3,0 +BRDA:156,0,0,0 +BRDA:156,0,1,0 +BRDA:166,0,0,0 +BRDA:166,0,1,0 +BRF:10 +BRH:0 +DA:126,0 +DA:127,0 +DA:128,0 +DA:132,0 +DA:135,0 +DA:138,0 +DA:146,0 +DA:147,0 +DA:149,0 +DA:150,0 +DA:151,0 +DA:152,0 +DA:153,0 +DA:156,0 +DA:157,0 +DA:161,0 +DA:165,0 
+DA:166,0 +DA:175,0 +DA:177,0 +DA:178,0 +DA:180,0 +DA:181,0 +DA:183,0 +DA:184,0 +DA:185,0 +DA:186,0 +DA:187,0 +DA:191,0 +DA:193,0 +DA:194,0 +DA:195,0 +DA:196,0 +DA:206,0 +DA:207,0 +DA:208,0 +DA:209,0 +DA:210,0 +DA:211,0 +DA:212,0 +DA:255,0 +LH:0 +LF:41 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/lcov/ClassData.java +FN:33,com.mediatek.jacoco2lcov.lcov.ClassData.() +FN:61,com.mediatek.jacoco2lcov.lcov.ClassData.(Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;Lorg/jacoco/core/analysis/IClassCoverage;) +FN:76,com.mediatek.jacoco2lcov.lcov.ClassData.sort() +FN:96,com.mediatek.jacoco2lcov.lcov.ClassData.getSourceFileData(Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;) +FN:103,com.mediatek.jacoco2lcov.lcov.ClassData.getPackageName(Ljava/lang/String;) +FN:110,com.mediatek.jacoco2lcov.lcov.ClassData.getClassName(Ljava/lang/String;) +FN:117,com.mediatek.jacoco2lcov.lcov.ClassData.getFirstLine(I) +FN:124,com.mediatek.jacoco2lcov.lcov.ClassData.getLastLine(I) +FN:131,com.mediatek.jacoco2lcov.lcov.ClassData.getMethods(Ljava/util/List;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.ClassData.() +FNDA:0,com.mediatek.jacoco2lcov.lcov.ClassData.(Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;Lorg/jacoco/core/analysis/IClassCoverage;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.ClassData.sort() +FNDA:0,com.mediatek.jacoco2lcov.lcov.ClassData.getSourceFileData(Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.ClassData.getPackageName(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.ClassData.getClassName(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.ClassData.getFirstLine(I) +FNDA:0,com.mediatek.jacoco2lcov.lcov.ClassData.getLastLine(I) +FNDA:0,com.mediatek.jacoco2lcov.lcov.ClassData.getMethods(Ljava/util/List;) +FN:34,com.mediatek.jacoco2lcov.lcov.ClassData$1.() +FNDA:0,com.mediatek.jacoco2lcov.lcov.ClassData$1.() +FNF:10 +FNH:0 +BRDA:65,0,0,0 +BRDA:65,0,1,0 +BRDA:67,0,0,0 +BRDA:67,0,1,0 +BRF:4 +BRH:0 +DA:33,0 +DA:34,0 +DA:61,0 +DA:62,0 +DA:63,0 +DA:64,0 +DA:65,0 +DA:66,0 +DA:67,0 +DA:68,0 +DA:69,0 +DA:76,0 +DA:77,0 +DA:96,0 +DA:103,0 +DA:110,0 +DA:117,0 +DA:124,0 +DA:131,0 +LH:0 +LF:19 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/lcov/LCOVUtils.java +FN:72,com.mediatek.jacoco2lcov.lcov.LCOVUtils.() +FN:77,com.mediatek.jacoco2lcov.lcov.LCOVUtils.dispose() +FN:93,com.mediatek.jacoco2lcov.lcov.LCOVUtils.generateLCOVReport(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:104,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$0(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FN:108,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$1(Ljava/lang/String;) +FN:112,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$2(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FN:116,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$3(Ljava/lang/String;) +FN:129,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$4(Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;Lcom/mediatek/jacoco2lcov/jacoco/JacocoData;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:133,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$5(Ljava/lang/String;Ljava/lang/String;Ljava/util/List;)Lcom/mediatek/jacoco2lcov/util/Either; 
+FN:144,com.mediatek.jacoco2lcov.lcov.LCOVUtils.getSourcePathname(Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;Ljava/lang/String;)Ljava/lang/String; +FN:145,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$getSourcePathname$6(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FN:147,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$getSourcePathname$7(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FN:163,com.mediatek.jacoco2lcov.lcov.LCOVUtils.constructLCOVData(Lcom/mediatek/jacoco2lcov/jacoco/JacocoData;Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:172,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$constructLCOVData$8(Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;Ljava/util/Map;Lorg/jacoco/core/analysis/ISourceFileCoverage;) +FN:193,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$constructLCOVData$9(Ljava/util/Map;Lorg/jacoco/core/analysis/IPackageCoverage;) +FN:227,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$constructLCOVData$10(Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;) +FN:239,com.mediatek.jacoco2lcov.lcov.LCOVUtils.writeLCOVData(Ljava/lang/String;Ljava/util/List;Ljava/lang/String;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:262,com.mediatek.jacoco2lcov.lcov.LCOVUtils.generateLCOVData(Ljava/util/List;Ljava/lang/String;Ljava/io/PrintWriter;Ljava/lang/String;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.() +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.dispose() +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.generateLCOVReport(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$0(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$1(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$2(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$3(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$4(Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;Lcom/mediatek/jacoco2lcov/jacoco/JacocoData;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$generateLCOVReport$5(Ljava/lang/String;Ljava/lang/String;Ljava/util/List;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.getSourcePathname(Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$getSourcePathname$6(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$getSourcePathname$7(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.constructLCOVData(Lcom/mediatek/jacoco2lcov/jacoco/JacocoData;Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$constructLCOVData$8(Ljava/lang/String;Lcom/mediatek/jacoco2lcov/util/ListF;Ljava/util/Map;Lorg/jacoco/core/analysis/ISourceFileCoverage;) 
+FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$constructLCOVData$9(Ljava/util/Map;Lorg/jacoco/core/analysis/IPackageCoverage;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.lambda$constructLCOVData$10(Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.writeLCOVData(Ljava/lang/String;Ljava/util/List;Ljava/lang/String;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.lcov.LCOVUtils.generateLCOVData(Ljava/util/List;Ljava/lang/String;Ljava/io/PrintWriter;Ljava/lang/String;)Lcom/mediatek/jacoco2lcov/util/Either; +FNF:18 +FNH:0 +BRDA:95,0,0,0 +BRDA:95,0,1,0 +BRDA:100,0,0,0 +BRDA:100,0,1,0 +BRDA:105,0,0,0 +BRDA:105,0,1,0 +BRDA:113,0,0,0 +BRDA:113,0,1,0 +BRDA:120,0,0,0 +BRDA:120,0,1,0 +BRDA:181,0,0,0 +BRDA:181,0,1,0 +BRDA:182,0,0,0 +BRDA:182,0,1,0 +BRDA:194,0,0,0 +BRDA:194,0,1,0 +BRDA:200,0,0,0 +BRDA:200,0,1,0 +BRDA:202,0,0,0 +BRDA:202,0,1,0 +BRDA:203,0,0,0 +BRDA:203,0,1,0 +BRDA:221,0,0,0 +BRDA:221,0,1,0 +BRDA:224,0,0,0 +BRDA:224,0,1,0 +BRDA:248,0,0,0 +BRDA:248,0,1,0 +BRDA:262,0,0,0 +BRDA:262,0,1,0 +BRDA:269,0,0,0 +BRDA:269,0,1,0 +BRDA:282,0,0,0 +BRDA:282,0,1,0 +BRDA:284,0,0,0 +BRDA:284,0,1,0 +BRDA:286,0,0,0 +BRDA:286,0,1,0 +BRDA:293,0,0,0 +BRDA:293,0,1,0 +BRDA:297,0,0,0 +BRDA:297,0,1,0 +BRDA:299,0,0,0 +BRDA:299,0,1,0 +BRDA:321,0,0,0 +BRDA:321,0,1,0 +BRDA:323,0,0,0 +BRDA:323,0,1,0 +BRDA:328,0,0,0 +BRDA:328,0,1,0 +BRDA:329,0,0,0 +BRDA:329,0,1,0 +BRDA:331,0,0,0 +BRDA:331,0,1,0 +BRDA:345,0,0,0 +BRDA:345,0,1,0 +BRDA:347,0,0,0 +BRDA:347,0,1,0 +BRDA:352,0,0,0 +BRDA:352,0,1,0 +BRF:60 +BRH:0 +DA:72,0 +DA:73,0 +DA:77,0 +DA:93,0 +DA:94,0 +DA:95,0 +DA:96,0 +DA:98,0 +DA:99,0 +DA:100,0 +DA:101,0 +DA:103,0 +DA:104,0 +DA:105,0 +DA:106,0 +DA:107,0 +DA:108,0 +DA:111,0 +DA:112,0 +DA:113,0 +DA:114,0 +DA:115,0 +DA:116,0 +DA:120,0 +DA:121,0 +DA:124,0 +DA:128,0 +DA:129,0 +DA:132,0 +DA:133,0 +DA:144,0 +DA:145,0 +DA:146,0 +DA:147,0 +DA:148,0 +DA:163,0 +DA:164,0 +DA:167,0 +DA:168,0 +DA:169,0 +DA:172,0 +DA:173,0 +DA:174,0 +DA:176,0 +DA:179,0 +DA:181,0 +DA:182,0 +DA:183,0 +DA:187,0 +DA:191,0 +DA:192,0 +DA:193,0 +DA:194,0 +DA:195,0 +DA:196,0 +DA:197,0 +DA:200,0 +DA:201,0 +DA:202,0 +DA:203,0 +DA:204,0 +DA:206,0 +DA:207,0 +DA:211,0 +DA:212,0 +DA:215,0 +DA:216,0 +DA:220,0 +DA:221,0 +DA:222,0 +DA:224,0 +DA:225,0 +DA:226,0 +DA:227,0 +DA:230,0 +DA:239,0 +DA:240,0 +DA:242,0 +DA:244,0 +DA:245,0 +DA:248,0 +DA:251,0 +DA:262,0 +DA:264,0 +DA:269,0 +DA:272,0 +DA:275,0 +DA:277,0 +DA:278,0 +DA:282,0 +DA:284,0 +DA:286,0 +DA:288,0 +DA:289,0 +DA:291,0 +DA:292,0 +DA:293,0 +DA:294,0 +DA:295,0 +DA:297,0 +DA:299,0 +DA:301,0 +DA:302,0 +DA:303,0 +DA:304,0 +DA:305,0 +DA:306,0 +DA:310,0 +DA:311,0 +DA:318,0 +DA:319,0 +DA:321,0 +DA:323,0 +DA:325,0 +DA:326,0 +DA:328,0 +DA:329,0 +DA:331,0 +DA:335,0 +DA:336,0 +DA:337,0 +DA:339,0 +DA:340,0 +DA:343,0 +DA:344,0 +DA:345,0 +DA:347,0 +DA:349,0 +DA:351,0 +DA:352,0 +DA:354,0 +DA:355,0 +DA:357,0 +DA:361,0 +DA:362,0 +DA:365,0 +DA:366,0 +DA:368,0 +LH:0 +LF:138 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/lcov/LineData.java +FN:51,com.mediatek.jacoco2lcov.lcov.LineData.(Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;ILorg/jacoco/core/analysis/ILine;) +FN:75,com.mediatek.jacoco2lcov.lcov.LineData.getLineNumber(I) +FN:80,com.mediatek.jacoco2lcov.lcov.LineData.hasCode(Z) +FN:85,com.mediatek.jacoco2lcov.lcov.LineData.isCovered(Z) +FN:90,com.mediatek.jacoco2lcov.lcov.LineData.isPartlyCovered(Z) +FN:95,com.mediatek.jacoco2lcov.lcov.LineData.isFullyCovered(Z) 
+FN:100,com.mediatek.jacoco2lcov.lcov.LineData.getBranchCount(I) +FN:105,com.mediatek.jacoco2lcov.lcov.LineData.getBranchesHit(I) +FN:116,com.mediatek.jacoco2lcov.lcov.LineData.getExecutionCount(I) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LineData.(Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;ILorg/jacoco/core/analysis/ILine;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LineData.getLineNumber(I) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LineData.hasCode(Z) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LineData.isCovered(Z) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LineData.isPartlyCovered(Z) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LineData.isFullyCovered(Z) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LineData.getBranchCount(I) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LineData.getBranchesHit(I) +FNDA:0,com.mediatek.jacoco2lcov.lcov.LineData.getExecutionCount(I) +FNF:9 +FNH:0 +BRDA:80,0,0,0 +BRDA:80,0,1,0 +BRDA:85,0,0,0 +BRDA:85,0,1,0 +BRDA:85,0,2,0 +BRDA:85,0,3,0 +BRDA:90,0,0,0 +BRDA:90,0,1,0 +BRDA:95,0,0,0 +BRDA:95,0,1,0 +BRDA:119,0,0,0 +BRDA:119,0,1,0 +BRDA:119,0,2,0 +BRDA:122,0,0,0 +BRDA:122,0,1,0 +BRDA:127,0,0,0 +BRDA:127,0,1,0 +BRF:17 +BRH:0 +DA:51,0 +DA:52,0 +DA:53,0 +DA:54,0 +DA:55,0 +DA:75,0 +DA:80,0 +DA:85,0 +DA:90,0 +DA:95,0 +DA:100,0 +DA:105,0 +DA:116,0 +DA:117,0 +DA:118,0 +DA:119,0 +DA:122,0 +DA:123,0 +DA:124,0 +DA:127,0 +DA:128,0 +DA:131,0 +LH:0 +LF:22 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/lcov/MethodData.java +FN:29,com.mediatek.jacoco2lcov.lcov.MethodData.() +FN:60,com.mediatek.jacoco2lcov.lcov.MethodData.(Lcom/mediatek/jacoco2lcov/lcov/ClassData;Lorg/jacoco/core/analysis/IMethodCoverage;) +FN:75,com.mediatek.jacoco2lcov.lcov.MethodData.dispose() +FN:94,com.mediatek.jacoco2lcov.lcov.MethodData.getClassData(Lcom/mediatek/jacoco2lcov/lcov/ClassData;) +FN:101,com.mediatek.jacoco2lcov.lcov.MethodData.getName(Ljava/lang/String;) +FN:106,com.mediatek.jacoco2lcov.lcov.MethodData.hasDebugInfo(Z) +FN:111,com.mediatek.jacoco2lcov.lcov.MethodData.getFirstLine(I) +FN:116,com.mediatek.jacoco2lcov.lcov.MethodData.getLastLine(I) +FN:125,com.mediatek.jacoco2lcov.lcov.MethodData.getExecutionCount(I) +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData.() +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData.(Lcom/mediatek/jacoco2lcov/lcov/ClassData;Lorg/jacoco/core/analysis/IMethodCoverage;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData.dispose() +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData.getClassData(Lcom/mediatek/jacoco2lcov/lcov/ClassData;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData.getName(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData.hasDebugInfo(Z) +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData.getFirstLine(I) +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData.getLastLine(I) +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData.getExecutionCount(I) +FN:30,com.mediatek.jacoco2lcov.lcov.MethodData$1.() +FN:33,com.mediatek.jacoco2lcov.lcov.MethodData$1.compare(Lcom/mediatek/jacoco2lcov/lcov/MethodData;Lcom/mediatek/jacoco2lcov/lcov/MethodData;)I +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData$1.() +FNDA:0,com.mediatek.jacoco2lcov.lcov.MethodData$1.compare(Lcom/mediatek/jacoco2lcov/lcov/MethodData;Lcom/mediatek/jacoco2lcov/lcov/MethodData;)I +FNF:11 +FNH:0 +BRDA:36,0,0,0 +BRDA:36,0,1,0 +BRDA:37,0,0,0 +BRDA:37,0,1,0 +BRDA:65,0,0,0 +BRDA:65,0,1,0 +BRDA:66,0,0,0 +BRDA:66,0,1,0 +BRDA:67,0,0,0 +BRDA:67,0,1,0 +BRDA:68,0,0,0 +BRDA:68,0,1,0 +BRDA:106,0,0,0 +BRDA:106,0,1,0 +BRF:14 +BRH:0 +DA:29,0 +DA:30,0 +DA:33,0 +DA:34,0 +DA:36,0 +DA:37,0 +DA:38,0 +DA:60,0 +DA:61,0 +DA:62,0 
+DA:64,0 +DA:65,0 +DA:66,0 +DA:67,0 +DA:68,0 +DA:70,0 +DA:71,0 +DA:75,0 +DA:94,0 +DA:101,0 +DA:106,0 +DA:111,0 +DA:116,0 +DA:125,0 +LH:0 +LF:24 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/lcov/SourceFileData.java +FN:33,com.mediatek.jacoco2lcov.lcov.SourceFileData.() +FN:59,com.mediatek.jacoco2lcov.lcov.SourceFileData.(Ljava/lang/String;Lorg/jacoco/core/analysis/ISourceFileCoverage;) +FN:66,com.mediatek.jacoco2lcov.lcov.SourceFileData.lambda$new$0(Lcom/mediatek/jacoco2lcov/util/Pair;)Lcom/mediatek/jacoco2lcov/lcov/LineData; +FN:75,com.mediatek.jacoco2lcov.lcov.SourceFileData.addClass(Lorg/jacoco/core/analysis/IClassCoverage;) +FN:83,com.mediatek.jacoco2lcov.lcov.SourceFileData.sort() +FN:104,com.mediatek.jacoco2lcov.lcov.SourceFileData.getPathname(Ljava/lang/String;) +FN:109,com.mediatek.jacoco2lcov.lcov.SourceFileData.getPackageName(Ljava/lang/String;) +FN:114,com.mediatek.jacoco2lcov.lcov.SourceFileData.getFilePathname(Ljava/lang/String;) +FN:119,com.mediatek.jacoco2lcov.lcov.SourceFileData.getClasses(Ljava/util/List;) +FN:124,com.mediatek.jacoco2lcov.lcov.SourceFileData.getLines(Ljava/util/List;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData.() +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData.(Ljava/lang/String;Lorg/jacoco/core/analysis/ISourceFileCoverage;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData.lambda$new$0(Lcom/mediatek/jacoco2lcov/util/Pair;)Lcom/mediatek/jacoco2lcov/lcov/LineData; +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData.addClass(Lorg/jacoco/core/analysis/IClassCoverage;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData.sort() +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData.getPathname(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData.getPackageName(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData.getFilePathname(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData.getClasses(Ljava/util/List;) +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData.getLines(Ljava/util/List;) +FN:34,com.mediatek.jacoco2lcov.lcov.SourceFileData$1.() +FN:37,com.mediatek.jacoco2lcov.lcov.SourceFileData$1.compare(Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;)I +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData$1.() +FNDA:0,com.mediatek.jacoco2lcov.lcov.SourceFileData$1.compare(Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;Lcom/mediatek/jacoco2lcov/lcov/SourceFileData;)I +FNF:12 +FNH:0 +BRDA:84,0,0,0 +BRDA:84,0,1,0 +BRF:2 +BRH:0 +DA:33,0 +DA:34,0 +DA:37,0 +DA:59,0 +DA:60,0 +DA:61,0 +DA:63,0 +DA:65,0 +DA:66,0 +DA:67,0 +DA:68,0 +DA:75,0 +DA:76,0 +DA:83,0 +DA:84,0 +DA:85,0 +DA:86,0 +DA:104,0 +DA:109,0 +DA:114,0 +DA:119,0 +DA:124,0 +LH:0 +LF:22 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/util/Either.java +FN:30,com.mediatek.jacoco2lcov.util.Either.left(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:35,com.mediatek.jacoco2lcov.util.Either.right(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:43,com.mediatek.jacoco2lcov.util.Either.from(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:51,com.mediatek.jacoco2lcov.util.Either.from(Ljava/lang/Object;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:59,com.mediatek.jacoco2lcov.util.Either.from(Ljava/util/function/Supplier;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; 
+FN:70,com.mediatek.jacoco2lcov.util.Either.when(ZLjava/lang/Object;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:79,com.mediatek.jacoco2lcov.util.Either.when(ZLjava/util/function/Supplier;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:88,com.mediatek.jacoco2lcov.util.Either.when(ZLjava/lang/Object;Ljava/util/function/Supplier;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:97,com.mediatek.jacoco2lcov.util.Either.when(ZLjava/util/function/Supplier;Ljava/util/function/Supplier;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:106,com.mediatek.jacoco2lcov.util.Either.unless(ZLjava/lang/Object;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:116,com.mediatek.jacoco2lcov.util.Either.unless(ZLjava/util/function/Supplier;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:125,com.mediatek.jacoco2lcov.util.Either.unless(ZLjava/lang/Object;Ljava/util/function/Supplier;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:135,com.mediatek.jacoco2lcov.util.Either.unless(ZLjava/util/function/Supplier;Ljava/util/function/Supplier;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:142,com.mediatek.jacoco2lcov.util.Either.(ZLjava/lang/Object;Ljava/lang/Object;) +FN:166,com.mediatek.jacoco2lcov.util.Either.toString(Ljava/lang/String;) +FN:180,com.mediatek.jacoco2lcov.util.Either.debug(Lcom/mediatek/jacoco2lcov/util/Either;) +FN:185,com.mediatek.jacoco2lcov.util.Either.isLeft(Z) +FN:190,com.mediatek.jacoco2lcov.util.Either.getLeft(Ljava/lang/Object;) +FN:198,com.mediatek.jacoco2lcov.util.Either.isRight(Z) +FN:203,com.mediatek.jacoco2lcov.util.Either.get(Ljava/lang/Object;) +FN:215,com.mediatek.jacoco2lcov.util.Either.getOrElse(Ljava/lang/Object;)Ljava/lang/Object; +FN:220,com.mediatek.jacoco2lcov.util.Either.getOrElse(Ljava/util/function/Supplier;)Ljava/lang/Object; +FN:229,com.mediatek.jacoco2lcov.util.Either.leftCast(Lcom/mediatek/jacoco2lcov/util/Either;) +FN:239,com.mediatek.jacoco2lcov.util.Either.rightCast(Lcom/mediatek/jacoco2lcov/util/Either;) +FN:250,com.mediatek.jacoco2lcov.util.Either.removeNulls(Lcom/mediatek/jacoco2lcov/util/Either;) +FN:250,com.mediatek.jacoco2lcov.util.Either.lambda$removeNulls$0(Ljava/lang/Object;)Z +FN:258,com.mediatek.jacoco2lcov.util.Either.keep(Ljava/util/function/Predicate;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:266,com.mediatek.jacoco2lcov.util.Either.map(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:275,com.mediatek.jacoco2lcov.util.Either.flatMap(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:284,com.mediatek.jacoco2lcov.util.Either.ifLeftDo(Ljava/util/function/Consumer;)Lcom/mediatek/jacoco2lcov/util/Either; +FN:294,com.mediatek.jacoco2lcov.util.Either.andEitherDo(Ljava/util/function/Consumer;Ljava/util/function/Consumer;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.left(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.right(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.from(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.from(Ljava/lang/Object;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.from(Ljava/util/function/Supplier;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.when(ZLjava/lang/Object;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; 
+FNDA:0,com.mediatek.jacoco2lcov.util.Either.when(ZLjava/util/function/Supplier;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.when(ZLjava/lang/Object;Ljava/util/function/Supplier;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.when(ZLjava/util/function/Supplier;Ljava/util/function/Supplier;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.unless(ZLjava/lang/Object;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.unless(ZLjava/util/function/Supplier;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.unless(ZLjava/lang/Object;Ljava/util/function/Supplier;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.unless(ZLjava/util/function/Supplier;Ljava/util/function/Supplier;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.(ZLjava/lang/Object;Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Either.toString(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.util.Either.debug(Lcom/mediatek/jacoco2lcov/util/Either;) +FNDA:0,com.mediatek.jacoco2lcov.util.Either.isLeft(Z) +FNDA:0,com.mediatek.jacoco2lcov.util.Either.getLeft(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Either.isRight(Z) +FNDA:0,com.mediatek.jacoco2lcov.util.Either.get(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Either.getOrElse(Ljava/lang/Object;)Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.getOrElse(Ljava/util/function/Supplier;)Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.leftCast(Lcom/mediatek/jacoco2lcov/util/Either;) +FNDA:0,com.mediatek.jacoco2lcov.util.Either.rightCast(Lcom/mediatek/jacoco2lcov/util/Either;) +FNDA:0,com.mediatek.jacoco2lcov.util.Either.removeNulls(Lcom/mediatek/jacoco2lcov/util/Either;) +FNDA:0,com.mediatek.jacoco2lcov.util.Either.lambda$removeNulls$0(Ljava/lang/Object;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Either.keep(Ljava/util/function/Predicate;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.map(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.flatMap(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.ifLeftDo(Ljava/util/function/Consumer;)Lcom/mediatek/jacoco2lcov/util/Either; +FNDA:0,com.mediatek.jacoco2lcov.util.Either.andEitherDo(Ljava/util/function/Consumer;Ljava/util/function/Consumer;)Lcom/mediatek/jacoco2lcov/util/Either; +FNF:31 +FNH:0 +BRDA:51,0,0,0 +BRDA:51,0,1,0 +BRDA:59,0,0,0 +BRDA:59,0,1,0 +BRDA:60,0,0,0 +BRDA:60,0,1,0 +BRDA:70,0,0,0 +BRDA:70,0,1,0 +BRDA:79,0,0,0 +BRDA:79,0,1,0 +BRDA:88,0,0,0 +BRDA:88,0,1,0 +BRDA:97,0,0,0 +BRDA:97,0,1,0 +BRDA:106,0,0,0 +BRDA:106,0,1,0 +BRDA:116,0,0,0 +BRDA:116,0,1,0 +BRDA:125,0,0,0 +BRDA:125,0,1,0 +BRDA:135,0,0,0 +BRDA:135,0,1,0 +BRDA:166,0,0,0 +BRDA:166,0,1,0 +BRDA:185,0,0,0 +BRDA:185,0,1,0 +BRDA:190,0,0,0 +BRDA:190,0,1,0 +BRDA:203,0,0,0 +BRDA:203,0,1,0 +BRDA:215,0,0,0 +BRDA:215,0,1,0 +BRDA:220,0,0,0 +BRDA:220,0,1,0 +BRDA:229,0,0,0 +BRDA:229,0,1,0 +BRDA:239,0,0,0 +BRDA:239,0,1,0 +BRDA:250,0,0,0 +BRDA:250,0,1,0 +BRDA:258,0,0,0 +BRDA:258,0,1,0 +BRDA:258,0,2,0 +BRDA:258,0,3,0 +BRDA:266,0,0,0 +BRDA:266,0,1,0 +BRDA:275,0,0,0 +BRDA:275,0,1,0 +BRDA:284,0,0,0 +BRDA:284,0,1,0 +BRDA:294,0,0,0 +BRDA:294,0,1,0 +BRF:52 +BRH:0 +DA:30,0 
+DA:35,0 +DA:43,0 +DA:51,0 +DA:59,0 +DA:60,0 +DA:61,0 +DA:70,0 +DA:79,0 +DA:88,0 +DA:97,0 +DA:106,0 +DA:116,0 +DA:125,0 +DA:135,0 +DA:142,0 +DA:154,0 +DA:155,0 +DA:156,0 +DA:157,0 +DA:158,0 +DA:166,0 +DA:167,0 +DA:169,0 +DA:180,0 +DA:185,0 +DA:190,0 +DA:191,0 +DA:193,0 +DA:198,0 +DA:203,0 +DA:204,0 +DA:206,0 +DA:215,0 +DA:220,0 +DA:229,0 +DA:239,0 +DA:250,0 +DA:258,0 +DA:266,0 +DA:275,0 +DA:284,0 +DA:285,0 +DA:294,0 +DA:295,0 +DA:297,0 +DA:298,0 +LH:0 +LF:47 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/util/ListF.java +FN:37,com.mediatek.jacoco2lcov.util.ListF.() +FN:50,com.mediatek.jacoco2lcov.util.ListF.nil(Lcom/mediatek/jacoco2lcov/util/ListF;) +FN:58,com.mediatek.jacoco2lcov.util.ListF.nil(Ljava/lang/Class;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:66,com.mediatek.jacoco2lcov.util.ListF.cons(Ljava/lang/Object;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:75,com.mediatek.jacoco2lcov.util.ListF.cons(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:81,com.mediatek.jacoco2lcov.util.ListF.listOf(Ljava/lang/Class;[Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:87,com.mediatek.jacoco2lcov.util.ListF.list([Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:98,com.mediatek.jacoco2lcov.util.ListF.of([Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:103,com.mediatek.jacoco2lcov.util.ListF.from(Ljava/util/List;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:112,com.mediatek.jacoco2lcov.util.ListF.from(Ljava/util/Collection;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:118,com.mediatek.jacoco2lcov.util.ListF.from([Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:130,com.mediatek.jacoco2lcov.util.ListF.range(II)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:154,com.mediatek.jacoco2lcov.util.ListF.() +FN:158,com.mediatek.jacoco2lcov.util.ListF.(Ljava/lang/Object;Lcom/mediatek/jacoco2lcov/util/ListF;) +FN:165,com.mediatek.jacoco2lcov.util.ListF.dispose() +FN:176,com.mediatek.jacoco2lcov.util.ListF.toString(Ljava/lang/String;) +FN:178,com.mediatek.jacoco2lcov.util.ListF.lambda$toString$0(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FN:184,com.mediatek.jacoco2lcov.util.ListF.equals(Ljava/lang/Object;)Z +FN:190,com.mediatek.jacoco2lcov.util.ListF.equals(Lcom/mediatek/jacoco2lcov/util/ListF;Lcom/mediatek/jacoco2lcov/util/ListF;)Z +FN:210,com.mediatek.jacoco2lcov.util.ListF.debug(Lcom/mediatek/jacoco2lcov/util/ListF;) +FN:218,com.mediatek.jacoco2lcov.util.ListF.isNull(Z) +FN:223,com.mediatek.jacoco2lcov.util.ListF.isEmpty(Z) +FN:228,com.mediatek.jacoco2lcov.util.ListF.isNotEmpty(Z) +FN:236,com.mediatek.jacoco2lcov.util.ListF.get(Ljava/lang/Object;) +FN:244,com.mediatek.jacoco2lcov.util.ListF.getOrElse(Ljava/util/function/Supplier;)Ljava/lang/Object; +FN:252,com.mediatek.jacoco2lcov.util.ListF.getOrElse(Ljava/lang/Object;)Ljava/lang/Object; +FN:260,com.mediatek.jacoco2lcov.util.ListF.getOrElseNull(Ljava/lang/Object;) +FN:268,com.mediatek.jacoco2lcov.util.ListF.head(Ljava/lang/Object;) +FN:276,com.mediatek.jacoco2lcov.util.ListF.tail(Lcom/mediatek/jacoco2lcov/util/ListF;) +FN:284,com.mediatek.jacoco2lcov.util.ListF.car(Ljava/lang/Object;) +FN:289,com.mediatek.jacoco2lcov.util.ListF.cdr(Lcom/mediatek/jacoco2lcov/util/ListF;) +FN:296,com.mediatek.jacoco2lcov.util.ListF.toList(Ljava/util/List;) +FN:297,com.mediatek.jacoco2lcov.util.ListF.lambda$toList$1(Ljava/util/ArrayList;Ljava/lang/Object;) +FN:303,com.mediatek.jacoco2lcov.util.ListF.toArrayOf(Ljava/lang/Class;)[Ljava/lang/Object; 
+FN:319,com.mediatek.jacoco2lcov.util.ListF.removeNulls(Lcom/mediatek/jacoco2lcov/util/ListF;) +FN:319,com.mediatek.jacoco2lcov.util.ListF.lambda$removeNulls$2(Ljava/lang/Object;)Z +FN:326,com.mediatek.jacoco2lcov.util.ListF.length(I) +FN:340,com.mediatek.jacoco2lcov.util.ListF.forEach(Ljava/util/function/Consumer;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:352,com.mediatek.jacoco2lcov.util.ListF.reverse(Lcom/mediatek/jacoco2lcov/util/ListF;) +FN:366,com.mediatek.jacoco2lcov.util.ListF.concat(Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:380,com.mediatek.jacoco2lcov.util.ListF.map(Ljava/util/function/Function;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:387,com.mediatek.jacoco2lcov.util.ListF.map(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:400,com.mediatek.jacoco2lcov.util.ListF.keep(Ljava/util/function/Predicate;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:415,com.mediatek.jacoco2lcov.util.ListF.foldLeft(Ljava/util/function/BiFunction;Ljava/lang/Object;)Ljava/lang/Object; +FN:429,com.mediatek.jacoco2lcov.util.ListF.foldLeft1(Ljava/util/function/BiFunction;)Ljava/lang/Object; +FN:447,com.mediatek.jacoco2lcov.util.ListF.flatMap(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/ListF; +FN:456,com.mediatek.jacoco2lcov.util.ListF.flatten(Lcom/mediatek/jacoco2lcov/util/ListF;) +FN:458,com.mediatek.jacoco2lcov.util.ListF.lambda$flatten$3(Ljava/util/List;Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:1,com.mediatek.jacoco2lcov.util.ListF.() +FNDA:1,com.mediatek.jacoco2lcov.util.ListF.nil(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.nil(Ljava/lang/Class;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.cons(Ljava/lang/Object;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.cons(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.listOf(Ljava/lang/Class;[Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.list([Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.of([Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.from(Ljava/util/List;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.from(Ljava/util/Collection;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.from([Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.range(II)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:1,com.mediatek.jacoco2lcov.util.ListF.() +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.(Ljava/lang/Object;Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.dispose() +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.toString(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.lambda$toString$0(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.equals(Ljava/lang/Object;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.equals(Lcom/mediatek/jacoco2lcov/util/ListF;Lcom/mediatek/jacoco2lcov/util/ListF;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.debug(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:1,com.mediatek.jacoco2lcov.util.ListF.isNull(Z) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.isEmpty(Z) 
+FNDA:0,com.mediatek.jacoco2lcov.util.ListF.isNotEmpty(Z) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.get(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.getOrElse(Ljava/util/function/Supplier;)Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.getOrElse(Ljava/lang/Object;)Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.getOrElseNull(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.head(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.tail(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.car(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.cdr(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.toList(Ljava/util/List;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.lambda$toList$1(Ljava/util/ArrayList;Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.toArrayOf(Ljava/lang/Class;)[Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.removeNulls(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.lambda$removeNulls$2(Ljava/lang/Object;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.length(I) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.forEach(Ljava/util/function/Consumer;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:1,com.mediatek.jacoco2lcov.util.ListF.reverse(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.concat(Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.map(Ljava/util/function/Function;Lcom/mediatek/jacoco2lcov/util/ListF;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.map(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.keep(Ljava/util/function/Predicate;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.foldLeft(Ljava/util/function/BiFunction;Ljava/lang/Object;)Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.foldLeft1(Ljava/util/function/BiFunction;)Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.flatMap(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/ListF; +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.flatten(Lcom/mediatek/jacoco2lcov/util/ListF;) +FNDA:0,com.mediatek.jacoco2lcov.util.ListF.lambda$flatten$3(Ljava/util/List;Lcom/mediatek/jacoco2lcov/util/ListF;) +FNF:48 +FNH:5 +BRDA:88,0,0,0 +BRDA:88,0,1,0 +BRDA:89,0,0,0 +BRDA:89,0,1,0 +BRDA:105,0,0,0 +BRDA:105,0,1,0 +BRDA:112,0,0,0 +BRDA:112,0,1,0 +BRDA:119,0,0,0 +BRDA:119,0,1,0 +BRDA:120,0,0,0 +BRDA:120,0,1,0 +BRDA:131,0,0,0 +BRDA:131,0,1,0 +BRDA:132,0,0,0 +BRDA:132,0,1,0 +BRDA:135,0,0,0 +BRDA:135,0,1,0 +BRDA:165,0,0,0 +BRDA:165,0,1,0 +BRDA:184,0,0,0 +BRDA:184,0,1,0 +BRDA:190,0,0,0 +BRDA:190,0,1,0 +BRDA:190,0,2,0 +BRDA:190,0,3,0 +BRDA:191,0,0,0 +BRDA:191,0,1,0 +BRDA:191,0,2,0 +BRDA:191,0,3,0 +BRDA:194,0,0,0 +BRDA:194,0,1,0 +BRDA:194,0,2,0 +BRDA:194,0,3,0 +BRDA:195,0,0,0 +BRDA:195,0,1,0 +BRDA:195,0,2,0 +BRDA:195,0,3,0 +BRDA:198,0,0,0 +BRDA:198,0,1,0 +BRDA:218,0,0,1 +BRDA:218,0,1,0 +BRDA:228,0,0,0 +BRDA:228,0,1,0 +BRDA:244,0,0,0 +BRDA:244,0,1,0 +BRDA:252,0,0,0 +BRDA:252,0,1,0 +BRDA:268,0,0,0 +BRDA:268,0,1,0 +BRDA:276,0,0,0 +BRDA:276,0,1,0 +BRDA:306,0,0,0 +BRDA:306,0,1,0 +BRDA:319,0,0,0 +BRDA:319,0,1,0 +BRDA:328,0,0,0 +BRDA:328,0,1,0 +BRDA:341,0,0,0 +BRDA:341,0,1,0 +BRDA:354,0,0,1 +BRDA:354,0,1,0 +BRDA:368,0,0,0 +BRDA:368,0,1,0 +BRDA:389,0,0,0 +BRDA:389,0,1,0 
+BRDA:402,0,0,0 +BRDA:402,0,1,0 +BRDA:403,0,0,0 +BRDA:403,0,1,0 +BRDA:417,0,0,0 +BRDA:417,0,1,0 +BRDA:429,0,0,0 +BRDA:429,0,1,0 +BRDA:434,0,0,0 +BRDA:434,0,1,0 +BRF:76 +BRH:2 +DA:37,1 +DA:50,1 +DA:58,0 +DA:66,0 +DA:67,0 +DA:75,0 +DA:81,0 +DA:87,0 +DA:88,0 +DA:89,0 +DA:90,0 +DA:92,0 +DA:98,0 +DA:103,0 +DA:104,0 +DA:105,0 +DA:106,0 +DA:107,0 +DA:112,0 +DA:118,0 +DA:119,0 +DA:120,0 +DA:121,0 +DA:122,0 +DA:130,0 +DA:131,0 +DA:132,0 +DA:133,0 +DA:135,0 +DA:136,0 +DA:137,0 +DA:154,1 +DA:155,1 +DA:158,0 +DA:159,0 +DA:160,0 +DA:161,0 +DA:165,0 +DA:166,0 +DA:167,0 +DA:168,0 +DA:176,0 +DA:177,0 +DA:178,0 +DA:184,0 +DA:185,0 +DA:190,0 +DA:191,0 +DA:192,0 +DA:193,0 +DA:194,0 +DA:195,0 +DA:196,0 +DA:197,0 +DA:198,0 +DA:199,0 +DA:210,0 +DA:218,1 +DA:223,0 +DA:228,0 +DA:236,0 +DA:244,0 +DA:252,0 +DA:260,0 +DA:268,0 +DA:269,0 +DA:271,0 +DA:276,0 +DA:277,0 +DA:279,0 +DA:284,0 +DA:289,0 +DA:296,0 +DA:297,0 +DA:298,0 +DA:303,0 +DA:304,0 +DA:305,0 +DA:306,0 +DA:307,0 +DA:309,0 +DA:319,0 +DA:326,0 +DA:327,0 +DA:328,0 +DA:329,0 +DA:330,0 +DA:332,0 +DA:340,0 +DA:341,0 +DA:342,0 +DA:343,0 +DA:345,0 +DA:352,1 +DA:353,1 +DA:354,1 +DA:355,0 +DA:356,0 +DA:358,1 +DA:366,0 +DA:367,0 +DA:368,0 +DA:369,0 +DA:370,0 +DA:372,0 +DA:380,0 +DA:387,0 +DA:388,0 +DA:389,0 +DA:390,0 +DA:391,0 +DA:393,0 +DA:400,0 +DA:401,0 +DA:402,0 +DA:403,0 +DA:404,0 +DA:405,0 +DA:407,0 +DA:415,0 +DA:416,0 +DA:417,0 +DA:418,0 +DA:419,0 +DA:421,0 +DA:429,0 +DA:430,0 +DA:432,0 +DA:433,0 +DA:434,0 +DA:435,0 +DA:436,0 +DA:438,0 +DA:447,0 +DA:456,0 +DA:457,0 +DA:458,0 +DA:459,0 +LH:9 +LF:138 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/util/Maybe.java +FN:29,com.mediatek.jacoco2lcov.util.Maybe.() +FN:41,com.mediatek.jacoco2lcov.util.Maybe.nothing(Lcom/mediatek/jacoco2lcov/util/Maybe;) +FN:49,com.mediatek.jacoco2lcov.util.Maybe.nothing(Ljava/lang/Class;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FN:57,com.mediatek.jacoco2lcov.util.Maybe.just(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FN:62,com.mediatek.jacoco2lcov.util.Maybe.from(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FN:67,com.mediatek.jacoco2lcov.util.Maybe.from(Ljava/lang/Class;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FN:86,com.mediatek.jacoco2lcov.util.Maybe.() +FN:90,com.mediatek.jacoco2lcov.util.Maybe.(Ljava/lang/Object;) +FN:97,com.mediatek.jacoco2lcov.util.Maybe.dispose() +FN:107,com.mediatek.jacoco2lcov.util.Maybe.toString(Ljava/lang/String;) +FN:113,com.mediatek.jacoco2lcov.util.Maybe.equals(Ljava/lang/Object;)Z +FN:127,com.mediatek.jacoco2lcov.util.Maybe.hashCode(I) +FN:142,com.mediatek.jacoco2lcov.util.Maybe.debug(Lcom/mediatek/jacoco2lcov/util/Maybe;) +FN:149,com.mediatek.jacoco2lcov.util.Maybe.isNothing(Z) +FN:156,com.mediatek.jacoco2lcov.util.Maybe.exists(Z) +FN:164,com.mediatek.jacoco2lcov.util.Maybe.get(Ljava/lang/Object;) +FN:176,com.mediatek.jacoco2lcov.util.Maybe.getOrElseNull(Ljava/lang/Object;) +FN:186,com.mediatek.jacoco2lcov.util.Maybe.getOrElse(Ljava/lang/Object;)Ljava/lang/Object; +FN:194,com.mediatek.jacoco2lcov.util.Maybe.getOrElse(Ljava/util/function/Supplier;)Ljava/lang/Object; +FN:205,com.mediatek.jacoco2lcov.util.Maybe.removeNulls(Lcom/mediatek/jacoco2lcov/util/Maybe;) +FN:205,com.mediatek.jacoco2lcov.util.Maybe.lambda$removeNulls$0(Ljava/lang/Object;)Z +FN:213,com.mediatek.jacoco2lcov.util.Maybe.keep(Ljava/util/function/Predicate;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FN:223,com.mediatek.jacoco2lcov.util.Maybe.map(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/Maybe; 
+FN:231,com.mediatek.jacoco2lcov.util.Maybe.mapN(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FN:239,com.mediatek.jacoco2lcov.util.Maybe.flatMap(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.() +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.nothing(Lcom/mediatek/jacoco2lcov/util/Maybe;) +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.nothing(Ljava/lang/Class;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.just(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.from(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.from(Ljava/lang/Class;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.() +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.dispose() +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.toString(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.equals(Ljava/lang/Object;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.hashCode(I) +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.debug(Lcom/mediatek/jacoco2lcov/util/Maybe;) +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.isNothing(Z) +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.exists(Z) +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.get(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.getOrElseNull(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.getOrElse(Ljava/lang/Object;)Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.getOrElse(Ljava/util/function/Supplier;)Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.removeNulls(Lcom/mediatek/jacoco2lcov/util/Maybe;) +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.lambda$removeNulls$0(Ljava/lang/Object;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.keep(Ljava/util/function/Predicate;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.map(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.mapN(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FNDA:0,com.mediatek.jacoco2lcov.util.Maybe.flatMap(Ljava/util/function/Function;)Lcom/mediatek/jacoco2lcov/util/Maybe; +FNF:25 +FNH:0 +BRDA:62,0,0,0 +BRDA:62,0,1,0 +BRDA:67,0,0,0 +BRDA:67,0,1,0 +BRDA:97,0,0,0 +BRDA:97,0,1,0 +BRDA:107,0,0,0 +BRDA:107,0,1,0 +BRDA:113,0,0,0 +BRDA:113,0,1,0 +BRDA:115,0,0,0 +BRDA:115,0,1,0 +BRDA:117,0,0,0 +BRDA:117,0,1,0 +BRDA:127,0,0,0 +BRDA:127,0,1,0 +BRDA:130,0,0,0 +BRDA:130,0,1,0 +BRDA:149,0,0,0 +BRDA:149,0,1,0 +BRDA:156,0,0,0 +BRDA:156,0,1,0 +BRDA:164,0,0,0 +BRDA:164,0,1,0 +BRDA:186,0,0,0 +BRDA:186,0,1,0 +BRDA:194,0,0,0 +BRDA:194,0,1,0 +BRDA:205,0,0,0 +BRDA:205,0,1,0 +BRDA:213,0,0,0 +BRDA:213,0,1,0 +BRDA:213,0,2,0 +BRDA:213,0,3,0 +BRDA:223,0,0,0 +BRDA:223,0,1,0 +BRDA:231,0,0,0 +BRDA:231,0,1,0 +BRDA:239,0,0,0 +BRDA:239,0,1,0 +BRF:40 +BRH:0 +DA:29,0 +DA:41,0 +DA:49,0 +DA:57,0 +DA:62,0 +DA:67,0 +DA:86,0 +DA:87,0 +DA:90,0 +DA:91,0 +DA:92,0 +DA:97,0 +DA:98,0 +DA:99,0 +DA:107,0 +DA:113,0 +DA:114,0 +DA:115,0 +DA:116,0 +DA:117,0 +DA:118,0 +DA:120,0 +DA:127,0 +DA:128,0 +DA:130,0 +DA:142,0 +DA:149,0 +DA:156,0 +DA:164,0 +DA:165,0 +DA:167,0 +DA:176,0 +DA:186,0 +DA:194,0 +DA:205,0 +DA:213,0 +DA:223,0 +DA:231,0 +DA:239,0 +LH:0 +LF:39 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/util/Pair.java 
+FN:25,com.mediatek.jacoco2lcov.util.Pair.pair(Ljava/lang/Object;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Pair; +FN:32,com.mediatek.jacoco2lcov.util.Pair.(Ljava/lang/Object;Ljava/lang/Object;) +FN:48,com.mediatek.jacoco2lcov.util.Pair.dispose() +FN:58,com.mediatek.jacoco2lcov.util.Pair.toString(Ljava/lang/String;) +FN:66,com.mediatek.jacoco2lcov.util.Pair.hashCode(I) +FN:73,com.mediatek.jacoco2lcov.util.Pair.equals(Ljava/lang/Object;)Z +FN:85,com.mediatek.jacoco2lcov.util.Pair.clone(Ljava/lang/Object;) +FN:93,com.mediatek.jacoco2lcov.util.Pair.first(Ljava/lang/Object;) +FN:101,com.mediatek.jacoco2lcov.util.Pair.setFirst(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Pair; +FN:107,com.mediatek.jacoco2lcov.util.Pair.second(Ljava/lang/Object;) +FN:115,com.mediatek.jacoco2lcov.util.Pair.setSecond(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Pair; +FN:124,com.mediatek.jacoco2lcov.util.Pair.set(Ljava/lang/Object;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Pair; +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.pair(Ljava/lang/Object;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Pair; +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.(Ljava/lang/Object;Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.dispose() +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.toString(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.hashCode(I) +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.equals(Ljava/lang/Object;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.clone(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.first(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.setFirst(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Pair; +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.second(Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.setSecond(Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Pair; +FNDA:0,com.mediatek.jacoco2lcov.util.Pair.set(Ljava/lang/Object;Ljava/lang/Object;)Lcom/mediatek/jacoco2lcov/util/Pair; +FNF:12 +FNH:0 +BRDA:66,0,0,0 +BRDA:66,0,1,0 +BRDA:67,0,0,0 +BRDA:67,0,1,0 +BRDA:74,0,0,0 +BRDA:74,0,1,0 +BRDA:76,0,0,0 +BRDA:76,0,1,0 +BRDA:77,0,0,0 +BRDA:77,0,1,0 +BRF:10 +BRH:0 +DA:25,0 +DA:32,0 +DA:35,0 +DA:41,0 +DA:42,0 +DA:43,0 +DA:44,0 +DA:48,0 +DA:49,0 +DA:50,0 +DA:58,0 +DA:59,0 +DA:60,0 +DA:66,0 +DA:67,0 +DA:73,0 +DA:74,0 +DA:75,0 +DA:76,0 +DA:77,0 +DA:79,0 +DA:85,0 +DA:93,0 +DA:101,0 +DA:102,0 +DA:107,0 +DA:115,0 +DA:116,0 +DA:124,0 +DA:125,0 +DA:126,0 +LH:0 +LF:31 +end_of_record +TN:Jacoco2LCOV_Utility +SF:src/com/mediatek/jacoco2lcov/util/Utils.java +FN:54,com.mediatek.jacoco2lcov.util.Utils.getCurrentWorkingDirectory(Ljava/io/File;) +FN:59,com.mediatek.jacoco2lcov.util.Utils.getCurrentWorkingDirectoryPathname(Ljava/lang/String;) +FN:67,com.mediatek.jacoco2lcov.util.Utils.getenv(Ljava/lang/String;)Ljava/lang/String; +FN:75,com.mediatek.jacoco2lcov.util.Utils.getenv(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FN:83,com.mediatek.jacoco2lcov.util.Utils.getenvBoolean(Ljava/lang/String;Z)Z +FN:91,com.mediatek.jacoco2lcov.util.Utils.getenvInteger(Ljava/lang/String;I)I +FN:99,com.mediatek.jacoco2lcov.util.Utils.isNull(Ljava/lang/Object;)Z +FN:104,com.mediatek.jacoco2lcov.util.Utils.isNotNull(Ljava/lang/Object;)Z +FN:109,com.mediatek.jacoco2lcov.util.Utils.ifNull(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object; +FN:119,com.mediatek.jacoco2lcov.util.Utils.ifNull(Ljava/lang/Object;Ljava/lang/Object;Ljava/util/function/Function;)Ljava/lang/Object; +FN:130,com.mediatek.jacoco2lcov.util.Utils.not(Z)Z 
+FN:135,com.mediatek.jacoco2lcov.util.Utils.clampMin(II)I +FN:146,com.mediatek.jacoco2lcov.util.Utils.sleep(I)Z +FN:159,com.mediatek.jacoco2lcov.util.Utils.exceptionToString(Ljava/lang/Throwable;)Ljava/lang/String; +FN:174,com.mediatek.jacoco2lcov.util.Utils.toString(Ljava/lang/Object;)Ljava/lang/String; +FN:182,com.mediatek.jacoco2lcov.util.Utils.toString(Ljava/lang/Object;Ljava/lang/String;)Ljava/lang/String; +FN:187,com.mediatek.jacoco2lcov.util.Utils.equals(Ljava/lang/String;Ljava/lang/String;)Z +FN:193,com.mediatek.jacoco2lcov.util.Utils.isEmpty(Ljava/lang/String;)Z +FN:198,com.mediatek.jacoco2lcov.util.Utils.isNotEmpty(Ljava/lang/String;)Z +FN:203,com.mediatek.jacoco2lcov.util.Utils.concatenate(Ljava/lang/String;[Ljava/lang/String;)Ljava/lang/String; +FN:211,com.mediatek.jacoco2lcov.util.Utils.concatenate([Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FN:222,com.mediatek.jacoco2lcov.util.Utils.concatenate(Ljava/lang/String;Ljava/util/List;)Ljava/lang/String; +FN:230,com.mediatek.jacoco2lcov.util.Utils.concatenate(Ljava/util/List;Ljava/lang/String;)Ljava/lang/String; +FN:247,com.mediatek.jacoco2lcov.util.Utils.isBoolean(Ljava/lang/String;)Z +FN:266,com.mediatek.jacoco2lcov.util.Utils.toBoolean(Ljava/lang/String;)Z +FN:271,com.mediatek.jacoco2lcov.util.Utils.toBoolean(Ljava/lang/String;Z)Z +FN:294,com.mediatek.jacoco2lcov.util.Utils.toInt(Ljava/lang/String;)I +FN:302,com.mediatek.jacoco2lcov.util.Utils.toInt(Ljava/lang/String;I)I +FN:311,com.mediatek.jacoco2lcov.util.Utils.toInt(Ljava/lang/String;II)I +FN:335,com.mediatek.jacoco2lcov.util.Utils.equals(Ljava/lang/Object;Ljava/lang/Object;)Z +FN:343,com.mediatek.jacoco2lcov.util.Utils.compare(II)I +FN:352,com.mediatek.jacoco2lcov.util.Utils.compare(Ljava/lang/String;Ljava/lang/String;)I +FN:377,com.mediatek.jacoco2lcov.util.Utils.toListOf(Ljava/lang/Class;Ljava/util/List;)Ljava/util/List; +FN:407,com.mediatek.jacoco2lcov.util.Utils.toListOf(Ljava/lang/Class;[Ljava/lang/Object;)Ljava/util/List; +FN:421,com.mediatek.jacoco2lcov.util.Utils.toList([Ljava/lang/Object;)Ljava/util/List; +FN:430,com.mediatek.jacoco2lcov.util.Utils.createArrayOf(Ljava/lang/Class;I)[Ljava/lang/Object; +FN:447,com.mediatek.jacoco2lcov.util.Utils.toArrayOf(Ljava/lang/Class;[Ljava/lang/Object;)[Ljava/lang/Object; +FN:465,com.mediatek.jacoco2lcov.util.Utils.listify(Ljava/util/Collection;)Ljava/util/List; +FN:475,com.mediatek.jacoco2lcov.util.Utils.format(Ljava/lang/String;[Ljava/lang/Object;)Ljava/lang/String; +FN:481,com.mediatek.jacoco2lcov.util.Utils.append(Ljava/lang/StringBuffer;Ljava/lang/String;[Ljava/lang/Object;) +FN:487,com.mediatek.jacoco2lcov.util.Utils.append(Ljava/lang/StringBuilder;Ljava/lang/String;[Ljava/lang/Object;) +FN:493,com.mediatek.jacoco2lcov.util.Utils.println() +FN:498,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/io/PrintStream;) +FN:503,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/io/PrintWriter;) +FN:508,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/lang/StringBuffer;) +FN:513,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/lang/StringBuilder;) +FN:519,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/lang/String;[Ljava/lang/Object;) +FN:524,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/io/PrintStream;Ljava/lang/String;[Ljava/lang/Object;) +FN:530,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/io/PrintWriter;Ljava/lang/String;[Ljava/lang/Object;) +FN:540,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/lang/StringBuffer;Ljava/lang/String;[Ljava/lang/Object;) 
+FN:550,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/lang/StringBuilder;Ljava/lang/String;[Ljava/lang/Object;) +FN:557,com.mediatek.jacoco2lcov.util.Utils.print(Ljava/lang/String;[Ljava/lang/Object;) +FN:562,com.mediatek.jacoco2lcov.util.Utils.print(Ljava/io/PrintStream;Ljava/lang/String;[Ljava/lang/Object;) +FN:567,com.mediatek.jacoco2lcov.util.Utils.print(Ljava/io/PrintWriter;Ljava/lang/String;[Ljava/lang/Object;) +FN:573,com.mediatek.jacoco2lcov.util.Utils.print(Ljava/lang/StringBuffer;Ljava/lang/String;[Ljava/lang/Object;) +FN:579,com.mediatek.jacoco2lcov.util.Utils.print(Ljava/lang/StringBuilder;Ljava/lang/String;[Ljava/lang/Object;) +FN:585,com.mediatek.jacoco2lcov.util.Utils.flush() +FN:591,com.mediatek.jacoco2lcov.util.Utils.flush(Ljava/io/Writer;) +FN:599,com.mediatek.jacoco2lcov.util.Utils.flush(Ljava/io/OutputStream;) +FN:609,com.mediatek.jacoco2lcov.util.Utils.exists(Ljava/lang/String;)Z +FN:614,com.mediatek.jacoco2lcov.util.Utils.exists(Ljava/io/File;)Z +FN:619,com.mediatek.jacoco2lcov.util.Utils.isFile(Ljava/lang/String;)Z +FN:628,com.mediatek.jacoco2lcov.util.Utils.isFile(Ljava/io/File;)Z +FN:634,com.mediatek.jacoco2lcov.util.Utils.isAbsolute(Ljava/lang/String;)Z +FN:639,com.mediatek.jacoco2lcov.util.Utils.isAbsolute(Ljava/io/File;)Z +FN:644,com.mediatek.jacoco2lcov.util.Utils.isRelative(Ljava/lang/String;)Z +FN:649,com.mediatek.jacoco2lcov.util.Utils.isRelative(Ljava/io/File;)Z +FN:655,com.mediatek.jacoco2lcov.util.Utils.isDirectory(Ljava/lang/String;)Z +FN:661,com.mediatek.jacoco2lcov.util.Utils.isDirectory(Ljava/io/File;)Z +FN:666,com.mediatek.jacoco2lcov.util.Utils.isReadable(Ljava/lang/String;)Z +FN:671,com.mediatek.jacoco2lcov.util.Utils.isReadable(Ljava/io/File;)Z +FN:676,com.mediatek.jacoco2lcov.util.Utils.isWritable(Ljava/lang/String;)Z +FN:681,com.mediatek.jacoco2lcov.util.Utils.isWritable(Ljava/io/File;)Z +FN:686,com.mediatek.jacoco2lcov.util.Utils.getDirectory(Ljava/io/File;)Ljava/lang/String; +FN:692,com.mediatek.jacoco2lcov.util.Utils.getFilename(Ljava/io/File;)Ljava/lang/String; +FN:698,com.mediatek.jacoco2lcov.util.Utils.getFilename(Ljava/lang/String;)Ljava/lang/String; +FN:705,com.mediatek.jacoco2lcov.util.Utils.getDirectory(Ljava/lang/String;)Ljava/lang/String; +FN:716,com.mediatek.jacoco2lcov.util.Utils.dropLeadingSeparator(Ljava/lang/String;)Ljava/lang/String; +FN:730,com.mediatek.jacoco2lcov.util.Utils.dropTrailingSeparator(Ljava/lang/String;)Ljava/lang/String; +FN:744,com.mediatek.jacoco2lcov.util.Utils.getAbsolutePathname(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FN:756,com.mediatek.jacoco2lcov.util.Utils.getAbsolutePathname(Ljava/io/File;Ljava/lang/String;)Ljava/lang/String; +FN:768,com.mediatek.jacoco2lcov.util.Utils.getAbsolutePathname(Ljava/lang/String;Ljava/io/File;)Ljava/lang/String; +FN:780,com.mediatek.jacoco2lcov.util.Utils.getAbsolutePathname(Ljava/io/File;Ljava/io/File;)Ljava/lang/String; +FN:795,com.mediatek.jacoco2lcov.util.Utils.getRelativePathname(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FN:809,com.mediatek.jacoco2lcov.util.Utils.getRelativePathname(Ljava/io/File;Ljava/lang/String;)Ljava/lang/String; +FN:822,com.mediatek.jacoco2lcov.util.Utils.getRelativePathname(Ljava/lang/String;Ljava/io/File;)Ljava/lang/String; +FN:835,com.mediatek.jacoco2lcov.util.Utils.getRelativePathname(Ljava/io/File;Ljava/io/File;)Ljava/lang/String; +FN:848,com.mediatek.jacoco2lcov.util.Utils.getRelativeFile(Ljava/lang/String;Ljava/lang/String;)Ljava/io/File; 
+FN:861,com.mediatek.jacoco2lcov.util.Utils.getRelativeFile(Ljava/io/File;Ljava/lang/String;)Ljava/io/File; +FN:874,com.mediatek.jacoco2lcov.util.Utils.getRelativeFile(Ljava/lang/String;Ljava/io/File;)Ljava/io/File; +FN:887,com.mediatek.jacoco2lcov.util.Utils.getRelativeFile(Ljava/io/File;Ljava/io/File;)Ljava/io/File; +FN:899,com.mediatek.jacoco2lcov.util.Utils.getCanonicalPathname(Ljava/lang/String;)Ljava/lang/String; +FN:911,com.mediatek.jacoco2lcov.util.Utils.getCanonicalPathname(Ljava/io/File;)Ljava/lang/String; +FN:923,com.mediatek.jacoco2lcov.util.Utils.getCanonicalFile(Ljava/lang/String;)Ljava/io/File; +FN:935,com.mediatek.jacoco2lcov.util.Utils.getCanonicalFile(Ljava/io/File;)Ljava/io/File; +FN:947,com.mediatek.jacoco2lcov.util.Utils.getCanonicalFile(Ljava/lang/String;Ljava/io/File;)Ljava/io/File; +FN:973,com.mediatek.jacoco2lcov.util.Utils.getCanonicalFile(Ljava/lang/String;Ljava/lang/String;)Ljava/io/File; +FN:982,com.mediatek.jacoco2lcov.util.Utils.constructPathname(Ljava/lang/String;[Ljava/lang/String;)Ljava/lang/String; +FN:991,com.mediatek.jacoco2lcov.util.Utils.constructPathname(Ljava/io/File;[Ljava/lang/String;)Ljava/lang/String; +FN:999,com.mediatek.jacoco2lcov.util.Utils.constructPathnameFile(Ljava/lang/String;[Ljava/lang/String;)Ljava/io/File; +FN:1008,com.mediatek.jacoco2lcov.util.Utils.constructPathnameFile(Ljava/io/File;[Ljava/lang/String;)Ljava/io/File; +FN:1030,com.mediatek.jacoco2lcov.util.Utils.findFiles(Ljava/lang/String;Ljava/util/function/Predicate;)Ljava/util/List; +FN:1041,com.mediatek.jacoco2lcov.util.Utils.findFiles(Ljava/io/File;Ljava/util/function/Predicate;)Ljava/util/List; +FN:1052,com.mediatek.jacoco2lcov.util.Utils.findDirectories(Ljava/lang/String;Ljava/util/function/Predicate;)Ljava/util/List; +FN:1063,com.mediatek.jacoco2lcov.util.Utils.findDirectories(Ljava/io/File;Ljava/util/function/Predicate;)Ljava/util/List; +FN:1075,com.mediatek.jacoco2lcov.util.Utils.findPathnames(Ljava/lang/String;Ljava/util/function/Predicate;ZZ)Ljava/util/List; +FN:1088,com.mediatek.jacoco2lcov.util.Utils.findPathnames(Ljava/io/File;Ljava/util/function/Predicate;ZZ)Ljava/util/List; +FN:1092,com.mediatek.jacoco2lcov.util.Utils.lambda$findPathnames$0(ZZLjava/util/function/Predicate;Ljava/io/File;)Z +FN:1106,com.mediatek.jacoco2lcov.util.Utils.openInputFile(Ljava/lang/String;)Ljava/io/BufferedReader; +FN:1114,com.mediatek.jacoco2lcov.util.Utils.openInputFile(Ljava/io/File;)Ljava/io/BufferedReader; +FN:1130,com.mediatek.jacoco2lcov.util.Utils.openOutputFile(Ljava/lang/String;)Ljava/io/PrintWriter; +FN:1139,com.mediatek.jacoco2lcov.util.Utils.openOutputFile(Ljava/io/File;)Ljava/io/PrintWriter; +FN:1149,com.mediatek.jacoco2lcov.util.Utils.openOutputFile(Ljava/lang/String;Z)Ljava/io/PrintWriter; +FN:1159,com.mediatek.jacoco2lcov.util.Utils.openOutputFile(Ljava/io/File;Z)Ljava/io/PrintWriter; +FN:1175,com.mediatek.jacoco2lcov.util.Utils.removeFile(Ljava/io/File;) +FN:1180,com.mediatek.jacoco2lcov.util.Utils.removeFile(Ljava/lang/String;) +FN:1196,com.mediatek.jacoco2lcov.util.Utils.map(Ljava/util/List;Ljava/util/function/Function;)Ljava/util/List; +FN:1209,com.mediatek.jacoco2lcov.util.Utils.forEach(Ljava/util/List;Ljava/util/function/Consumer;) +FN:1222,com.mediatek.jacoco2lcov.util.Utils.keep(Ljava/util/List;Ljava/util/function/Predicate;)Ljava/util/List; +FN:1224,com.mediatek.jacoco2lcov.util.Utils.lambda$keep$1(Ljava/util/function/Predicate;Ljava/util/ArrayList;Ljava/lang/Object;) 
+FN:1237,com.mediatek.jacoco2lcov.util.Utils.flatMap(Ljava/util/List;Ljava/util/function/Function;)Ljava/util/List; +FN:1238,com.mediatek.jacoco2lcov.util.Utils.lambda$flatMap$2(Ljava/util/List;Ljava/util/function/Function;Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getCurrentWorkingDirectory(Ljava/io/File;) +FNDA:1,com.mediatek.jacoco2lcov.util.Utils.getCurrentWorkingDirectoryPathname(Ljava/lang/String;) +FNDA:1,com.mediatek.jacoco2lcov.util.Utils.getenv(Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getenv(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FNDA:1,com.mediatek.jacoco2lcov.util.Utils.getenvBoolean(Ljava/lang/String;Z)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getenvInteger(Ljava/lang/String;I)I +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isNull(Ljava/lang/Object;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isNotNull(Ljava/lang/Object;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.ifNull(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.ifNull(Ljava/lang/Object;Ljava/lang/Object;Ljava/util/function/Function;)Ljava/lang/Object; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.not(Z)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.clampMin(II)I +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.sleep(I)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.exceptionToString(Ljava/lang/Throwable;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.toString(Ljava/lang/Object;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.toString(Ljava/lang/Object;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.equals(Ljava/lang/String;Ljava/lang/String;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isEmpty(Ljava/lang/String;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isNotEmpty(Ljava/lang/String;)Z +FNDA:1,com.mediatek.jacoco2lcov.util.Utils.concatenate(Ljava/lang/String;[Ljava/lang/String;)Ljava/lang/String; +FNDA:3,com.mediatek.jacoco2lcov.util.Utils.concatenate([Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.concatenate(Ljava/lang/String;Ljava/util/List;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.concatenate(Ljava/util/List;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isBoolean(Ljava/lang/String;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.toBoolean(Ljava/lang/String;)Z +FNDA:1,com.mediatek.jacoco2lcov.util.Utils.toBoolean(Ljava/lang/String;Z)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.toInt(Ljava/lang/String;)I +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.toInt(Ljava/lang/String;I)I +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.toInt(Ljava/lang/String;II)I +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.equals(Ljava/lang/Object;Ljava/lang/Object;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.compare(II)I +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.compare(Ljava/lang/String;Ljava/lang/String;)I +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.toListOf(Ljava/lang/Class;Ljava/util/List;)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.toListOf(Ljava/lang/Class;[Ljava/lang/Object;)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.toList([Ljava/lang/Object;)Ljava/util/List; +FNDA:1,com.mediatek.jacoco2lcov.util.Utils.createArrayOf(Ljava/lang/Class;I)[Ljava/lang/Object; +FNDA:2,com.mediatek.jacoco2lcov.util.Utils.toArrayOf(Ljava/lang/Class;[Ljava/lang/Object;)[Ljava/lang/Object; 
+FNDA:0,com.mediatek.jacoco2lcov.util.Utils.listify(Ljava/util/Collection;)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.format(Ljava/lang/String;[Ljava/lang/Object;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.append(Ljava/lang/StringBuffer;Ljava/lang/String;[Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.append(Ljava/lang/StringBuilder;Ljava/lang/String;[Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.println() +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/io/PrintStream;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/io/PrintWriter;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/lang/StringBuffer;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/lang/StringBuilder;) +FNDA:1,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/lang/String;[Ljava/lang/Object;) +FNDA:1,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/io/PrintStream;Ljava/lang/String;[Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/io/PrintWriter;Ljava/lang/String;[Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/lang/StringBuffer;Ljava/lang/String;[Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.println(Ljava/lang/StringBuilder;Ljava/lang/String;[Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.print(Ljava/lang/String;[Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.print(Ljava/io/PrintStream;Ljava/lang/String;[Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.print(Ljava/io/PrintWriter;Ljava/lang/String;[Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.print(Ljava/lang/StringBuffer;Ljava/lang/String;[Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.print(Ljava/lang/StringBuilder;Ljava/lang/String;[Ljava/lang/Object;) +FNDA:1,com.mediatek.jacoco2lcov.util.Utils.flush() +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.flush(Ljava/io/Writer;) +FNDA:1,com.mediatek.jacoco2lcov.util.Utils.flush(Ljava/io/OutputStream;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.exists(Ljava/lang/String;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.exists(Ljava/io/File;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isFile(Ljava/lang/String;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isFile(Ljava/io/File;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isAbsolute(Ljava/lang/String;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isAbsolute(Ljava/io/File;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isRelative(Ljava/lang/String;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isRelative(Ljava/io/File;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isDirectory(Ljava/lang/String;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isDirectory(Ljava/io/File;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isReadable(Ljava/lang/String;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isReadable(Ljava/io/File;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isWritable(Ljava/lang/String;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.isWritable(Ljava/io/File;)Z +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getDirectory(Ljava/io/File;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getFilename(Ljava/io/File;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getFilename(Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getDirectory(Ljava/lang/String;)Ljava/lang/String; 
+FNDA:0,com.mediatek.jacoco2lcov.util.Utils.dropLeadingSeparator(Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.dropTrailingSeparator(Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getAbsolutePathname(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getAbsolutePathname(Ljava/io/File;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getAbsolutePathname(Ljava/lang/String;Ljava/io/File;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getAbsolutePathname(Ljava/io/File;Ljava/io/File;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getRelativePathname(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getRelativePathname(Ljava/io/File;Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getRelativePathname(Ljava/lang/String;Ljava/io/File;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getRelativePathname(Ljava/io/File;Ljava/io/File;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getRelativeFile(Ljava/lang/String;Ljava/lang/String;)Ljava/io/File; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getRelativeFile(Ljava/io/File;Ljava/lang/String;)Ljava/io/File; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getRelativeFile(Ljava/lang/String;Ljava/io/File;)Ljava/io/File; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getRelativeFile(Ljava/io/File;Ljava/io/File;)Ljava/io/File; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getCanonicalPathname(Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getCanonicalPathname(Ljava/io/File;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getCanonicalFile(Ljava/lang/String;)Ljava/io/File; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getCanonicalFile(Ljava/io/File;)Ljava/io/File; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getCanonicalFile(Ljava/lang/String;Ljava/io/File;)Ljava/io/File; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.getCanonicalFile(Ljava/lang/String;Ljava/lang/String;)Ljava/io/File; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.constructPathname(Ljava/lang/String;[Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.constructPathname(Ljava/io/File;[Ljava/lang/String;)Ljava/lang/String; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.constructPathnameFile(Ljava/lang/String;[Ljava/lang/String;)Ljava/io/File; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.constructPathnameFile(Ljava/io/File;[Ljava/lang/String;)Ljava/io/File; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.findFiles(Ljava/lang/String;Ljava/util/function/Predicate;)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.findFiles(Ljava/io/File;Ljava/util/function/Predicate;)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.findDirectories(Ljava/lang/String;Ljava/util/function/Predicate;)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.findDirectories(Ljava/io/File;Ljava/util/function/Predicate;)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.findPathnames(Ljava/lang/String;Ljava/util/function/Predicate;ZZ)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.findPathnames(Ljava/io/File;Ljava/util/function/Predicate;ZZ)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.lambda$findPathnames$0(ZZLjava/util/function/Predicate;Ljava/io/File;)Z 
+FNDA:0,com.mediatek.jacoco2lcov.util.Utils.openInputFile(Ljava/lang/String;)Ljava/io/BufferedReader; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.openInputFile(Ljava/io/File;)Ljava/io/BufferedReader; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.openOutputFile(Ljava/lang/String;)Ljava/io/PrintWriter; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.openOutputFile(Ljava/io/File;)Ljava/io/PrintWriter; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.openOutputFile(Ljava/lang/String;Z)Ljava/io/PrintWriter; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.openOutputFile(Ljava/io/File;Z)Ljava/io/PrintWriter; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.removeFile(Ljava/io/File;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.removeFile(Ljava/lang/String;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.map(Ljava/util/List;Ljava/util/function/Function;)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.forEach(Ljava/util/List;Ljava/util/function/Consumer;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.keep(Ljava/util/List;Ljava/util/function/Predicate;)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.lambda$keep$1(Ljava/util/function/Predicate;Ljava/util/ArrayList;Ljava/lang/Object;) +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.flatMap(Ljava/util/List;Ljava/util/function/Function;)Ljava/util/List; +FNDA:0,com.mediatek.jacoco2lcov.util.Utils.lambda$flatMap$2(Ljava/util/List;Ljava/util/function/Function;Ljava/lang/Object;) +FNF:122 +FNH:12 +BRDA:99,0,0,0 +BRDA:99,0,1,0 +BRDA:104,0,0,0 +BRDA:104,0,1,0 +BRDA:109,0,0,0 +BRDA:109,0,1,0 +BRDA:119,0,0,0 +BRDA:119,0,1,0 +BRDA:119,0,2,0 +BRDA:119,0,3,0 +BRDA:130,0,0,0 +BRDA:130,0,1,0 +BRDA:135,0,0,0 +BRDA:135,0,1,0 +BRDA:160,0,0,0 +BRDA:160,0,1,0 +BRDA:163,0,0,0 +BRDA:163,0,1,0 +BRDA:182,0,0,0 +BRDA:182,0,1,0 +BRDA:187,0,0,0 +BRDA:187,0,1,0 +BRDA:187,0,2,0 +BRDA:187,0,3,0 +BRDA:187,0,4,0 +BRDA:187,0,5,0 +BRDA:187,0,6,0 +BRDA:187,0,7,0 +BRDA:193,0,0,0 +BRDA:193,0,1,0 +BRDA:193,0,2,0 +BRDA:193,0,3,0 +BRDA:198,0,0,0 +BRDA:198,0,1,0 +BRDA:198,0,2,0 +BRDA:198,0,3,0 +BRDA:212,0,0,1 +BRDA:212,0,1,0 +BRDA:213,0,0,1 +BRDA:213,0,1,1 +BRDA:214,0,0,1 +BRDA:214,0,1,1 +BRDA:214,0,2,1 +BRDA:214,0,3,0 +BRDA:232,0,0,0 +BRDA:232,0,1,0 +BRDA:233,0,0,0 +BRDA:233,0,1,0 +BRDA:234,0,0,0 +BRDA:234,0,1,0 +BRDA:234,0,2,0 +BRDA:234,0,3,0 +BRDA:247,0,0,0 +BRDA:247,0,1,0 +BRDA:250,0,0,0 +BRDA:250,0,1,0 +BRDA:251,0,0,0 +BRDA:251,0,1,0 +BRDA:252,0,0,0 +BRDA:252,0,1,0 +BRDA:253,0,0,0 +BRDA:253,0,1,0 +BRDA:254,0,0,0 +BRDA:254,0,1,0 +BRDA:256,0,0,0 +BRDA:256,0,1,0 +BRDA:257,0,0,0 +BRDA:257,0,1,0 +BRDA:258,0,0,0 +BRDA:258,0,1,0 +BRDA:259,0,0,0 +BRDA:259,0,1,0 +BRDA:260,0,0,0 +BRDA:260,0,1,0 +BRDA:272,0,0,1 +BRDA:272,0,1,0 +BRDA:273,0,0,0 +BRDA:273,0,1,0 +BRDA:274,0,0,0 +BRDA:274,0,1,0 +BRDA:275,0,0,0 +BRDA:275,0,1,0 +BRDA:276,0,0,0 +BRDA:276,0,1,0 +BRDA:277,0,0,0 +BRDA:277,0,1,0 +BRDA:279,0,0,0 +BRDA:279,0,1,0 +BRDA:280,0,0,0 +BRDA:280,0,1,0 +BRDA:281,0,0,0 +BRDA:281,0,1,0 +BRDA:282,0,0,0 +BRDA:282,0,1,0 +BRDA:283,0,0,0 +BRDA:283,0,1,0 +BRDA:311,0,0,0 +BRDA:311,0,1,0 +BRDA:315,0,0,0 +BRDA:315,0,1,0 +BRDA:316,0,0,0 +BRDA:316,0,1,0 +BRDA:316,0,2,0 +BRDA:316,0,3,0 +BRDA:318,0,0,0 +BRDA:318,0,1,0 +BRDA:318,0,2,0 +BRDA:318,0,3,0 +BRDA:335,0,0,0 +BRDA:335,0,1,0 +BRDA:335,0,2,0 +BRDA:335,0,3,0 +BRDA:335,0,4,0 +BRDA:335,0,5,0 +BRDA:335,0,6,0 +BRDA:335,0,7,0 +BRDA:343,0,0,0 +BRDA:343,0,1,0 +BRDA:343,0,2,0 +BRDA:343,0,3,0 +BRDA:352,0,0,0 +BRDA:352,0,1,0 +BRDA:352,0,2,0 +BRDA:352,0,3,0 +BRDA:353,0,0,0 +BRDA:353,0,1,0 +BRDA:356,0,0,0 +BRDA:356,0,1,0 +BRDA:357,0,0,0 +BRDA:357,0,1,0 +BRDA:378,0,0,0 
+BRDA:378,0,1,0 +BRDA:381,0,0,0 +BRDA:381,0,1,0 +BRDA:407,0,0,0 +BRDA:407,0,1,0 +BRDA:421,0,0,0 +BRDA:421,0,1,0 +BRDA:448,0,0,1 +BRDA:448,0,1,0 +BRDA:451,0,0,1 +BRDA:451,0,1,1 +BRDA:466,0,0,0 +BRDA:466,0,1,0 +BRDA:609,0,0,0 +BRDA:609,0,1,0 +BRDA:609,0,2,0 +BRDA:609,0,3,0 +BRDA:614,0,0,0 +BRDA:614,0,1,0 +BRDA:614,0,2,0 +BRDA:614,0,3,0 +BRDA:619,0,0,0 +BRDA:619,0,1,0 +BRDA:622,0,0,0 +BRDA:622,0,1,0 +BRDA:622,0,2,0 +BRDA:622,0,3,0 +BRDA:628,0,0,0 +BRDA:628,0,1,0 +BRDA:628,0,2,0 +BRDA:628,0,3,0 +BRDA:629,0,0,0 +BRDA:629,0,1,0 +BRDA:634,0,0,0 +BRDA:634,0,1,0 +BRDA:634,0,2,0 +BRDA:634,0,3,0 +BRDA:639,0,0,0 +BRDA:639,0,1,0 +BRDA:639,0,2,0 +BRDA:639,0,3,0 +BRDA:644,0,0,0 +BRDA:644,0,1,0 +BRDA:644,0,2,0 +BRDA:644,0,3,0 +BRDA:649,0,0,0 +BRDA:649,0,1,0 +BRDA:649,0,2,0 +BRDA:649,0,3,0 +BRDA:655,0,0,0 +BRDA:655,0,1,0 +BRDA:655,0,2,0 +BRDA:655,0,3,0 +BRDA:661,0,0,0 +BRDA:661,0,1,0 +BRDA:661,0,2,0 +BRDA:661,0,3,0 +BRDA:666,0,0,0 +BRDA:666,0,1,0 +BRDA:666,0,2,0 +BRDA:666,0,3,0 +BRDA:671,0,0,0 +BRDA:671,0,1,0 +BRDA:671,0,2,0 +BRDA:671,0,3,0 +BRDA:676,0,0,0 +BRDA:676,0,1,0 +BRDA:676,0,2,0 +BRDA:676,0,3,0 +BRDA:681,0,0,0 +BRDA:681,0,1,0 +BRDA:681,0,2,0 +BRDA:681,0,3,0 +BRDA:686,0,0,0 +BRDA:686,0,1,0 +BRDA:692,0,0,0 +BRDA:692,0,1,0 +BRDA:698,0,0,0 +BRDA:698,0,1,0 +BRDA:700,0,0,0 +BRDA:700,0,1,0 +BRDA:705,0,0,0 +BRDA:705,0,1,0 +BRDA:706,0,0,0 +BRDA:706,0,1,0 +BRDA:708,0,0,0 +BRDA:708,0,1,0 +BRDA:716,0,0,0 +BRDA:716,0,1,0 +BRDA:718,0,0,0 +BRDA:718,0,1,0 +BRDA:718,0,2,0 +BRDA:718,0,3,0 +BRDA:730,0,0,0 +BRDA:730,0,1,0 +BRDA:732,0,0,0 +BRDA:732,0,1,0 +BRDA:732,0,2,0 +BRDA:732,0,3,0 +BRDA:744,0,0,0 +BRDA:744,0,1,0 +BRDA:745,0,0,0 +BRDA:745,0,1,0 +BRDA:746,0,0,0 +BRDA:746,0,1,0 +BRDA:756,0,0,0 +BRDA:756,0,1,0 +BRDA:757,0,0,0 +BRDA:757,0,1,0 +BRDA:758,0,0,0 +BRDA:758,0,1,0 +BRDA:768,0,0,0 +BRDA:768,0,1,0 +BRDA:769,0,0,0 +BRDA:769,0,1,0 +BRDA:770,0,0,0 +BRDA:770,0,1,0 +BRDA:780,0,0,0 +BRDA:780,0,1,0 +BRDA:781,0,0,0 +BRDA:781,0,1,0 +BRDA:782,0,0,0 +BRDA:782,0,1,0 +BRDA:795,0,0,0 +BRDA:795,0,1,0 +BRDA:796,0,0,0 +BRDA:796,0,1,0 +BRDA:809,0,0,0 +BRDA:809,0,1,0 +BRDA:810,0,0,0 +BRDA:810,0,1,0 +BRDA:822,0,0,0 +BRDA:822,0,1,0 +BRDA:823,0,0,0 +BRDA:823,0,1,0 +BRDA:836,0,0,0 +BRDA:836,0,1,0 +BRDA:848,0,0,0 +BRDA:848,0,1,0 +BRDA:849,0,0,0 +BRDA:849,0,1,0 +BRDA:861,0,0,0 +BRDA:861,0,1,0 +BRDA:862,0,0,0 +BRDA:862,0,1,0 +BRDA:874,0,0,0 +BRDA:874,0,1,0 +BRDA:875,0,0,0 +BRDA:875,0,1,0 +BRDA:887,0,0,0 +BRDA:887,0,1,0 +BRDA:888,0,0,0 +BRDA:888,0,1,0 +BRDA:889,0,0,0 +BRDA:889,0,1,0 +BRDA:891,0,0,0 +BRDA:891,0,1,0 +BRDA:899,0,0,0 +BRDA:899,0,1,0 +BRDA:911,0,0,0 +BRDA:911,0,1,0 +BRDA:923,0,0,0 +BRDA:923,0,1,0 +BRDA:935,0,0,0 +BRDA:935,0,1,0 +BRDA:950,0,0,0 +BRDA:950,0,1,0 +BRDA:952,0,0,0 +BRDA:952,0,1,0 +BRDA:955,0,0,0 +BRDA:955,0,1,0 +BRDA:973,0,0,0 +BRDA:973,0,1,0 +BRDA:982,0,0,0 +BRDA:982,0,1,0 +BRDA:999,0,0,0 +BRDA:999,0,1,0 +BRDA:1009,0,0,0 +BRDA:1009,0,1,0 +BRDA:1012,0,0,0 +BRDA:1012,0,1,0 +BRDA:1013,0,0,0 +BRDA:1013,0,1,0 +BRDA:1075,0,0,0 +BRDA:1075,0,1,0 +BRDA:1088,0,0,0 +BRDA:1088,0,1,0 +BRDA:1088,0,2,0 +BRDA:1088,0,3,0 +BRDA:1088,0,4,0 +BRDA:1088,0,5,0 +BRDA:1088,0,6,0 +BRDA:1088,0,7,0 +BRDA:1093,0,0,0 +BRDA:1093,0,1,0 +BRDA:1093,0,2,0 +BRDA:1093,0,3,0 +BRDA:1093,0,4,0 +BRDA:1093,0,5,0 +BRDA:1093,0,6,0 +BRDA:1093,0,7,0 +BRDA:1116,0,0,0 +BRDA:1116,0,1,0 +BRDA:1116,0,2,0 +BRDA:1116,0,3,0 +BRDA:1162,0,0,0 +BRDA:1162,0,1,0 +BRDA:1164,0,0,0 +BRDA:1164,0,1,0 +BRDA:1164,0,2,0 +BRDA:1164,0,3,0 +BRDA:1180,0,0,0 +BRDA:1180,0,1,0 +BRDA:1196,0,0,0 +BRDA:1196,0,1,0 +BRDA:1198,0,0,0 +BRDA:1198,0,1,0 +BRDA:1209,0,0,0 +BRDA:1209,0,1,0 
+BRDA:1209,0,2,0 +BRDA:1209,0,3,0 +BRDA:1211,0,0,0 +BRDA:1211,0,1,0 +BRDA:1224,0,0,0 +BRDA:1224,0,1,0 +BRF:356 +BRH:10 +DA:54,0 +DA:59,1 +DA:67,1 +DA:75,0 +DA:83,1 +DA:91,0 +DA:99,0 +DA:104,0 +DA:109,0 +DA:119,0 +DA:120,0 +DA:130,0 +DA:135,0 +DA:146,0 +DA:148,0 +DA:149,0 +DA:150,0 +DA:151,0 +DA:152,0 +DA:159,0 +DA:160,0 +DA:161,0 +DA:162,0 +DA:163,0 +DA:164,0 +DA:166,0 +DA:174,0 +DA:182,0 +DA:187,0 +DA:193,0 +DA:198,0 +DA:203,1 +DA:211,1 +DA:212,1 +DA:213,2 +DA:214,3 +DA:215,1 +DA:217,1 +DA:222,0 +DA:230,0 +DA:231,0 +DA:232,0 +DA:233,0 +DA:234,0 +DA:235,0 +DA:236,0 +DA:237,0 +DA:239,0 +DA:247,0 +DA:248,0 +DA:250,0 +DA:251,0 +DA:252,0 +DA:253,0 +DA:254,0 +DA:256,0 +DA:257,0 +DA:258,0 +DA:259,0 +DA:260,0 +DA:266,0 +DA:271,1 +DA:272,1 +DA:273,0 +DA:274,0 +DA:275,0 +DA:276,0 +DA:277,0 +DA:278,0 +DA:279,0 +DA:280,0 +DA:281,0 +DA:282,0 +DA:283,0 +DA:284,0 +DA:286,1 +DA:294,0 +DA:302,0 +DA:311,0 +DA:314,0 +DA:315,0 +DA:316,0 +DA:317,0 +DA:318,0 +DA:319,0 +DA:321,0 +DA:322,0 +DA:323,0 +DA:335,0 +DA:343,0 +DA:352,0 +DA:353,0 +DA:354,0 +DA:356,0 +DA:357,0 +DA:358,0 +DA:377,0 +DA:378,0 +DA:379,0 +DA:380,0 +DA:381,0 +DA:384,0 +DA:385,0 +DA:388,0 +DA:407,0 +DA:408,0 +DA:411,0 +DA:421,0 +DA:430,1 +DA:447,1 +DA:448,1 +DA:449,1 +DA:450,1 +DA:451,2 +DA:454,1 +DA:455,1 +DA:458,1 +DA:465,0 +DA:466,0 +DA:467,0 +DA:475,0 +DA:481,0 +DA:482,0 +DA:487,0 +DA:488,0 +DA:493,0 +DA:494,0 +DA:498,0 +DA:499,0 +DA:503,0 +DA:504,0 +DA:508,0 +DA:509,0 +DA:513,0 +DA:514,0 +DA:519,1 +DA:520,1 +DA:524,1 +DA:525,1 +DA:526,1 +DA:530,0 +DA:531,0 +DA:532,0 +DA:540,0 +DA:541,0 +DA:542,0 +DA:550,0 +DA:551,0 +DA:552,0 +DA:557,0 +DA:558,0 +DA:562,0 +DA:563,0 +DA:567,0 +DA:568,0 +DA:573,0 +DA:574,0 +DA:579,0 +DA:580,0 +DA:585,1 +DA:586,1 +DA:591,0 +DA:592,0 +DA:593,0 +DA:594,0 +DA:599,1 +DA:600,0 +DA:601,1 +DA:602,1 +DA:609,0 +DA:614,0 +DA:619,0 +DA:620,0 +DA:622,0 +DA:628,0 +DA:629,0 +DA:634,0 +DA:639,0 +DA:644,0 +DA:649,0 +DA:655,0 +DA:661,0 +DA:666,0 +DA:671,0 +DA:676,0 +DA:681,0 +DA:686,0 +DA:687,0 +DA:692,0 +DA:693,0 +DA:698,0 +DA:699,0 +DA:700,0 +DA:705,0 +DA:706,0 +DA:707,0 +DA:708,0 +DA:716,0 +DA:717,0 +DA:718,0 +DA:719,0 +DA:720,0 +DA:722,0 +DA:730,0 +DA:731,0 +DA:732,0 +DA:733,0 +DA:734,0 +DA:736,0 +DA:744,0 +DA:745,0 +DA:746,0 +DA:747,0 +DA:748,0 +DA:756,0 +DA:757,0 +DA:758,0 +DA:759,0 +DA:760,0 +DA:768,0 +DA:769,0 +DA:770,0 +DA:771,0 +DA:772,0 +DA:780,0 +DA:781,0 +DA:782,0 +DA:783,0 +DA:784,0 +DA:795,0 +DA:796,0 +DA:797,0 +DA:809,0 +DA:810,0 +DA:811,0 +DA:822,0 +DA:823,0 +DA:824,0 +DA:835,0 +DA:836,0 +DA:848,0 +DA:849,0 +DA:850,0 +DA:861,0 +DA:862,0 +DA:863,0 +DA:874,0 +DA:875,0 +DA:876,0 +DA:887,0 +DA:888,0 +DA:889,0 +DA:890,0 +DA:891,0 +DA:899,0 +DA:900,0 +DA:901,0 +DA:902,0 +DA:911,0 +DA:912,0 +DA:913,0 +DA:914,0 +DA:923,0 +DA:924,0 +DA:925,0 +DA:926,0 +DA:935,0 +DA:936,0 +DA:937,0 +DA:938,0 +DA:947,0 +DA:950,0 +DA:952,0 +DA:953,0 +DA:954,0 +DA:955,0 +DA:958,0 +DA:959,0 +DA:960,0 +DA:961,0 +DA:962,0 +DA:965,0 +DA:973,0 +DA:974,0 +DA:982,0 +DA:983,0 +DA:991,0 +DA:999,0 +DA:1000,0 +DA:1008,0 +DA:1009,0 +DA:1011,0 +DA:1012,0 +DA:1013,0 +DA:1014,0 +DA:1017,0 +DA:1018,0 +DA:1019,0 +DA:1030,0 +DA:1041,0 +DA:1052,0 +DA:1063,0 +DA:1075,0 +DA:1076,0 +DA:1088,0 +DA:1090,0 +DA:1091,0 +DA:1092,0 +DA:1093,0 +DA:1094,0 +DA:1096,0 +DA:1098,0 +DA:1106,0 +DA:1114,0 +DA:1116,0 +DA:1118,0 +DA:1119,0 +DA:1120,0 +DA:1121,0 +DA:1130,0 +DA:1139,0 +DA:1149,0 +DA:1159,0 +DA:1161,0 +DA:1162,0 +DA:1163,0 +DA:1164,0 +DA:1165,0 +DA:1167,0 +DA:1168,0 +DA:1169,0 +DA:1170,0 +DA:1175,0 +DA:1176,0 +DA:1180,0 +DA:1181,0 +DA:1183,0 +DA:1184,0 +DA:1185,0 
+DA:1186,0 +DA:1196,0 +DA:1197,0 +DA:1198,0 +DA:1199,0 +DA:1201,0 +DA:1209,0 +DA:1210,0 +DA:1211,0 +DA:1212,0 +DA:1215,0 +DA:1222,0 +DA:1223,0 +DA:1224,0 +DA:1225,0 +DA:1226,0 +DA:1237,0 +DA:1238,0 +DA:1239,0 +LH:32 +LF:358 +end_of_record diff --git a/tests/lcov/mcdc/Makefile b/tests/lcov/mcdc/Makefile new file mode 100644 index 00000000..333f376b --- /dev/null +++ b/tests/lcov/mcdc/Makefile @@ -0,0 +1,6 @@ +include ../../common.mak + +TESTS := mcdc.sh + +clean: + $(shell ./mcdc.sh --clean) diff --git a/tests/lcov/mcdc/main.cpp b/tests/lcov/mcdc/main.cpp new file mode 100644 index 00000000..3ba9bdc7 --- /dev/null +++ b/tests/lcov/mcdc/main.cpp @@ -0,0 +1,23 @@ +/** + * @file main.cpp + * @author MTK50321 Henry Cox + * @date Fri Dec 13 14:34:31 2024 + * + * @brief Check differences between LLVM/GCC regarding MC/DC results. + * split into two files to enable more testing + */ + +extern void test(int, int, int); + + +int main(int ac, char ** av) +{ + test(1,1,0); +#ifdef SENS1 + test(1,0,0); +#endif +#ifdef SENS2 + test(0,1,0); +#endif + return 0; +} diff --git a/tests/lcov/mcdc/mcdc.sh b/tests/lcov/mcdc/mcdc.sh new file mode 100755 index 00000000..9aa14e10 --- /dev/null +++ b/tests/lcov/mcdc/mcdc.sh @@ -0,0 +1,238 @@ +#! /usr/bin/env bash + +source ../../common.tst + +rm -rf *.xml *.dat *.info *.jsn cover_one *_rpt *Test[123]* *.gcno *.gcda gccTest* llvmTest* + +clean_cover + +if [[ 1 == $CLEAN_ONLY ]] ; then + exit 0 +fi + +# is this git or P4? +if [ 1 == "$USE_P4" ] ; then + GET_VERSION=${SCRIPT_DIR}/P4version.pm,--local-edit,--md5 +else + # this is git + GET_VERSION=${SCRIPT_DIR}/gitversion.pm +fi + + +LCOV_OPTS="--branch-coverage $PARALLEL $PROFILE" + +IFS='.' read -r -a VER <<< `${CC} -dumpversion` +if [ "${VER[0]}" -ge 14 ] ; then + ENABLE_MCDC=1 +fi +IFS='.' read -r -a LLVM_VER <<< `clang -dumpversion` +if [ "${LLVM_VER[0]}" -ge 14 ] ; then + ENABLE_LLVM=1 +fi + +STATUS=0 + +function runClang() +( + # runClang exeName srcFile flags + echo "clang++ -fprofile-instr-generate -fcoverage-mapping -fcoverage-mcdc -o $1 main.cpp test.cpp $2" + clang++ -fprofile-instr-generate -fcoverage-mapping -fcoverage-mcdc -o $1 main.cpp test.cpp $2 + if [ $? != 0 ] ; then + echo "ERROR from clang++ $1" + return 1 + fi + ./$1 + llvm-profdata merge --sparse *.profraw -o $1.profdata + if [ $? != 0 ] ; then + echo "ERROR from llvm-profdata $1" + return 1 + fi + llvm-cov export -format=text -instr-profile=$1.profdata ./$1 > $1.jsn + if [ $? != 0 ] ; then + echo "ERROR from llvm-cov $1" + return 1 + fi + $COVER $LLVM2LCOV_TOOL --branch --mcdc -o $1.info $1.jsn --version-script $GET_VERSION + if [ $? != 0 ] ; then + echo "ERROR from llvm2lcov $1" + return 1 + fi + $COVER $GENHTML_TOOL --flat --branch --mcdc -o $1_rpt $1.info --version-script $GET_VERSION + if [ $? != 0 ] ; then + echo "ERROR from genhtml $1" + return 1 + fi + # run again, excluding 'main.cpp' + $COVER $LLVM2LCOV_TOOL --branch --mcdc -o $1.excl.info $1.jsn --version-script $GET_VERSION --exclude '*/main.cpp' + if [ $? != 0 ] ; then + echo "ERROR from llvm2lcov --exclude $1" + return 1 + fi + COUNT=`grep -c SF: $1.excl.info` + if [ 1 != "$COUNT" ] ; then + echo "ERROR llvm2lcov --exclude $1 didn't work" + return 1 + fi + rm -f *.profraw *.profdata +) + +function runGcc() +{ + NAME=$1 + shift + ARG=$1 + shift + echo "g++ --coverage -fcondition-coverage -o $NAME main.cpp test.cpp $ARG" + # runGcc exeName srcFile flags + eval g++ --coverage -fcondition-coverage -o $NAME main.cpp test.cpp $ARG + if [ $? 
!= 0 ] ; then + echo "ERROR from g++ $NAME" + return 1 + fi + ./$NAME + echo "$GENINFO_TOOL -o $NAME.info --mcdc --branch $NAME-test.gcda $@" + $COVER $GENINFO_TOOL -o $NAME.info --mcdc --branch $NAME-test.gcda $@ + if [ $? != 0 ] ; then + echo "ERROR from geninfo $NAME" + return 1 + fi + $COVER $GENHTML_TOOL --flat --branch --mcdc -o ${NAME}_rpt $NAME.info + if [ $? != 0 ] ; then + echo "ERROR from genhtml $NAME" + return 1 + fi + rm -f *.gcda *.gcno +} + + +$COVER $LLVM2LCOV_TOOL --help +if [ 0 != $? ] ; then + echo "ERROR: unexpected return code from --help" + STATUS=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +$COVER $LLVM2LCOV_TOOL --unknown_arg +if [ 0 == $? ] ; then + echo "ERROR: expected return code from --help" + STATUS=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + + +if [ "$ENABLE_MCDC" == 1 ] ; then + runGcc gccTest1 + if [ $? != 0 ] ; then + STATUS=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + runGcc gccTest2 -DSENS1 + if [ $? != 0 ] ; then + STATUS=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + runGcc gccTest3 -DSENS2 + if [ $? != 0 ] ; then + STATUS=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + runGcc gccTest4 '-DSENS2 -DSIMPLE' --filter mcdc + if [ $? != 0 ] ; then + STATUS=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + # the MC/DC should have been filtered out - in favor of the branch + COUNT=`grep -c MCDC gccTest4.info` + if [ 0 != "$COUNT" ] ; then + STATUS=1 + echo "filter error MC/DC" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + runGcc gccTest4a '-DSENS2 -DSIMPLE' + if [ $? != 0 ] ; then + STATUS=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + # the MC/DC shouldn't be filtered + COUNT=`grep -c MCDC gccTest4a.info` + if [ 0 == "$COUNT" ] ; then + STATUS=1 + echo "filter error2 MC/DC" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + + runGcc gccTest5 -DSENS2 --filter mcdc + if [ $? != 0 ] ; then + STATUS=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + # the MC/DC shouldn't have been filtered out + COUNT=`grep -c MCDC gccTest5.info` + if [ 0 == "$COUNT" ] ; then + STATUS=1 + echo "MC/DC filter error" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi +else + echo "SKIPPING MC/DC tests: ancient compiler" +fi + +if [ "$ENABLE_LLVM" == 1 ] ; then + runClang clangTest1 + if [ $? != 0 ] ; then + STATUS=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + runClang clangTest2 -DSENS1 + if [ $? != 0 ] ; then + STATUS=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + runClang clangTest3 -DSENS2 + if [ $? != 0 ] ; then + STATUS=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi +else + echo "SKIPPING LLVM tests" +fi + +if [ $STATUS == 0 ] ; then + echo "Tests passed" +else + echo "Tests failed" +fi + +if [ "x$COVER" != "x" ] && [ $LOCAL_COVERAGE == 1 ]; then + cover +fi + +exit $STATUS diff --git a/tests/lcov/mcdc/test.cpp b/tests/lcov/mcdc/test.cpp new file mode 100644 index 00000000..5c1a2b3c --- /dev/null +++ b/tests/lcov/mcdc/test.cpp @@ -0,0 +1,20 @@ +/** + * @file test.cpp + * @author MTK50321 Henry Cox + * @brief Check differences between LLVM/GCC regarding MC/DC results. 
+ */ + +#include <stdio.h> + +void test(int a, int b, int c) +{ + if +#ifdef SIMPLE + (a) +#else + (a && (b || c)) +#endif + printf("%d && (%d || %d)\n", a, b, c); + else + printf("not..\n"); +} diff --git a/tests/lcov/merge/Makefile b/tests/lcov/merge/Makefile new file mode 100644 index 00000000..c7b07027 --- /dev/null +++ b/tests/lcov/merge/Makefile @@ -0,0 +1,6 @@ +include ../../common.mak + +TESTS := merge.sh + +clean: + $(shell ./merge.sh --clean) diff --git a/tests/lcov/merge/a.dat b/tests/lcov/merge/a.dat new file mode 100644 index 00000000..46311ccb --- /dev/null +++ b/tests/lcov/merge/a.dat @@ -0,0 +1,27 @@ +SF:project/common/trace/tracer_singleton.cpp +FN:23,_ZN5project5trace15TracerSingleton11get_backendEv +FN:18,_ZN5project5trace15TracerSingleton11set_backendESt10unique_ptrINS0_17TracerBackendBaseESt14default_deleteIS3_EE +FN:12,_ZN5project5trace15TracerSingleton3getEv +FN:7,_ZN5project5trace15TracerSingletonC2Ev +FNDA:1920,_ZN5project5trace15TracerSingleton11get_backendEv +FNDA:27,_ZN5project5trace15TracerSingleton11set_backendESt10unique_ptrINS0_17TracerBackendBaseESt14default_deleteIS3_EE +FNDA:1949,_ZN5project5trace15TracerSingleton3getEv +FNDA:34,_ZN5project5trace15TracerSingletonC2Ev +FNF:4 +FNH:4 +DA:7,34 +DA:8,34 +DA:9,34 +DA:12,1949 +DA:13,1949 +DA:14,1949 +DA:15,1949 +DA:18,27 +DA:19,27 +DA:20,27 +DA:23,1920 +DA:24,1920 +DA:25,1920 +LH:13 +LF:13 +end_of_record diff --git a/tests/lcov/merge/a.info b/tests/lcov/merge/a.info new file mode 100644 index 00000000..5f515d23 --- /dev/null +++ b/tests/lcov/merge/a.info @@ -0,0 +1,52 @@ +TN: +SF:a.cpp +DA:1,1 +#common line: my count is zero and yours is nonzero +DA:2,0 +DA:3,0 +DA:4,1 +DA:10,0 +DA:11,0 +DA:12,0 +LF:7 +LH:2 +FN:1,2,fcn +FN:1,2,alias +FN:3,3,noCommonAlias +FN:11,11,onlyA +FNF:4 +FNH:3 +# my count is zero yours is nonzero +FNDA:0,fcn +FNDA:2,alias +FNDA:0,onlyA +FNDA:1,noCommonAlias + +BRDA:1,1,0,1 +BRDA:1,1,1,1 +BRDA:1,1,2,- +BRDA:1,2,0,1 +# common branch expr count zero in my, nonzero in you +BRDA:1,2,1,0 + +# branch in A only +BRDA:11,0,0,0 +BRDA:11,0,1,0 + +BRF:5 +BRH:4 + +# common +MCDC:1,2,t,0,0,0 +MCDC:1,2,f,0,0,0 +MCDC:1,2,t,0,1,0 +MCDC:1,2,f,0,1,0 + +# in A only +MCDC:11,1,t,0,0,0 +MCDC:11,1,f,0,0,0 + +MCF:6 +MCH:0 + +end_of_record diff --git a/tests/lcov/merge/a_subtract_b.gold b/tests/lcov/merge/a_subtract_b.gold new file mode 100644 index 00000000..d346375b --- /dev/null +++ b/tests/lcov/merge/a_subtract_b.gold @@ -0,0 +1,28 @@ +TN: +SF:a.cpp +FNL:0,1,2 +FNA:0,2,alias +FNL:1,11,11 +FNA:1,0,onlyA +FNL:2,3,3 +FNA:2,1,noCommonAlias +FNF:3 +FNH:2 +BRDA:1,1,0,1 +BRDA:1,1,1,1 +BRDA:1,1,2,- +BRDA:11,0,0,0 +BRDA:11,0,1,0 +BRF:5 +BRH:2 +MCDC:11,1,t,0,0,0 +MCDC:11,1,f,0,0,0 +MCF:2 +MCH:0 +DA:4,1 +DA:10,0 +DA:11,0 +DA:12,0 +LF:4 +LH:1 +end_of_record diff --git a/tests/lcov/merge/b.dat b/tests/lcov/merge/b.dat new file mode 100644 index 00000000..bb00e5d3 --- /dev/null +++ b/tests/lcov/merge/b.dat @@ -0,0 +1,34 @@ +SF:project/common/trace/tracer_singleton.cpp +FN:22,_ZN5project5trace15TracerSingleton11get_backendEv +FN:17,_ZN5project5trace15TracerSingleton11set_backendENSt3__110unique_ptrINS0_17TracerBackendBaseENS2_14default_deleteIS4_EEEE +FN:11,_ZN5project5trace15TracerSingleton3getEv +FN:7,_ZN5project5trace15TracerSingletonC2Ev +FNDA:0,_ZN5project5trace15TracerSingleton11get_backendEv +FNDA:0,_ZN5project5trace15TracerSingleton11set_backendENSt3__110unique_ptrINS0_17TracerBackendBaseENS2_14default_deleteIS4_EEEE +FNDA:0,_ZN5project5trace15TracerSingleton3getEv +FNDA:0,_ZN5project5trace15TracerSingletonC2Ev +FNF:4 +FNH:0
+BRDA:13,0,0,- +BRDA:13,0,1,- +BRDA:13,0,2,- +BRDA:13,0,3,- +BRDA:13,0,4,- +BRDA:13,0,5,- +BRDA:13,0,6,- +BRDA:13,0,7,- +BRF:8 +BRH:0 +DA:7,0 +DA:9,0 +DA:11,0 +DA:13,0 +DA:14,0 +DA:17,0 +DA:19,0 +DA:20,0 +DA:22,0 +DA:24,0 +LH:0 +LF:10 +end_of_record diff --git a/tests/lcov/merge/b.info b/tests/lcov/merge/b.info new file mode 100644 index 00000000..e6009d99 --- /dev/null +++ b/tests/lcov/merge/b.info @@ -0,0 +1,64 @@ +#intersect defn +TN: +SF:a.cpp +DA:1,1 +DA:2,3 +DA:3,1 +DA:5,5 +LF:3 +LH:2 +FN:1,2,fcn +FN:3,3,NotCommon +FN:5,2,fcn2 +FNF:3 +FNH:3 +FNDA:1,fcn +FNDA:1,NotCommon +FNDA:5,fcn2 + +# common branch +BRDA:1,2,0,1 +BRDA:1,2,1,1 +BRF:2 +BRH:2 + +# common +MCDC:1,2,t,0,0,0 +MCDC:1,2,f,0,0,0 +MCDC:1,2,t,0,1,0 +MCDC:1,2,f,1,1,0 +MCF:4 +MCH:1 +end_of_record + +#file in B and not A +SF:b.cpp +DA:1,1 +DA:2,3 +DA:3,0 +LF:3 +LH:2 +FN:1,2,fcn +FN:1,2,alias +FN:3,3,fcn2 +FNF:2 +FNH:2 +FNDA:1,fcn +FNDA:2,alias + +BRDA:1,1,0,1 +BRDA:1,1,1,1 +BRDA:1,1,2,- +BRDA:1,2,0,1 +BRDA:1,2,1,1 +BRF:5 +BRH:4 + +MCDC:1,2,t,0,0,0 +MCDC:1,2,f,0,1,0 +MCDC:1,2,t,0,1,0 +MCDC:1,2,f,1,1,0 +MCF:4 +MCH:2 + +end_of_record diff --git a/tests/lcov/merge/b_subtract_a.gold b/tests/lcov/merge/b_subtract_a.gold new file mode 100644 index 00000000..c6178ecb --- /dev/null +++ b/tests/lcov/merge/b_subtract_a.gold @@ -0,0 +1,40 @@ +TN: +SF:a.cpp +FNL:0,3,3 +FNA:0,1,NotCommon +FNL:1,5,2 +FNA:1,5,fcn2 +FNF:2 +FNH:2 +DA:5,5 +LF:1 +LH:1 +end_of_record +TN: +SF:b.cpp +FNL:0,1,2 +FNA:0,2,alias +FNA:0,1,fcn +FNL:1,3,3 +FNA:1,0,fcn2 +FNF:2 +FNH:1 +BRDA:1,1,0,1 +BRDA:1,1,1,1 +BRDA:1,1,2,- +BRDA:1,2,0,1 +BRDA:1,2,1,1 +BRF:5 +BRH:4 +MCDC:1,2,t,0,0,0 +MCDC:1,2,f,0,0,0 +MCDC:1,2,t,0,1,0 +MCDC:1,2,f,1,1,0 +MCF:4 +MCH:1 +DA:1,1 +DA:2,3 +DA:3,0 +LF:3 +LH:2 +end_of_record diff --git a/tests/lcov/merge/functionBug_1.dat b/tests/lcov/merge/functionBug_1.dat new file mode 100644 index 00000000..aeee3e4d --- /dev/null +++ b/tests/lcov/merge/functionBug_1.dat @@ -0,0 +1,24 @@ +TN: +SF: my_file.cpp +FN:9,25,is_within_phase_offset_tolerance(Ouster1DriverConfig const&, std::__1::chrono::duration >, std::__1::chrono::duration >) +FNDA:0,is_within_phase_offset_tolerance(Ouster1DriverConfig const&, std::__1::chrono::duration >, std::__1::chrono::duration >) +FNF:1 +FNH:0 +BRDA:12,0,0,- +BRDA:12,0,1,- +BRDA:25,0,0,- +BRDA:25,0,1,- +BRDA:25,0,2,- +BRDA:25,0,3,- +BRF:6 +BRH:0 +DA:12,0 +DA:15,0 +DA:18,0 +DA:22,0 +DA:23,0 +DA:24,0 +DA:25,0 +LF:7 +LH:0 +end_of_record diff --git a/tests/lcov/merge/functionBug_2.dat b/tests/lcov/merge/functionBug_2.dat new file mode 100644 index 00000000..1bb89cd8 --- /dev/null +++ b/tests/lcov/merge/functionBug_2.dat @@ -0,0 +1,28 @@ +TN: +SF: my_file.cpp +FN:11,26,is_within_phase_offset_tolerance(Ouster1DriverConfig const&, std::chrono::duration >, std::chrono::duration >) +FNDA:1885,is_within_phase_offset_tolerance(Ouster1DriverConfig const&, std::chrono::duration >, std::chrono::duration >) +FNF:1 +FNH:1 +BRDA:12,0,0,1 +BRDA:12,0,1,1884 +BRDA:25,0,0,248 +BRDA:25,0,1,1636 +BRDA:25,1,2,210 +BRDA:25,1,3,1426 +BRF:6 +BRH:6 +DA:12,1885 +DA:13,1 +DA:15,1 +DA:18,1884 +DA:20,1884 +DA:21,1884 +DA:22,1884 +DA:23,1884 +DA:24,1884 +DA:25,1884 +DA:26,1885 +LF:11 +LH:11 +end_of_record diff --git a/tests/lcov/merge/intersect.gold b/tests/lcov/merge/intersect.gold new file mode 100644 index 00000000..f6525ad0 --- /dev/null +++ b/tests/lcov/merge/intersect.gold @@ -0,0 +1,22 @@ +TN: +SF:a.cpp +FNL:0,1,2 +FNA:0,1,fcn +FNF:1 +FNH:1 +BRDA:1,2,0,2 +BRDA:1,2,1,1 +BRF:2 +BRH:2 +MCDC:1,2,t,0,0,0 +MCDC:1,2,f,0,0,0 +MCDC:1,2,t,0,1,0 +MCDC:1,2,f,1,1,0 
+MCF:4 +MCH:1 +DA:1,2 +DA:2,3 +DA:3,1 +LF:3 +LH:3 +end_of_record diff --git a/tests/lcov/merge/mcdc.dat b/tests/lcov/merge/mcdc.dat new file mode 100644 index 00000000..b19a14f7 --- /dev/null +++ b/tests/lcov/merge/mcdc.dat @@ -0,0 +1,52 @@ +TN: +SF:a.cpp +DA:1,1 +#common line: my count is zero and yours is nonzero +DA:2,0 +DA:3,0 +DA:4,1 +DA:10,0 +DA:11,0 +DA:12,0 +LF:7 +LH:2 +FN:1,2,fcn +FN:1,2,alias +FN:3,3,noCommonAlias +FN:11,11,onlyA +FNF:4 +FNH:3 +# my count is zero yours is nonzero +FNDA:0,fcn +FNDA:2,alias +FNDA:0,onlyA +FNDA:1,noCommonAlias + +BRDA:1,1,0,1 +BRDA:1,1,1,1 +BRDA:1,1,2,- +BRDA:1,2,0,1 +# common branch expr count zero in my, nonzero in you +BRDA:1,2,1,0 + +# branch in A only +BRDA:11,0,0,0 +BRDA:11,0,1,0 + +BRF:5 +BRH:4 + +# common +MCDC:1,2,t,0,0,0 +MCDC:1,2,f,0,0,0 +MCDC:1,2,t,0,1,0 +MCDC:1,2,f,0,1,0 + +# on line which has no coverpoint (so synthesize one) +MCDC:6,1,t,0,0,0 +MCDC:6,1,f,0,0,0 + +MCF:6 +MCH:0 + +end_of_record diff --git a/tests/lcov/merge/merge.sh b/tests/lcov/merge/merge.sh new file mode 100755 index 00000000..53bf5651 --- /dev/null +++ b/tests/lcov/merge/merge.sh @@ -0,0 +1,195 @@ +#!/bin/bash +# test lcov set operations + +set +x + +source ../../common.tst + +rm -f *.txt* *.json dumper* intersect*.info gen.info func.info inconsistent.info diff* *.log +rm -rf cover_db + +clean_cover + +if [[ 1 == $CLEAN_ONLY ]] ; then + exit 0 +fi + +if ! type ${CXX} >/dev/null 2>&1 ; then + echo "Missing tool: ${CXX}" >&2 + exit 2 +fi + +LCOV_OPTS="--branch $PARALLEL $PROFILE --mcdc-coverage" +# gcc/4.8.5 (and possibly other old versions) generate inconsistent line/function data +IFS='.' read -r -a VER <<< `${CC} -dumpversion` +if [ "${VER[0]}" -lt 5 ] ; then + IGNORE="--ignore inconsistent" + # and filter exception branches to avoid spurious differences for old compiler + FILTER='--filter branch' +fi + +status=0 +# note that faked data is not consistent - but just ignoring the issue for now +$COVER $LCOV_TOOL $LCOV_OPTS -o intersect.info a.info --intersect b.info --ignore inconsistent +if [ 0 != $? ] ; then + echo "Error: unexpected error code from intersect" + status=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +$COVER $LCOV_TOOL $LCOV_OPTS -o intersect_2.info b.info --intersect a.info --ignore inconsistent +if [ 0 != $? ] ; then + echo "Error: unexpected error code from intersect" + status=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +diff intersect.info intersect_2.info +if [ 0 != $? ] ; then + echo "Error: expected reflexive but not" + status=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +diff intersect.info intersect.gold +if [ 0 != $? ] ; then + echo "Error: unexpected mismatch: intersect.gold" + status=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + + +$COVER $LCOV_TOOL $LCOV_OPTS -o diff.info a.info --subtract b.info --ignore inconsistent +if [ 0 != $? ] ; then + echo "Error: unexpected error code from subtract" + status=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +diff diff.info a_subtract_b.gold +if [ 0 != $? ] ; then + echo "Error: unexpected mismatch: a_subtract_b.gold" + status=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +$COVER $LCOV_TOOL $LCOV_OPTS -o diff2.info b.info --subtract a.info --ignore inconsistent +if [ 0 != $? ] ; then + echo "Error: unexpected error code from subtract 2" + status=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +diff diff2.info b_subtract_a.gold +if [ 0 != $? 
] ; then + echo "Error: unexpected mismatch: b_subtract_a.gold" + status=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + + +# test some error messages... +$COVER $LCOV_TOOL $LCOV_OPTS -o x.info 'y.?info' --intersect a.info --ignore inconsistent +if [ 0 == $? ] ; then + echo "Error: expected error but did not see one" + status=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +$COVER $LCOV_TOOL $LCOV_OPTS -o x.info a.info --intersect 'z.?info' --ignore inconsistent +if [ 0 == $? ] ; then + echo "Error: expected error but did not see one" + status=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# test line coverpoint generation +$COVER $LCOV_TOOL $LCOV_OPTS -o gen.info -a mcdc.dat --ignore inconsistent +if [ 0 != $? ] ; then + echo "Error: MC/DC DA gen failed" + + status=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +for count in 'DA:6,0' 'LF:8' 'LH:2' ; do + grep $count gen.info + if [ 0 != $? ] ; then + echo "Error: didn't find expected count '$count' in MC/DC gen" + status=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi +done + + +$COVER $LCOV_TOOL $LCOV_OPTS -o func.info -a functionBug_1.dat -a functionBug_2.dat --ignore inconsistent +if [ 0 != $? ] ; then + echo "Error: function merge failed" + + status=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +for count in 'FNF:2' 'FNH:2' ; do + grep $count func.info + if [ 0 != $? ] ; then + echo "Error: didn't find expected count '$count' in function merge" + status=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi +done + +$COVER $LCOV_TOOL $LCOV_OPTS -o inconsistent.info -a a.dat -a b.dat --ignore inconsistent --msg-log inconsistent.log +if [ 0 != $? ] ; then + echo "Error: function merge2 failed" + + status=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi +grep -E "duplicate function .+ starts on line .+ but previous definition" inconsistent.log +if [ 0 != $? ] ; then + echo "Error: didn't find definition message" + + status=1 + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +if [ 0 == $status ] ; then + echo "Tests passed" +else + echo "Tests failed" +fi + +if [ "x$COVER" != "x" ] && [ $LOCAL_COVERAGE == 1 ]; then + cover +fi + +exit $status diff --git a/tests/lcov/misc/help.sh b/tests/lcov/misc/help.sh index d34254cb..29ce54d4 100755 --- a/tests/lcov/misc/help.sh +++ b/tests/lcov/misc/help.sh @@ -5,29 +5,31 @@ # Test lcov --help # +source ../../common.tst + STDOUT=help_stdout.log STDERR=help_stderr.log -$LCOV --help >${STDOUT} 2>${STDERR} +$LCOV --help 2> >(grep -v Devel::Cover: > ${STDERR}) >${STDOUT} RC=$? cat "${STDOUT}" "${STDERR}" # Exit code must be zero -if [[ $RC -ne 0 ]] ; then - echo "Error: Non-zero lcov exit code $RC" - exit 1 +if [[ $RC -ne 0 && $KEEP_GOING != 1 ]] ; then + echo "Error: Non-zero lcov exit code $RC" + exit 1 fi # There must be output on stdout if [[ ! 
-s "${STDOUT}" ]] ; then - echo "Error: Missing output on standard output" - exit 1 + echo "Error: Missing output on standard output" + exit 1 fi # There must not be any output on stderr -if [[ -s "${STDERR}" ]] ; then - echo "Error: Unexpected output on standard error" - exit 1 +if [[ -s "${STDERR}" && $COVER == '' ]] ; then + echo "Error: Unexpected output on standard error" + exit 1 fi exit 0 diff --git a/tests/lcov/misc/version.sh b/tests/lcov/misc/version.sh index 1b7c9920..2cd2494d 100755 --- a/tests/lcov/misc/version.sh +++ b/tests/lcov/misc/version.sh @@ -5,29 +5,31 @@ # Test lcov --version # +source ../../common.tst + STDOUT=version_stdout.log STDERR=version_stderr.log -$LCOV --version >${STDOUT} 2>${STDERR} +$LCOV --version 2> >(grep -v Devel::Cover: > ${STDERR}) >${STDOUT} RC=$? cat "${STDOUT}" "${STDERR}" # Exit code must be zero -if [[ $RC -ne 0 ]] ; then - echo "Error: Non-zero lcov exit code $RC" - exit 1 +if [[ $RC -ne 0 && $KEEP_GOING != 1 ]] ; then + echo "Error: Non-zero lcov exit code $RC" + exit 1 fi # There must be output on stdout if [[ ! -s "${STDOUT}" ]] ; then - echo "Error: Missing output on standard output" - exit 1 + echo "Error: Missing output on standard output" + exit 1 fi # There must not be any output on stderr -if [[ -s "${STDERR}" ]] ; then - echo "Error: Unexpected output on standard error" - exit 1 +if [[ -s "${STDERR}" && $COVER == '' ]] ; then + echo "Error: Unexpected output on standard error" + exit 1 fi exit 0 diff --git a/tests/lcov/multiple/Makefile b/tests/lcov/multiple/Makefile new file mode 100644 index 00000000..11a15354 --- /dev/null +++ b/tests/lcov/multiple/Makefile @@ -0,0 +1,6 @@ +include ../../common.mak + +TESTS := multiple.sh + +clean: + $(shell ./multiple.sh --clean) diff --git a/tests/lcov/multiple/multiple.sh b/tests/lcov/multiple/multiple.sh new file mode 100755 index 00000000..8d5d0549 --- /dev/null +++ b/tests/lcov/multiple/multiple.sh @@ -0,0 +1,122 @@ +#!/bin/bash +set +x + +source ../../common.tst + +LCOV_OPTS="$PARALLEL $PROFILE" +# gcc/4.8.5 (and possibly other old versions) generate inconsistent line/function data +IFS='.' read -r -a VER <<< `${CC} -dumpversion` +if [ "${VER[0]}" -lt 5 ] ; then + IGNORE="--ignore inconsistent" +fi +NO_INITIAL_CAPTURE=0 +if [[ "${VER[0]}" -gt 4 && "${VER[0]}" -lt 7 ]] ; then + # no data generated by initial capture + IGNORE_EMPTY="--ignore empty" + NO_INITIAL_CAPTURE=1 +fi + +rm -rf rundir + +clean_cover + +if [[ 1 == $CLEAN_ONLY ]] ; then + exit 0 +fi + +mkdir -p rundir +cd rundir + +rm -Rf a b out +mkdir a b + +echo 'int a (int x) { return x + 1; }' > a/a.c +echo 'int b (int x) { return x + 2;}' > b/b.c + +( cd a ; ${CC} -c --coverage a.c -o a.o ) +( cd b ; ${CC} -c --coverage b.c -o b.o ) + +if [ 1 == $NO_INITIAL_CAPTURE ] ; then + # all tests use --initial + echo 'all tests skipped' + exit 0 +fi + +$COVER $LCOV_TOOL -o out.info --capture --initial --no-external -d a -d b +if [ 0 != $? ] ; then + echo "Error: unexpected error code from lcov --initial" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +COUNT=`grep -c SF: out.info` +if [ 2 != $COUNT ] ; then + echo "Error: expected COUNT==2, found $COUNT" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +$COVER $GENINFO_TOOL -o out2.info --initial --no-external a b +if [ 0 != $? ] ; then + echo "Error: unexpected error code from geninfo --initial" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +diff out.info out2.info +if [ 0 != $?
] ; then + echo "Error: expected identical geninfo output" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi +fi + +# old version of gcc doesn't encode path into .gcno file +# so the case-insensitive compare is not required. +IFS='.' read -r -a VER <<< `${CC} -dumpversion` +if [ "${VER[0]}" -ge 9 ] ; then + rm -rf B + mv b B + + $COVER $GENINFO_TOOL -o out3.info --initial --no-external a B + if [ 0 != $? ] ; then + echo "Error: unexpected error code from geninfo" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + COUNT=`grep -c SF: out3.info` + if [ 1 != $COUNT ] ; then + echo "Error: expected COUNT==1, found $COUNT" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + + # don't look for exclusions: our filesystem isn't case-insensitive + # and we will see a 'source' error + $COVER $GENINFO_TOOL -o out4.info --initial --no-external a B --rc case_insensitive=1 --no-markers + if [ 0 != $? ] ; then + echo "Error: unexpected error code from geninfo insensitive" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi + diff out4.info out.info + if [ 0 != $? ] ; then + echo "Error: expected identical case-insensitive output, found $COUNT" + if [ $KEEP_GOING == 0 ] ; then + exit 1 + fi + fi +fi + + +echo "Tests passed" + +if [ "x$COVER" != "x" ] && [ $LOCAL_COVERAGE == 1 ]; then + cover +fi diff --git a/tests/lcov/summary/Makefile b/tests/lcov/summary/Makefile index 473bb251..7fcd3c7e 100644 --- a/tests/lcov/summary/Makefile +++ b/tests/lcov/summary/Makefile @@ -1,7 +1,11 @@ include ../../common.mak -TESTS := zero.sh full.sh target.sh part1.sh part2.sh concatenated.sh \ - concatenated2.sh +# disabling some old tests because generated data is inconsistent +# (line/branch/function hit/miss stats do not match). +# Those tests have probably outlived their usefulness - so eliminating for now +# rather than enhancing the generation to become consistent +TESTS := zero.sh full.sh +DISABLED := target.sh part1.sh part2.sh concatenated.sh concatenated2.sh clean: rm -f *.info *.log diff --git a/tests/lcov/summary/concatenated.sh b/tests/lcov/summary/concatenated.sh index 64dcd777..353262ec 100755 --- a/tests/lcov/summary/concatenated.sh +++ b/tests/lcov/summary/concatenated.sh @@ -6,31 +6,59 @@ # files target+target=target # +KEEP_GOING=0 +while [ $# -gt 0 ] ; do + + OPT=$1 + case $OPT in + + --coverage ) + shift + COVER_DB=$1 + shift + + COVER="perl -MDevel::Cover=-db,${COVER_DB},-coverage,statement,branch,condition,subroutine " + KEEP_GOING=1 + + ;; + + -v | --verbose ) + set -x + shift + ;; + + * ) + break + ;; + esac +done + STDOUT=summary_concatenated_stdout.log STDERR=summary_concatenated_stderr.log INFO=concatenated.info cat "${TARGETINFO}" "${TARGETINFO}" >"${INFO}" -$LCOV --summary "${INFO}" >${STDOUT} 2>${STDERR} +# generated data is not consistent - ignore for now +$LCOV --summary "${INFO}" --ignore inconsistent,inconsistent 2> >(grep -v Devel::Cover: > ${STDERR}) >${STDOUT} RC=$? cat "${STDOUT}" "${STDERR}" # Exit code must be zero -if [[ $RC -ne 0 ]] ; then - echo "Error: Non-zero lcov exit code $RC" - exit 1 +if [[ $RC -ne 0 && $KEEP_GOING != 1 ]] ; then + echo "Error: Non-zero lcov exit code $RC" + exit 1 fi # There must be output on stdout if [[ !
-s "${STDOUT}" ]] ; then - echo "Error: Missing output on standard output" - exit 1 + echo "Error: Missing output on standard output" + exit 1 fi # There must not be any output on stderr -if [[ -s "${STDERR}" ]] ; then - echo "Error: Unexpected output on standard error" - exit 1 +if [[ -s "${STDERR}" && $COVER == '' ]] ; then + echo "Error: Unexpected output on standard error" + exit 1 fi # Check counts in output diff --git a/tests/lcov/summary/concatenated2.sh b/tests/lcov/summary/concatenated2.sh index 482bf43d..f270c78d 100755 --- a/tests/lcov/summary/concatenated2.sh +++ b/tests/lcov/summary/concatenated2.sh @@ -6,31 +6,58 @@ # files part1+part2=target # +KEEP_GOING=0 +while [ $# -gt 0 ] ; do + + OPT=$1 + case $OPT in + + --coverage ) + shift + COVER_DB=$1 + shift + + COVER="perl -MDevel::Cover=-db,${COVER_DB},-coverage,statement,branch,condition,subroutine " + KEEP_GOING=1 + + ;; + + -v | --verbose ) + set -x + shift + ;; + + * ) + break + ;; + esac +done + STDOUT=summary_concatenated2_stdout.log STDERR=summary_concatenated2_stderr.log INFO=concatenated2.info cat $PART1INFO $PART2INFO >$INFO -$LCOV --summary "${INFO}" >${STDOUT} 2>${STDERR} +$LCOV --summary "${INFO}" 2> >(grep -v Devel::Cover: > ${STDERR}) >${STDOUT} RC=$? cat "${STDOUT}" "${STDERR}" # Exit code must be zero -if [[ $RC -ne 0 ]] ; then - echo "Error: Non-zero lcov exit code $RC" - exit 1 +if [[ $RC -ne 0 && $KEEP_GOING != 1 ]] ; then + echo "Error: Non-zero lcov exit code $RC" + exit 1 fi # There must be output on stdout if [[ ! -s "${STDOUT}" ]] ; then - echo "Error: Missing output on standard output" - exit 1 + echo "Error: Missing output on standard output" + exit 1 fi # There must not be any output on stderr -if [[ -s "${STDERR}" ]] ; then - echo "Error: Unexpected output on standard error" - exit 1 +if [[ -s "${STDERR}" && $COVER == '' ]] ; then + echo "Error: Unexpected output on standard error" + exit 1 fi # Check counts in output diff --git a/tests/lcov/summary/full.sh b/tests/lcov/summary/full.sh index a7d1a2d8..a0579483 100755 --- a/tests/lcov/summary/full.sh +++ b/tests/lcov/summary/full.sh @@ -5,29 +5,56 @@ # Check lcov --summary output for info files containing 100% coverage rates # +KEEP_GOING=0 +while [ $# -gt 0 ] ; do + + OPT=$1 + case $OPT in + + --coverage ) + shift + COVER_DB=$1 + shift + + COVER="perl -MDevel::Cover=-db,${COVER_DB},-coverage,statement,branch,condition,subroutine " + KEEP_GOING=1 + + ;; + + -v | --verbose ) + set -x + shift + ;; + + * ) + break + ;; + esac +done + STDOUT=summary_full_stdout.log STDERR=summary_full_stderr.log -$LCOV --summary "${FULLINFO}" >${STDOUT} 2>${STDERR} +$LCOV --summary "${FULLINFO}" 2> >(grep -v Devel::Cover: > ${STDERR}) >${STDOUT} RC=$? cat "${STDOUT}" "${STDERR}" # Exit code must be zero -if [[ $RC -ne 0 ]] ; then - echo "Error: Non-zero lcov exit code $RC" - exit 1 +if [[ $RC -ne 0 && $KEEP_GOING != 1 ]] ; then + echo "Error: Non-zero lcov exit code $RC" + exit 1 fi # There must be output on stdout if [[ ! 
-s "${STDOUT}" ]] ; then - echo "Error: Missing output on standard output" - exit 1 + echo "Error: Missing output on standard output" + exit 1 fi # There must not be any output on stderr -if [[ -s "${STDERR}" ]] ; then - echo "Error: Unexpected output on standard error" - exit 1 +if [[ -s "${FILTERED}" && $COVER == '' ]] ; then + echo "Error: Unexpected output on standard error" + exit 1 fi # Check counts in output diff --git a/tests/lcov/summary/part1.sh b/tests/lcov/summary/part1.sh index 7e1a55f0..e8a70a89 100755 --- a/tests/lcov/summary/part1.sh +++ b/tests/lcov/summary/part1.sh @@ -5,29 +5,56 @@ # Check lcov --summary output for partial coverage file # +KEEP_GOING=0 +while [ $# -gt 0 ] ; do + + OPT=$1 + case $OPT in + + --coverage ) + shift + COVER_DB=$1 + shift + + COVER="perl -MDevel::Cover=-db,${COVER_DB},-coverage,statement,branch,condition,subroutine " + KEEP_GOING=1 + + ;; + + -v | --verbose ) + set -x + shift + ;; + + * ) + break + ;; + esac +done + STDOUT=summary_part1_stdout.log STDERR=summary_part1_stderr.log -$LCOV --summary "${PART1INFO}" >${STDOUT} 2>${STDERR} +$LCOV --summary "${PART1INFO}" 2> >(grep -v Devel::Cover: > ${STDERR}) >${STDOUT} RC=$? cat "${STDOUT}" "${STDERR}" # Exit code must be zero -if [[ $RC -ne 0 ]] ; then - echo "Error: Non-zero lcov exit code $RC" - exit 1 +if [[ $RC -ne 0 && $KEEP_GOING != 1 ]] ; then + echo "Error: Non-zero lcov exit code $RC" + exit 1 fi # There must be output on stdout if [[ ! -s "${STDOUT}" ]] ; then - echo "Error: Missing output on standard output" - exit 1 + echo "Error: Missing output on standard output" + exit 1 fi # There must not be any output on stderr -if [[ -s "${STDERR}" ]] ; then - echo "Error: Unexpected output on standard error" - exit 1 +if [[ -s "${STDERR}" && $COVER == '' ]] ; then + echo "Error: Unexpected output on standard error" + exit 1 fi # Check counts in output diff --git a/tests/lcov/summary/part2.sh b/tests/lcov/summary/part2.sh index 5258128f..3fbd7cd7 100755 --- a/tests/lcov/summary/part2.sh +++ b/tests/lcov/summary/part2.sh @@ -5,29 +5,56 @@ # Check lcov --summary output for partial coverage file # +KEEP_GOING=0 +while [ $# -gt 0 ] ; do + + OPT=$1 + case $OPT in + + --coverage ) + shift + COVER_DB=$1 + shift + + COVER="perl -MDevel::Cover=-db,${COVER_DB},-coverage,statement,branch,condition,subroutine " + KEEP_GOING=1 + + ;; + + -v | --verbose ) + set -x + shift + ;; + + * ) + break + ;; + esac +done + STDOUT=summary_part2_stdout.log STDERR=summary_part2_stderr.log -$LCOV --summary "${PART2INFO}" >${STDOUT} 2>${STDERR} +$LCOV --summary "${PART2INFO}" 2> >(grep -v Devel::Cover: > ${STDERR}) >${STDOUT} RC=$? cat "${STDOUT}" "${STDERR}" # Exit code must be zero -if [[ $RC -ne 0 ]] ; then - echo "Error: Non-zero lcov exit code $RC" - exit 1 +if [[ $RC -ne 0 && $KEEP_GOING != 1 ]] ; then + echo "Error: Non-zero lcov exit code $RC" + exit 1 fi # There must be output on stdout if [[ ! -s "${STDOUT}" ]] ; then - echo "Error: Missing output on standard output" - exit 1 + echo "Error: Missing output on standard output" + exit 1 fi # There must not be any output on stderr -if [[ -s "${STDERR}" ]] ; then - echo "Error: Unexpected output on standard error" - exit 1 +if [[ -s "${STDERR}" && $COVER == '' ]] ; then + echo "Error: Unexpected output on standard error" + exit 1 fi # Check counts in output diff --git a/tests/lcov/summary/target.sh b/tests/lcov/summary/target.sh index e69b8efa..a8e8fa7f 100755 --- a/tests/lcov/summary/target.sh +++ b/tests/lcov/summary/target.sh @@ -6,29 +6,56 @@ # mkinfo profile. 
# +KEEP_GOING=0 +while [ $# -gt 0 ] ; do + + OPT=$1 + case $OPT in + + --coverage ) + shift + COVER_DB=$1 + shift + + COVER="perl -MDevel::Cover=-db,${COVER_DB},-coverage,statement,branch,condition,subroutine " + KEEP_GOING=1 + + ;; + + -v | --verbose ) + set -x + shift + ;; + + * ) + break + ;; + esac +done + STDOUT=summary_target_stdout.log STDERR=summary_target_stderr.log -$LCOV --summary "${TARGETINFO}" >${STDOUT} 2>${STDERR} +$LCOV --summary "${TARGETINFO}" 2> >(grep -v Devel::Cover: > ${STDERR}) >${STDOUT} RC=$? cat "${STDOUT}" "${STDERR}" # Exit code must be zero -if [[ $RC -ne 0 ]] ; then - echo "Error: Non-zero lcov exit code $RC" - exit 1 +if [[ $RC -ne 0 && $KEEP_GOING != 1 ]] ; then + echo "Error: Non-zero lcov exit code $RC" + exit 1 fi # There must be output on stdout if [[ ! -s "${STDOUT}" ]] ; then - echo "Error: Missing output on standard output" - exit 1 + echo "Error: Missing output on standard output" + exit 1 fi # There must not be any output on stderr -if [[ -s "${STDERR}" ]] ; then - echo "Error: Unexpected output on standard error" - exit 1 +if [[ -s "${STDERR}" && $COVER == '' ]] ; then + echo "Error: Unexpected output on standard error" + exit 1 fi # Check counts in output diff --git a/tests/lcov/summary/zero.sh b/tests/lcov/summary/zero.sh index 12e8b745..09f75d46 100755 --- a/tests/lcov/summary/zero.sh +++ b/tests/lcov/summary/zero.sh @@ -5,29 +5,56 @@ # Check lcov --summary output for zero coverage file # +KEEP_GOING=0 +while [ $# -gt 0 ] ; do + + OPT=$1 + case $OPT in + + --coverage ) + shift + COVER_DB=$1 + shift + + COVER="perl -MDevel::Cover=-db,${COVER_DB},-coverage,statement,branch,condition,subroutine " + KEEP_GOING=1 + + ;; + + -v | --verbose ) + set -x + shift + ;; + + * ) + break + ;; + esac +done + STDOUT=summary_zero_stdout.log STDERR=summary_zero_stderr.log -$LCOV --summary "${ZEROINFO}" >${STDOUT} 2>${STDERR} +$LCOV --summary "${ZEROINFO}" 2> >(grep -v Devel::Cover: 1>&1 > $STDERR) > ${STDOUT} RC=$? cat "${STDOUT}" "${STDERR}" # Exit code must be zero -if [[ $RC -ne 0 ]] ; then - echo "Error: Non-zero lcov exit code $RC" - exit 1 +if [[ $RC -ne 0 && $KEEP_GOING != 1 ]] ; then + echo "Error: Non-zero lcov exit code $RC" + exit 1 fi # There must be output on stdout if [[ ! 
-s "${STDOUT}" ]] ; then - echo "Error: Missing output on standard output" - exit 1 + echo "Error: Missing output on standard output" + exit 1 fi # There must not be any output on stderr -if [[ -s "${STDERR}" ]] ; then - echo "Error: Unexpected output on standard error" - exit 1 +if [[ -s "${STDERR}" && $COVER == '' ]] ; then + echo "Error: Unexpected output on standard error" + exit 1 fi # Check counts in output diff --git a/tests/lcovrc b/tests/lcovrc index 5005f637..db4b335f 100644 --- a/tests/lcovrc +++ b/tests/lcovrc @@ -1,4 +1,8 @@ # lcovrc file used during tests -lcov_function_coverage = 1 -lcov_branch_coverage = 1 +function_coverage = 1 +branch_coverage = 1 + +# disable feature for backward compatibility +# - so I don't have to modify legacy testcases (some new tests _do_ check the feature) +derive_function_end_line = 0 diff --git a/tests/llvm2lcov/Makefile b/tests/llvm2lcov/Makefile new file mode 100644 index 00000000..df6b5065 --- /dev/null +++ b/tests/llvm2lcov/Makefile @@ -0,0 +1,6 @@ +include ../common.mak + +TESTS := llvm2lcov.sh + +clean: + $(shell ./llvm2lcov.sh --clean) diff --git a/tests/llvm2lcov/llvm2lcov.sh b/tests/llvm2lcov/llvm2lcov.sh new file mode 100755 index 00000000..2de54857 --- /dev/null +++ b/tests/llvm2lcov/llvm2lcov.sh @@ -0,0 +1,346 @@ +#!/bin/bash +set +x + +if [[ "x" == ${LCOV_HOME}x ]] ; then + if [ -f ../../bin/lcov ] ; then + LCOV_HOME=../.. + fi +fi + +source ../common.tst + +rm -rf test *.profraw *.profdata *.json *.info report + +clean_cover + +if [[ 1 == $CLEAN_ONLY ]] ; then + exit 0 +fi + +LCOV_OPTS="--branch-coverage $PARALLEL $PROFILE" + +clang++ -fprofile-instr-generate -fcoverage-mapping -fcoverage-mcdc -o test main.cpp +if [ $? != 0 ] ; then + echo "clang++ exec failed" + exit 1 +fi +./test +llvm-profdata merge --sparse *.profraw -o test.profdata +if [ $? != 0 ] ; then + echo "llvm-profdata failed" + exit 1 +fi +llvm-cov export -format=text -instr-profile=test.profdata ./test > test.json +if [ $? != 0 ] ; then + echo "llvm-cov failed" + exit 1 +fi + +# disable function, branch and mcdc coverage +$COVER $LLVM2LCOV_TOOL --rc function_coverage=0 -o test.info test.json +if [ $? != 0 ] ; then + echo "llvm2lcov failed" + exit 1 +fi + +# disable mcdc coverage +$COVER $LLVM2LCOV_TOOL --branch -o test.info test.json +if [ $? != 0 ] ; then + echo "llvm2lcov failed" + exit 1 +fi + +# disable branch coverage +$COVER $LLVM2LCOV_TOOL --mcdc -o test.info test.json +if [ $? != 0 ] ; then + echo "llvm2lcov failed" + exit 1 +fi + +$COVER $LLVM2LCOV_TOOL --branch --mcdc -o test.info test.json +if [ $? != 0 ] ; then + echo "llvm2lcov failed" + exit 1 +fi + +# should be valid data to generate HTML +$COVER $GENHTML_TOOL --flat --branch --mcdc -o report test.info +if [ $? != 0 ] ; then + echo "genhtml failed" + exit 1 +fi + +# run again, excluding 'main.cpp' +$COVER $LLVM2LCOV_TOOL --branch --mcdc -o test.excl.info test.json --exclude '*/main.cpp' +if [ $? != 0 ] ; then + echo "llvm2lcov --exclude failed" + exit 1 +fi + +# should be 3 functions +N=`grep -c "FNA:" test.info` +if [ 3 != "$N" ] ; then + echo "wrong number of functions" + exit 1 +fi + +# look for expected location and function hit counts: +for d in \ + 'FNL:[0-9],20,25' \ + 'FNA:[0-9],2,_Z3fooc' \ + 'FNL:[0-9],27,72' \ + 'FNA:[0-9],1,main' \ + 'FNL:[0-9],2,4' \ + 'FNA:[0-9],1,main.cpp:_ZL3barv' \ + ; do + grep -E $d test.info + if [ 0 != $? 
] ; then + echo "did not find expected function data $d" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + +# lines main.cpp:(31-42) should be hit +for line in $(seq 31 42) ; \ + do \ + grep -E "DA:$line,1" test.info + if [ 0 != $? ] ; then + echo "did not find expected hit on function line $line" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + +# lines main.cpp:14, 45-48 should be 'not hit +for line in 14 45 46 47 48 ; do + grep "DA:$line,0" test.info + if [ 0 != $? ] ; then + echo "did not find expected zero hit on function line $line" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + +# lines main.cpp:30, 43, 51, 65 should be 'not instrumented +for line in 30 43 51 65 ; do + grep "DA:$line" test.info + if [ 0 == $? ] ; then + echo "find unexpected instrumented line $line" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + +# check lines total number +grep -E "LF:55$" test.info +if [ $? != 0 ] ; then + echo "unexpected total number of lines" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +# check lines hit number +grep -E "LH:50$" test.info +if [ $? != 0 ] ; then + echo "unexpected hit number of lines" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# check that branches have right expressions +line=41 +N=`grep -c "BRDA:$line," test.info` +if [ 2 != "$N" ] ; then + echo "did not find expected branches on line $line" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep "BRDA:$line,0,(i <= 0) == True,1" test.info +if [ 0 != $? ] ; then + echo "did not find expected 'BRDA' entry on line $line" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep "BRDA:$line,0,(i <= 0) == False,0" test.info +if [ 0 != $? ] ; then + echo "did not find expected 'BRDA' entry on line $line" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# check that branches defined inside macros are instrumented right +# lines main.cpp:33, 36, 39, 44 should contain branches defined inside macros +for line in 33 36 39 44 ; do + grep -E "BRDA:$line," test.info + if [ 0 != $? ] ; then + echo "did not find expected branches on line $line" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + +# check branches total number +grep -E "BRF:56$" test.info +if [ $? != 0 ] ; then + echo "unexpected total number of branches" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +# check branches hit number +grep -E "BRH:35$" test.info +if [ $? != 0 ] ; then + echo "unexpected hit number of branches" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# LLVM/21 and later generate JSON data files in the new format. +# So, these files should be processed differently. +IFS='.' read -r -a LLVM_VER <<< `clang -dumpversion` +if [ "${LLVM_VER[0]}" -ge 21 ] ; then + # line main.cpp:70 should contain 2 groups of MC/DC entries + line=70 + MCDC_1=`grep -c "MCDC:$line,2," test.info` + MCDC_2=`grep -c "MCDC:$line,3," test.info` + if [ 4 != "$MCDC_1" ] || [ 6 != "$MCDC_2" ] ; then + echo "did not find expected MC/DC entries on line $line" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + # check that MC/DC entries have right + N=`grep -c "MCDC:40,2,[tf],0,1,'i <= 0' in 'BOOL(i > 0) || i <= 0)'" test.info` + if [ 2 != "$N" ] ; then + echo "did not find expected MC/DC entries on line 40" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + # check MC/DC defined in macros + grep -E "MCDC:" test.excl.info + if [ 0 == $? 
] ; then + echo "find unexpected MC/DC" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + for line in 33 36 39 ; do + grep -E "MCDC:$line,[23],[tf]" test.info + if [ 0 != $? ] ; then + echo "did not find expected MC/DC on line $line" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + done + # check MC/DC total number + grep -E "MCF:40$" test.info + if [ $? != 0 ] ; then + echo "unexpected total number of MC/DC entries" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + # check MC/DC hit number + grep -E "MCH:10$" test.info + if [ $? != 0 ] ; then + echo "unexpected hit number of MC/DC entries" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +else + # line main.cpp:70 should contain 2 groups of MC/DC entries + line=70 + MCDC_1=`grep -c "MCDC:$line,2," test.info` + MCDC_2=`grep -c "MCDC:$line,3," test.info` + if [ 4 != "$MCDC_1" ] || [ 6 != "$MCDC_2" ] ; then + echo "did not find expected MC/DC entries on line $line" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + # check that MC/DC entries have right + N=`grep -c "MCDC:63,2,[tf],1,1,'i < 1' in 'a\[i\] && i < 1'" test.info` + if [ 2 != "$N" ] ; then + echo "did not find expected MC/DC entries on line 63" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + # check MC/DC defined in macros + grep -E "MCDC:6,3,[tf]" test.excl.info + if [ 0 != $? ] ; then + echo "did not find expected MC/DC" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + for m in \ + "MCDC:6,2,[tf]" \ + "MCDC:15,2,[tf]" \ + ; do + grep -E $m test.info + if [ 0 != $? ] ; then + echo "did not find expected MC/DC" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + done + # check MC/DC total number + grep -E "MCF:34$" test.info + if [ $? != 0 ] ; then + echo "unexpected total number of MC/DC entries" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + # check MC/DC hit number + grep -E "MCH:10$" test.info + if [ $? != 0 ] ; then + echo "unexpected hit number of MC/DC entries" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +fi + +# generate help message +$COVER ${EXEC_COVER} $LLVM2LCOV_TOOL --help +if [ 0 != $? ] ; then + echo "llvm2lcov help failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# incorrect option +$COVER ${EXEC_COVER} $LLVM2LCOV_TOOL --unsupported +$COVER $LLVM2LCOV_TOOL --unsupported -o test.info test.json +if [ 0 == $? 
] ; then + echo "did not see incorrect option" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +echo "Tests passed" + +if [ "x$COVER" != "x" ] && [ $LOCAL_COVERAGE == 1 ]; then + cover ${COVER_DB} + $PERL2LCOV_TOOL -o ${COVER_DB}/perlcov.info ${COVER_DB} --ignore-errors inconsistent + $GENHTML_TOOL -o ${COVER_DB}/report ${COVER_DB}/perlcov.info --flat --show-navigation --branch --ignore-errors inconsistent +fi diff --git a/tests/llvm2lcov/main.cpp b/tests/llvm2lcov/main.cpp new file mode 100644 index 00000000..0bc287cd --- /dev/null +++ b/tests/llvm2lcov/main.cpp @@ -0,0 +1,72 @@ +#include "test.h" + +#define macro_1(expr) \ + do \ + { \ + } while (expr) + +#define macro_2(i, expr1, expr2) \ + do { \ + ++(i); \ + if (!(expr1)) \ + ++(i); \ + if (!(expr2)) \ + ++(i); \ + } while((expr1) && (expr2)); + +#define macro_3(expr) macro_1((expr)) + +void foo(char a) +{ + if (a) + /* comment + + */ return; +} + +int main() { + int a[] = {3, 12}; /* comment */ + int i; /* comment + comment + comment */ i = 0; + macro_1(i < 0); + macro_1 ( + BOOL(i < 0 && i % 2 == 0)) + ; + macro_2(i, i < 10, i > 0); + i = 0; + macro_3(i < 0); + macro_4(i < 0 || i > 0 && i < 10); + if (BOOL(i > 0) || + i <= 0) + ; + + if (BOOL(i > 0) + && BOOL(i < 0)) + { + ; + } + + for (; i < sizeof(a) / sizeof(*a); ++i) + + { + if ((a[i] % 4 + == 0) + && + (a[i] % 3 + == 0)) + { + ; + } + if (a[i] < 10) + ; + foo(a[i] && i < 1); + } + /* i == 2 + */ + do { + --i; + } while (i); + while(i < 2 && i < 3 && i < 4) { ++i; } for(i = 0; i < 5 && i < 4; ++i) { (void)i; } + return 0; +} diff --git a/tests/llvm2lcov/test.h b/tests/llvm2lcov/test.h new file mode 100644 index 00000000..851617f5 --- /dev/null +++ b/tests/llvm2lcov/test.h @@ -0,0 +1,8 @@ +static inline void bar() +{ + return; +} + +#define macro_4(expr) ((expr) ? 
((void) 0) : bar()) + +#define BOOL(x) (!!(x)) diff --git a/tests/perl2lcov/Makefile b/tests/perl2lcov/Makefile new file mode 100644 index 00000000..ff58029a --- /dev/null +++ b/tests/perl2lcov/Makefile @@ -0,0 +1,6 @@ +include ../common.mak + +TESTS := perltest1.sh + +clean: + $(shell ./perltest1.sh --clean) diff --git a/tests/perl2lcov/example.pl b/tests/perl2lcov/example.pl new file mode 100644 index 00000000..3f8bd2b7 --- /dev/null +++ b/tests/perl2lcov/example.pl @@ -0,0 +1,55 @@ +# example used to test perl2lcov coverage data extract + +use strict; + +sub global1 { + print("called global1 function\n"); + if (exists($ENV{NO_SUCH_VARIABLE})) { + print("unexercised statement in un-hit branch\n"); + } +} + +package space1; +# LCOV_EXCL_START +sub packageFunc { + print("this is a function in space1 - not exercised\n"); +} +# LCOV_EXCL_STOP +sub packageFunc2 { + my $val = shift; + if (exists($ENV{NO_SUCH_VARIABLE}) && + ($ENV{NO_SUCH_VARIABLE} eq 'a' || + $ENV{NO_SUCH_VARIABLE} < 3)) { + print("unexercised statement in more complex conditional\n"); + } + print("packageFunc2 called\n"); +} + +package space2; + +sub packageFunc { + print("this is a function in space2 - not exercised\n"); +} + +sub packageFunc2 { + if (exists($ENV{NO_SUCH_VARIABLE}) && + ($ENV{NO_SUCH_VARIABLE} eq 'a' || + $ENV{NO_SUCH_VARIABLE} < 3)) { + print("unexercised statement in more complex conditional\n"); + } + print("packageFunc2 called\n"); +} + +package main; +# LCOV_EXCL_BR_START +print "simple perl testcase\n"; +global1(); + +space1::packageFunc2(1); + +space2::packageFunc(); +unless (@ARGV) { + print("no args so we entered the branch\n"); +} +exit 0; +# LCOV_EXCL_BR_STOP diff --git a/tests/perl2lcov/perltest1.sh b/tests/perl2lcov/perltest1.sh new file mode 100755 index 00000000..fed03221 --- /dev/null +++ b/tests/perl2lcov/perltest1.sh @@ -0,0 +1,205 @@ +#!/bin/bash +set +x + +if [[ "x" == ${LCOV_HOME}x ]] ; then + if [ -f ../../bin/lcov ] ; then + LCOV_HOME=../.. + fi +fi +source ../common.tst + +rm -rf *.xml *.dat *.info *.json cover_one perl2lcov_report cover_genhtml *.log + +clean_cover + +if [[ 1 == $CLEAN_ONLY ]] ; then + exit 0 +fi + +LCOV_OPTS="--branch-coverage $PARALLEL $PROFILE" + + +perl -MDevel::Cover=-db,cover_one,-coverage,statement,branch,condition,subroutine,-silent,1 example.pl +if [ 0 != $? ] ; then + echo "perl exec failed" + exit 1 +fi + +# error check: try to run perl2lcov before running 'cover': +$COVER ${EXEC_COVER} $PERL2LCOV_TOOL --output err.info --testname test1 ./cover_one 2>&1 | tee err.log +if [ 0 == ${PIPESTATUS[0} ] ; then + echo "expected to fail - but passed" + exit 1 +fi +grep "appears to be empty" err.log +if [ 0 != $? ] ; then + echo "expected error message not found" + exit 1 +fi + +cover cover_one -silent 1 + +$COVER ${EXEC_COVER} $PERL2LCOV_TOOL --output one.info --testname test1 ./cover_one +if [ 0 != $? 
] ; then + echo "perl2lcov failed" + exit 1 +fi + +# did we generate the test name we expected +N=`grep -c TN: one.info` +if [ "$N" != '1' ] ; then + echo "wrong number of tests" + exit 1; +fi +T=`grep TN: one.info` +if [ "$T" != 'TN:test1' ] ; then + echo "wrong test name" + exit 1 +fi + +#should be 2 functions in namespace 1 and namespace 2 +for space in 'space1' 'space2' ; do + N=`grep FNA: one.info | grep -c $space::` + if [ 2 != "$N" ] ; then + echo "wrong number of functions in $space" + exit 1 + fi +done +# expect only one function in global namespace +# rather than looking for known index '4' for this function, would be better +# to look for the name - then find index from name, then find location from index +# but this is easier and testcase is simple. +G=`grep FNA: one.info | grep -v space` +if [ "$G" != 'FNA:4,1,global1' ] ; then + echo "wrong name/location for function in global namespace" + exit 1 +fi +DA=`grep -c -E '^DA:' one.info` +BR=`grep -c -E '^BRDA:' one.info` + +# do region exclusions work? +$COVER ${EXEC_COVER} $PERL2LCOV_TOOL --filter region --output region.info ./cover_one +if [ 0 != $? ] ; then + echo "perl2lcov failed" + exit 1 +fi +# how many lines now? +REGION_DA=`grep -c -E '^DA:' region.info` +REGION_BR=`grep -c -E '^BRDA:' region.info` +if [ $BR -lt $REGION_BR ] ; then + echo "wrong region branch count $BR -> $REGION_BR" + exit 1 +fi +if [ $DA -lt $REGION_DA ] ; then + echo "wrong region line count $DA -> $REGION_DA" + exit 1 +fi + +# how about just branch exclusion... +$COVER ${EXEC_COVER} $PERL2LCOV_TOOL --filter branch_region --output br_region.info ./cover_one +if [ 0 != $? ] ; then + echo "perl2lcov failed" + exit 1 +fi +# how many lines now? +BREGION_DA=`grep -c -E '^DA:' br_region.info` +BREGION_BR=`grep -c -E '^BRDA:' br_egion.info` +if [ $REGION_BR != $BREGION_BR ] ; then + echo "wrong branch region branch count $BR -> $BREGION_BR" + exit 1 +fi +if [ $DA != $BREGION_DA ] ; then + echo "wrong branch region line count $DA -> $BREGION_DA" + exit 1 +fi + + +# run again, collecting checksum.. +$COVER ${EXEC_COVER} $PERL2LCOV_TOOL --output checksum.info --testname testCheck ./cover_one --checksum +if [ 0 != $? ] ; then + echo "perl2lcov checksum failed" +fi + +# do we see the checksums we expect? +# expect to see checksum on each DA line.. +for l in `grep -E '^DA:' checksum.info` ; do + echo $l | grep -E 'DA:[0-9]+,[0-9]+,.+' + if [ 0 != $? ] ; then + echo "no checksum in '$l'" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + + +$COVER ${EXEC_COVER} $PERL2LCOV_TOOL -o x.info --exclude example.pl ./cover_one +if [ 0 == $? ] ; then + echo "expected ERROR_EMPTY not found" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +$COVER ${EXEC_COVER} $PERL2LCOV_TOOL --exclude example.pl --ignore empty ./cover_one -o x.info +if [ 0 != $? ] ; then + echo "didn't ignore ERROR_EMPTY" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +if [ `test ! -z x.info` ] ; then + echo 'expected empty file - but not empty' + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +$COVER ${EXEC_COVER} $PERL2LCOV_TOOL --help +if [ 0 != $? ] ; then + echo "perl2lcov help failed" + exit 1 +fi + +# incorrect option +$COVER ${EXEC_COVER} $PERL2LCOV_TOOL --unsupported +if [ 0 == $? ] ; then + echo "did not see expected error" + exit 1 +fi + +# is the data generated by perl2lcov valid? +$COVER $LCOV_TOOL $LCOV_OPTS --summary one.info +if [ 0 != $? 
] ; then + echo "lcov summary failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# now try running genhtml on the perl2lcov-generated .info file... +perl -MDevel::Cover=-db,cover_genhtml,-silent,1 $LCOV_HOME/bin/genhtml -o perl2lcov_report --flat --show-navigation one.info --branch --validate +if [ 0 != $? ] ; then + echo "genhtml failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +cover cover_genhtml -silent 1 + +# ignore inconsistency: line hit but no branch on line is hit +$COVER ${EXEC_COVER} $PERL2LCOV_TOOL --output genhtml.info --testname genhtml_test ./cover_genhtml --ignore inconsistent +if [ 0 != $? ] ; then + echo "perl2lcov genhtml" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +echo "Tests passed" + +if [ "x$COVER" != "x" ] && [ $LOCAL_COVERAGE == 1 ]; then + cover + $PERL2LCOV_TOOL -o ${COVER_DB}/perlcov.info ${COVER_DB} + $GENHTML_TOOL -o ${COVER_DB}/report ${COVER_DB}/perlcov.info --flat --show-navigation --branch +fi diff --git a/tests/py2lcov/Makefile b/tests/py2lcov/Makefile new file mode 100644 index 00000000..211ef94e --- /dev/null +++ b/tests/py2lcov/Makefile @@ -0,0 +1,6 @@ +include ../common.mak + +TESTS := py2lcov.sh + +clean: + $(shell ./py2lcov.sh --clean) diff --git a/tests/py2lcov/localmodule.py b/tests/py2lcov/localmodule.py new file mode 100644 index 00000000..ca40a998 --- /dev/null +++ b/tests/py2lcov/localmodule.py @@ -0,0 +1,12 @@ + +def enter(s, + a, b): + print("lcocalmodule::enter(%s)" % (s)) + # LCOV_EXCL_BR_START + if a: + print("this is a branch") + # LCOV_EXCL_BR_STOP + +def unusedFunc(): + print("not called"); + return 1; diff --git a/tests/py2lcov/py2lcov.sh b/tests/py2lcov/py2lcov.sh new file mode 100755 index 00000000..7b8aa434 --- /dev/null +++ b/tests/py2lcov/py2lcov.sh @@ -0,0 +1,409 @@ +#!/bin/bash +set +x + +if [[ "x" == ${LCOV_HOME}x ]] ; then + if [ -f ../../bin/lcov ] ; then + LCOV_HOME=../.. + fi +fi + +source ../common.tst + +rm -rf *.xml* *.dat *.info *.json __pycache__ help.txt *.pyc my_cache rpt1 rpt2 + +clean_cover + +if [[ 1 == $CLEAN_ONLY ]] ; then + exit 0 +fi + +PY2LCOV_SCRIPT=${LCOV_HOME}/bin/py2lcov + +if [ ! -f $LCOV_HOME/scripts/getp4version ] ; then + # running test from lcov install + MD5_OPT=',--md5' +fi +# is this git or P4? +if [ 1 == "$IS_P4" ] ; then + VERSION="--version-script ${SCRIPT_DIR}/P4version.pm,--local-edit${MD5_OPT}" + ANNOTATE="--annotate-script ${SCRIPT_DIR}/p4annotate.pm,--cache,./my_cache" + DEPOT=",." +else + # this is git + VERSION="--version-script ${SCRIPT_DIR}/gitversion${MD5_OPT}" + ANNOTATE="--annotate-script ${SCRIPT_DIR}/gitblame.pm,--cache,my_cache" +fi + +if [ $IS_GIT == 0 ] && [ $IS_P4 == 0 ] ; then + VERSION= + ANNOTATE="$ANNOTATE --ignore annotate" +fi + +if [ ! -x $PY2LCOV_SCRIPT ] ; then + echo "missing py2lcov script - dying" + exit 1 +fi + +LCOV_OPTS="--branch-coverage $PARALLEL $PROFILE" + + +if [ '' != "${COVERAGE_COMMAND}" ] ; then + CMD=${COVERAGE_COMMAND} +else + CMD='coverage' + which $CMD + if [ 0 != $? ] ; then + CMD='python3-coverage' # ubuntu? + fi +fi +which $CMD +if [ 0 != $? ] ; then + echo "cannot find 'coverage' or 'python3-coverage'" + echo "unable to run py2lcov - please install python Coverage.py package" + exit 1 +fi + +# some corner cases: +COVERAGE_FILE=./functions.dat $CMD run --branch ./test.py -v -v +if [ 0 != $? ] ; then + echo "coverage functions failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +eval COVERAGE_COMMAND=$CMD ${PYCOV} ${PY2LCOV_TOOL} -o functions.info --cmd $CMD functions.dat $VERSION +if [ 0 != $? 
] ; then + echo "py2lcov failed function example" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# lines test.py:10, 12 should be 'not hit +for line in 10 12 13 ; do + grep "DA:$line,0" functions.info + if [ 0 != $? ] ; then + echo "did not find expected zero hit on function line $line" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done +# look for expected location and function hit counts: +for d in \ + 'FN functions.info' \ + 'FNL:[0-9],10,12' \ + 'FNA:[0-9],0,unusedFunc' \ + 'FNL:[0-9],2,7' \ + 'FNA:[0-9],1,enter' \ + 'FNL:[0-9],10,18' \ + 'FNA:[0-9],0,main.localfunc' \ + 'FNL:[0-9],12,16' \ + 'FNA:[0-9],0,main.localfunc.nested1' \ + 'FNL:[0-9],13,14' \ + 'FNA:[0-9],0,main.localfunc.nested1.nested2' \ + 'FNL:[0-9],5,18' \ + ; do + grep -E $d functions.info + if [ 0 != $? ] ; then + echo "did not find expected function data $d" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + +# should be valid data to generate HTML +$GENHTML_TOOL -o rpt1 $VERSION $ANNOTATE functions.info --validate +if [ 0 != $? ] ; then + echo "genhtml failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +# legacy mode: run with intermediate XML file +COVERAGE_FILE=./functions.dat $CMD xml -o functions.xml +if [ 0 != $? ] ; then + echo "coverage xml failed" + exit 1 +fi + +eval ${PYCOV} ${PY2LCOV_TOOL} -i functions.xml -o functions2.info $VERSION +if [ 0 != $? ] ; then + echo "coverage extract XML failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# result should be identical: +diff functions.info functions2.info +if [ 0 != $? ] ; then + echo "XML vs direct failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# run again, generating checksum data... +eval ${PYCOV} ${PY2LCOV_TOOL} --cmd $CMD -o checksum.info functions.dat $VERSION --checksum +if [ 0 != $? ] ; then + echo "py2lcov failed function example" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# expect to see checksum on each DA line.. +for l in `grep -E '^DA:' checksum.info` ; do + echo $l | grep -E 'DA:[0-9]+,[0-9]+,.+' + if [ 0 != $? ] ; then + echo "no checksum in '$l'" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +done + +if [ $IS_GIT == 0 ] && [ $IS_P4 == 0 ] ; then + D= +else + D=$DEPOT +fi +# should be valid data to generate HTML +$GENHTML_TOOL -o rpt2 $VERSION$D $ANNOTATE functions.info checksum.info --validate +if [ 0 != $? ] ; then + echo "genhtml 2 failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +# run without generating function data: +eval ${PYCOV} ${PY2LCOV_TOOL} functions.dat --cmd $CMD -o no_functions.info $VERSION --no-function +if [ 0 != $? ] ; then + echo "coverage no_functions failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +COUNT=`grep -c FNL: no_function.info` +if [ 0 != $COUNT ] ; then + echo "--no-function flag had no effect" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# run without extracting version +eval ${PYCOV} ${PY2LCOV_TOOL} functions.dat --cmd $CMD -o no_version.info +if [ 0 != $? ] ; then + echo "coverage no_functions failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +COUNT=`grep -c VER: no_version.info` +if [ 0 != $COUNT ] ; then + echo "lack of --version flag had no effect" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# test exclusion +eval ${PYCOV} ${PY2LCOV_TOOL} -o excl.info --cmd $CMD --exclude test.py functions.dat +if [ 0 != $? ] ; then + echo "coverage no_functions failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +grep -E 'SF:.*test.py' excl.info +if [ 0 == $? 
] ; then + echo "exclude was ignored" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + + +# generate help message: +eval ${PYCOV} ${PY2LCOV_TOOL} --help 2>&1 | tee help.txt +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "help failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep 'usage: py2lcov ' help.txt +if [ 0 != $? ] ; then + echo "no help message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +if [ $IS_GIT == 1 ] || [ $IS_P4 == 1 ] ; then + + # some usage errors + eval ${PYCOV} ${PY2LCOV_TOOL} functions.dat -o paramErr.info --cmd $CMD ${VERSION},-x + if [ 0 == $? ] ; then + echo "coverage version did not see error" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + + # run again with --keep-going flag - should generate same result as we see without version script + eval ${PYCOV} ${PY2LCOV_TOOL} functions.dat -o keepGoing.info --cmd $CMD ${VERSION},-x --keep-going --verbose + if [ 0 != $? ] ; then + echo "keepGoing version saw error" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + diff no_version.info keepGoing.info + if [ 0 != $? ] ; then + echo "no_version vs keepGoing failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +fi + + +# usage error: +# can't run this unless we have a new enough 'coverage' version +# to support the --data-file input +if [[ "${PYCOV}" =~ "COVERAGE_FILE=" || "${PY2LCOV_TOOL}" =~ "COVERAGE_FILE=" ]] ; then + ${LCOV_HOME}/bin/py2lcov -o missing.info --cmd $CMD +else + eval ${PYCOV} ${PY2LCOV_TOOL} -o missing.info --cmd $CMD +fi +if [ 0 == $? ] ; then + echo "did not see error with missing input data" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# usage error: +eval ${PYCOV} ${PY2LCOV_TOOL} -o noFile.info run.dat y.xml --cmd $CMD +if [ 0 == $? ] ; then + echo "did not see error with missing input file" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# usage error: +eval ${PYCOV} ${PY2LCOV_TOOL} -o badArg.info --noSuchParam run_help.dat --cmd $CMD +if [ 0 == $? ] ; then + echo "did not see error with unsupported param" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# can't run this unless we have a new enough 'coverage' version +# to support the --data-file input +if [[ "${PYCOV}" =~ "COVERAGE_FILE=" || "${PY2LCOV_TOOL}" =~ "COVERAGE_FILE=" ]] ; then + # can't generate coverage report for this feature... + COVERAGE_FILE=functions.dat ${LCOV_HOME}/bin/py2lcov -o fromEnv.info --cmd $CMD +else + # get input from environment var: + eval COVERAGE_FILE=functions.dat ${PYCOV} ${PY2LCOV_TOOL} -o fromEnv.info --cmd $CMD +fi + +if [ 0 != $? ] ; then + echo "unable to get input file from env. var" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +# result should be identical: +diff no_version.info fromEnv.info +if [ 0 != $? ] ; then + echo "--input vs from env differ" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# aggregate the files - as a syntax check +$COVER $LCOV_TOOL $LCOV_OPTS -o aggregate.info -a functions.info -a no_functions.info $VERSION --ignore inconsistent +if [ 0 != $? ] ; then + echo "lcov aggregate failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +# and the ones that don't have version info... +$COVER $LCOV_TOOL $LCOV_OPTS -o aggregate2.info -a no_version.info -a excl.info +if [ 0 != $? ] ; then + echo "lcov aggregate2 failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +#check that python filtering works as expected... +$COVER $LCOV_TOOL $LCOV_OPTS -o region.info -a no_version.info --filter region +if [ 0 != $? 
] ; then + echo "lcov filter region failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +$COVER $LCOV_TOOL $LCOV_OPTS -o branch_region.info -a no_version.info --filter branch_region +if [ 0 != $? ] ; then + echo "lcov filter branch_region failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +DA=`grep -c -E '^DA:' no_version.info` +BR=`grep -c -E '^BRDA:' no_version.info` + +REGION_DA=`grep -c -E '^DA:' region.info` +REGION_BR=`grep -c -E '^BRDA:' region.info` + +BRANCH_REGION_DA=`grep -c -E '^DA:' branch_region.info` +BRANCH_REGION_BR=`grep -c -E '^BRDA:' branch_region.info` + +if [ "$REGION_BR" != "$BRANCH_REGION_BR" ] ; then + echo "wrong branch region branch count $REGION_BR -> $BRANCH_REGION_BR" + exit 1 +fi +if [ "$DA" != "$BRANCH_REGION_DA" ] ; then + echo "wrong branch region line count $DA -> $BRANCH_REGION_DA" + exit 1 +fi + +if [ "$BR" -le "$REGION_BR" ] ; then + echo "wrong region branch count $BR -> $REGION_BR" + exit 1 +fi +if [ "$DA" -le "$REGION_DA" ] ; then + echo "wrong region line count $DA -> $REGION_DA" + exit 1 +fi + + +echo "Tests passed" + +if [[ "x$COVER" != "x" && $LOCAL_COVERAGE == 1 ]] ; then + cover + ${LCOV_HOME}/bin/perl2lcov -o perlcov.info --testname py2lcov $VERSION ./cover_db + ${PY2LCOV_TOOL} -o pycov.info --testname py2lcov --cmd $CMD $VERSION ${PYCOV_DB} + ${GENHTML_TOOL} -o pycov pycov.info perlcov.info --flat --show-navigation --show-proportion --branch $VERSION $ANNOTATE --ignore inconsistent,version +fi diff --git a/tests/py2lcov/test.py b/tests/py2lcov/test.py new file mode 100644 index 00000000..e12fd980 --- /dev/null +++ b/tests/py2lcov/test.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python3 + +import localmodule + +def main(): + + print("entering main"); + localmodule.enter("hello world", 1, 2) + + def localfunc(): + + def nested1(): + def nested2(): + print('also nested'); + + print('nested') + + return 1 + +# LCOV_EXCL_START +if __name__ == '__main__': + main() +# LCOV_EXCL_STOP diff --git a/tests/xml2lcov/Makefile b/tests/xml2lcov/Makefile new file mode 100644 index 00000000..b23c3488 --- /dev/null +++ b/tests/xml2lcov/Makefile @@ -0,0 +1,6 @@ +include ../common.mak + +TESTS := xml2lcov.sh + +clean: + $(shell ./xml2lcov.sh --clean) diff --git a/tests/xml2lcov/coverage.xml b/tests/xml2lcov/coverage.xml new file mode 100644 index 00000000..ef737368 --- /dev/null +++ b/tests/xml2lcov/coverage.xml @@ -0,0 +1,3373 @@ + + + + + + /Users/apetro/code/github_jasig/uPortal/uportal-war/target/generated-sources/annotations + /Users/apetro/code/github_jasig/uPortal/uportal-war/target/generated-sources/xjc + --source + /Users/apetro/code/github_jasig/uPortal/uportal-war/src/main/java + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
diff --git a/tests/xml2lcov/xml2lcov.sh b/tests/xml2lcov/xml2lcov.sh new file mode 100755 index 00000000..c751e685 --- /dev/null +++ b/tests/xml2lcov/xml2lcov.sh @@ -0,0 +1,167 @@ +#!/bin/bash +set +x + +if [[ "x" == ${LCOV_HOME}x ]] ; then + if [ -f ../../bin/lcov ] ; then + LCOV_HOME=../.. + fi +fi +source ../common.tst + +rm -rf *.info *.json __pycache__ help.txt *.pyc *.dat + +clean_cover + +if [[ 1 == $CLEAN_ONLY ]] ; then + exit 0 +fi + + +# is this git or P4? +if [ 1 == "$USE_GIT" ] ; then + # this is git + VERSION="--version-script ${SCRIPT_DIR}/gitversion.pm" + ANNOTATE="--annotate-script ${SCRIPT_DIR}/gitblame.pm" +else + VERSION="--version-script ${SCRIPT_DIR}/getp4version" + ANNOTATE="--annotate-script ${SCRIPT_DIR}/p4annotate.pm" +fi + +if [ $IS_GIT == 0 ] && [ $IS_P4 == 0 ] ; then + VERSION="$VERSION --ignore usage" +fi + +if [ ! -x $PY2LCOV_SCRIPT ] ; then + echo "missing py2lcov script - dying" + exit 1 +fi + + +LCOV_OPTS="--branch-coverage $PARALLEL $PROFILE" + + +# NOTE: the 'coverage.xml' file here is a copy of the one at +# https://gist.github.com/apetro/fcfffb8c4cdab2c1061d +# except that I removed a huge number of packages - to reduce the +# disk space consumed by the testcase.
There appears to be nothing +# in the remove data that was significant from a test perspective. + +# no source - so can't compute version +eval ${PYCOV} ${XML2LCOV_TOOL} -o test.info coverage.xml -v -v # $VERSION +if [ 0 != $? ] ; then + echo "xml2lcov failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# run with verbosity turned on... +eval ${PYCOV} ${XML2LCOV_TOOL} --verbose --verbose -o test.info coverage.xml +if [ 0 != $? ] ; then + echo "xml2lcov failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# version check should fail - because we have no source +eval ${PYCOV} ${XML2LCOV_TOOL} -o noSource.info coverage.xml $VERSION +if [ 0 == $? ] ; then + echo "xml2lcov missing source for version check " + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# generate help message: +eval ${PYCOV} ${XML2LCOV_TOOL} --help 2>&1 | tee help.txt +if [ 0 != ${PIPESTATUS[0]} ] ; then + echo "help failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi +grep 'usage: xml2lcov ' help.txt +if [ 0 != $? ] ; then + echo "no help message" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# some usage errors +eval ${PYCOV} ${XML2LCOV_TOOL} coverage.xml -o paramErr.info ${VERSION},-x +if [ 0 == $? ] ; then + echo "coverage version did not see error" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +if [ 0 == 1 ] ; then + # disable this one for now + + # run again with --keep-going flag - should generate same result as we see without version script + eval ${PYCOV} ${XML2LCOV_TOOL} coverage.xml -o keepGoing.info ${VERSION},-x --keep-going --verbose + if [ 0 != $? ] ; then + echo "keepGoing version saw error" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi + diff test.info keepGoing.info + if [ 0 != $? ] ; then + echo "no_version vs keepGoing failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi + fi +fi + + +# usage error: +eval ${PYCOV} ${XML2LCOV_TOOL} -o missing.info +if [ 0 == $? ] ; then + echo "did not see error with missing input data" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# usage error: +eval ${PYCOV} ${XML2LCOV_TOOL} -o noFile.info y.xml +if [ 0 == $? ] ; then + echo "did not see error with missing input file" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# usage error: +eval ${PYCOV} ${XML2LCOV_TOOL} -o badArg.info --noSuchParam coverage.xml +if [ 0 == $? ] ; then + echo "did not see error with unsupported param" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +# aggregate the files - as a syntax check +# the file contains inconsistent data for 'org/jasig/portal/EntityTypes.java' +# function 'mapRow' is declared twice at different locations and +# overlaps with a previous decl +$COVER $LCOV_TOOL $LCOV_OPTS -o aggregate.info -a test.info --ignore inconsistent +if [ 0 != $? ] ; then + echo "lcov aggregate failed" + if [ 0 == $KEEP_GOING ] ; then + exit 1 + fi +fi + +echo "Tests passed" + +if [[ "x$COVER" != "x" && $LOCAL_COVERAGE == 1 ]] ; then + cover + ${PY2LCOV_TOOL} -o pycov.info --testname xml2lcov $VERSION ${PYCOV_DB} + ${GENHTML_TOOL} -o pycov pycov.info --flat --show-navigation --show-proportion --branch $VERSION $ANNOTATE --ignore inconsistent,version,annotate +fi
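Nearly every test script touched by this patch repeats the same self-coverage harness: an optional --coverage <db> argument records a Devel::Cover database location, Devel::Cover chatter is filtered out of the captured stderr through process substitution, and the exit-code and stderr checks are relaxed while self-coverage is active. The block below is only a condensed sketch of that recurring skeleton, not an additional test; the log file names are generic placeholders, and $LCOV and ${TARGETINFO} are assumed to be provided by the test environment, as in the scripts above.

#!/bin/bash
# Condensed sketch of the self-coverage harness repeated in the test scripts above.
# STDOUT/STDERR names are generic placeholders; $LCOV and ${TARGETINFO} come from
# the test environment, as in the real scripts.

KEEP_GOING=0
while [ $# -gt 0 ] ; do
    OPT=$1
    case $OPT in
        --coverage )
            # run under Devel::Cover; tolerate failures while instrumented
            shift
            COVER_DB=$1
            shift
            COVER="perl -MDevel::Cover=-db,${COVER_DB},-coverage,statement,branch,condition,subroutine "
            KEEP_GOING=1
            ;;
        -v | --verbose )
            set -x
            shift
            ;;
        * )
            break
            ;;
    esac
done

STDOUT=summary_stdout.log
STDERR=summary_stderr.log

# capture stdout directly; strip Devel::Cover chatter from stderr via process substitution
$LCOV --summary "${TARGETINFO}" 2> >(grep -v Devel::Cover: > ${STDERR}) >${STDOUT}
RC=$?

# exit code must be zero - unless the run is instrumented and we keep going
if [[ $RC -ne 0 && $KEEP_GOING != 1 ]] ; then
    echo "Error: Non-zero lcov exit code $RC"
    exit 1
fi

# stderr must stay empty - unless Devel::Cover instrumentation is active
if [[ -s "${STDERR}" && $COVER == '' ]] ; then
    echo "Error: Unexpected output on standard error"
    exit 1
fi

exit 0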

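As a reading aid for tests/llvm2lcov/llvm2lcov.sh, the end-to-end data flow it exercises boils down to the short pipeline sketched below. The commands are the ones already issued by that script; the only assumptions are that a Clang toolchain with MC/DC support is installed and that $LLVM2LCOV_TOOL and $GENHTML_TOOL are set by the sourced common.tst, as they are in the script itself.

#!/bin/bash
# Sketch of the data flow exercised by tests/llvm2lcov/llvm2lcov.sh:
# instrument with Clang, run, merge the raw profiles, export JSON,
# convert to an lcov tracefile, then render an HTML report.
set -e

clang++ -fprofile-instr-generate -fcoverage-mapping -fcoverage-mcdc -o test main.cpp
./test                                    # produces default.profraw
llvm-profdata merge --sparse *.profraw -o test.profdata
llvm-cov export -format=text -instr-profile=test.profdata ./test > test.json

# convert the llvm-cov JSON export into lcov tracefile format,
# keeping branch and MC/DC data
$LLVM2LCOV_TOOL --branch --mcdc -o test.info test.json

# the result is an ordinary tracefile, so genhtml can consume it directly
$GENHTML_TOOL --flat --branch --mcdc -o report test.info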