diff --git a/.github/workflows/issue-triage.yml b/.github/workflows/issue-triage.yml
deleted file mode 100644
index c279b0a81..000000000
--- a/.github/workflows/issue-triage.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-name: Issue Triage
-on:
-  issues:
-    types:
-      - opened
-jobs:
-  issue_triage:
-    runs-on: ubuntu-latest
-    permissions:
-      issues: write
-    steps:
-    - uses: pierotofy/issuewhiz@v1
-      with:
-        ghToken: ${{ secrets.GITHUB_TOKEN }}
-        openAI: ${{ secrets.OPENAI_TOKEN }}
-        model: gpt-4o-2024-08-06
-        filter: |
-          - "#"
-        variables: |
-          - Q: "A question about using a software or seeking guidance on doing something?"
-          - B: "Reporting an issue or a software bug?"
-          - P: "Describes an issue with processing a set of images or a particular dataset?"
-          - D: "Contains a link to a dataset or images?"
-          - E: "Contains a suggestion for an improvement or a feature request?"
-          - SC: "Describes an issue related to compiling or building source code?"
-        logic: |
-          - 'Q and (not B) and (not P) and (not E) and (not SC) and not (title_lowercase ~= ".*bug: .+")': [comment: "Could we move this conversation over to the forum at https://community.opendronemap.org? The forum is the right place to ask questions (we try to keep the GitHub issue tracker for feature requests and bugs only). Thank you!", close: true, stop: true]
-          - "B and (not P) and (not E) and (not SC)": [label: "software fault", stop: true]
-          - "P and D": [label: "possible software fault", stop: true]
-          - "P and (not D) and (not SC) and (not E)": [comment: "Thanks for the report, but it looks like you didn't include a copy of your dataset for us to reproduce this issue? Please make sure to follow our [issue guidelines](https://github.com/OpenDroneMap/ODM/blob/master/docs/issue_template.md) :pray: ", close: true, stop: true]
-          - "E": [label: enhancement, stop: true]
-          - "SC": [label: "possible software fault"]
-
-        signature: "p.s. I'm just an automated script, not a human being."
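For reviewers unfamiliar with issuewhiz: the deleted workflow classified each new issue into the boolean variables above and then applied the logic rules top to bottom. A minimal sketch of that routing, with the LLM classification step stubbed out with fixed answers (the `route` helper is illustrative only, not part of issuewhiz):

    # Illustrative only: mimics the deleted workflow's routing rules.
    import re

    def route(answers, title):
        # answers: dict of booleans for Q, B, P, D, E, SC (normally produced by the model)
        q, b, p, d, e, sc = (answers[k] for k in ("Q", "B", "P", "D", "E", "SC"))
        if q and not (b or p or e or sc) and not re.match(r".*bug: .+", title.lower()):
            return ["comment:forum", "close"]
        if b and not (p or e or sc):
            return ["label:software fault"]
        if p and d:
            return ["label:possible software fault"]
        if p and not (d or sc or e):
            return ["comment:need dataset", "close"]
        if e:
            return ["label:enhancement"]
        if sc:
            return ["label:possible software fault"]
        return []

    print(route({"Q": True, "B": False, "P": False, "D": False, "E": False, "SC": False},
                "How do I set GSD?"))  # -> ['comment:forum', 'close']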
diff --git a/.github/workflows/publish-docker-gpu.yaml b/.github/workflows/publish-docker-gpu.yaml
deleted file mode 100644
index 9ad834195..000000000
--- a/.github/workflows/publish-docker-gpu.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-name: Publish Docker GPU Images
-
-on:
-  push:
-    branches:
-      - master
-    tags:
-      - v*
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    timeout-minutes: 2880
-    steps:
-    - name: Checkout
-      uses: actions/checkout@v2
-    - name: Set up QEMU
-      uses: docker/setup-qemu-action@v1
-    - name: Set up Docker Buildx
-      uses: docker/setup-buildx-action@v1
-    - name: Login to DockerHub
-      uses: docker/login-action@v1
-      with:
-        username: ${{ secrets.DOCKERHUB_USERNAME }}
-        password: ${{ secrets.DOCKERHUB_TOKEN }}
-    - name: Build and push Docker image
-      id: docker_build
-      uses: docker/build-push-action@v2
-      with:
-        file: ./gpu.Dockerfile
-        platforms: linux/amd64
-        push: true
-        no-cache: true
-        tags: opendronemap/odm:gpu
-    # Trigger NodeODM build
-    - name: Dispatch NodeODM Build Event
-      id: nodeodm_dispatch
-      run: |
-        curl -X POST -u "${{secrets.PAT_USERNAME}}:${{secrets.PAT_TOKEN}}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/OpenDroneMap/NodeODM/actions/workflows/publish-docker-gpu.yaml/dispatches --data '{"ref": "master"}'
diff --git a/.github/workflows/publish-docker.yaml b/.github/workflows/publish-docker.yaml
deleted file mode 100644
index 4aafa0265..000000000
--- a/.github/workflows/publish-docker.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-name: Publish Docker and WSL Images
-
-on:
-  push:
-    branches:
-      - master
-    tags:
-      - v*
-
-jobs:
-  build:
-    runs-on: self-hosted
-    timeout-minutes: 2880
-    steps:
-    - name: Checkout
-      uses: actions/checkout@v2
-    - name: Set up QEMU
-      uses: docker/setup-qemu-action@v1
-    - name: Set up Docker Buildx
-      uses: docker/setup-buildx-action@v1
-      with:
-        config-inline: |
-          [worker.oci]
-            max-parallelism = 1
-    - name: Login to DockerHub
-      uses: docker/login-action@v1
-      with:
-        username: ${{ secrets.DOCKERHUB_USERNAME }}
-        password: ${{ secrets.DOCKERHUB_TOKEN }}
-    # Use the repository information of the checked-out code to format docker tags
-    - name: Docker meta
-      id: docker_meta
-      uses: crazy-max/ghaction-docker-meta@v1
-      with:
-        images: opendronemap/odm
-        tag-semver: |
-          {{version}}
-    - name: Build and push Docker image
-      id: docker_build
-      uses: docker/build-push-action@v2
-      with:
-        file: ./portable.Dockerfile
-        platforms: linux/amd64,linux/arm64
-        push: true
-        no-cache: true
-        tags: |
-          ${{ steps.docker_meta.outputs.tags }}
-          opendronemap/odm:latest
-    # Trigger NodeODM build
-    - name: Dispatch NodeODM Build Event
-      id: nodeodm_dispatch
-      run: |
-        curl -X POST -u "${{secrets.PAT_USERNAME}}:${{secrets.PAT_TOKEN}}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/OpenDroneMap/NodeODM/actions/workflows/publish-docker.yaml/dispatches --data '{"ref": "master"}'
\ No newline at end of file
diff --git a/.github/workflows/publish-windows.yml b/.github/workflows/publish-windows.yml
deleted file mode 100644
index 57cd39e6a..000000000
--- a/.github/workflows/publish-windows.yml
+++ /dev/null
@@ -1,65 +0,0 @@
-name: Publish Windows Setup
-
-on:
-  push:
-    branches:
-      - master
-    tags:
-      - v*
-
-jobs:
-  build:
-    runs-on: windows-2019
-    steps:
-    - name: Checkout
-      uses: actions/checkout@v2
-    - name: Setup Python
-      uses: actions/setup-python@v2
-      with:
-        python-version: '3.8.1'
-        architecture: 'x64'
-    - uses: Jimver/cuda-toolkit@v0.2.4
-      id: cuda-toolkit
-      with:
-        cuda: '11.4.0'
-    - name: Setup cmake
-      uses: jwlawson/actions-setup-cmake@v1.13
-      with:
-        cmake-version: '3.24.x'
-    - name: Extract code signing cert
-      id: code_sign
-      uses: timheuer/base64-to-file@v1
-      with:
-        fileName: 'comodo.pfx'
-        encodedString: ${{ secrets.CODE_SIGNING_CERT }}
-    - name: Install venv
-      run: |
-        python -m pip install virtualenv
-    - name: Build sources
-      run: |
-        python configure.py build
-    - name: Free up space
-      run: |
-        rmdir SuperBuild\download /s /q
-        rmdir SuperBuild\build /s /q
-      shell: cmd
-    - name: Create setup
-      env:
-        CODE_SIGN_CERT_PATH: ${{ steps.code_sign.outputs.filePath }}
-      run: |
-        python configure.py dist --code-sign-cert-path $env:CODE_SIGN_CERT_PATH
-    - name: Upload Setup File
-      uses: actions/upload-artifact@v4
-      with:
-        name: Setup
-        path: dist\*.exe
-    - name: Upload Setup to Release
-      uses: svenstaro/upload-release-action@v2
-      if: startsWith(github.ref, 'refs/tags/')
-      with:
-        repo_token: ${{ secrets.GITHUB_TOKEN }}
-        file: dist/*.exe
-        file_glob: true
-        tag: ${{ github.ref }}
-        overwrite: true
-
diff --git a/.github/workflows/test-build-prs.yaml b/.github/workflows/test-build-prs.yaml
deleted file mode 100644
index ddc127620..000000000
--- a/.github/workflows/test-build-prs.yaml
+++ /dev/null
@@ -1,87 +0,0 @@
-name: Build PRs
-
-on:
-  pull_request:
-
-jobs:
-  docker:
-    runs-on: ubuntu-latest
-    steps:
-    - name: Checkout
-      uses: actions/checkout@v2
-    - name: Set Swap Space
-      uses: pierotofy/set-swap-space@master
-      with:
-        swap-size-gb: 12
-    - name: Set up QEMU
-      uses: docker/setup-qemu-action@v1
-    - name: Set up Docker Buildx
-      uses: docker/setup-buildx-action@v1
-    - name: Build
-      uses: docker/build-push-action@v2
-      with:
-        file: ./portable.Dockerfile
-        platforms: linux/amd64
-        push: false
-
-  # snapcraft:
-  #   runs-on: ubuntu-latest
-  #   strategy:
-  #     matrix:
-  #       architecture:
-  #         - amd64
-  #   steps:
-  #   - name: Checkout
-  #     uses: actions/checkout@v2
-  #   - name: Set Swap Space
-  #     uses: pierotofy/set-swap-space@master
-  #     with:
-  #       swap-size-gb: 12
-  #   - name: Build
-  #     id: build
-  #     uses: diddlesnaps/snapcraft-multiarch-action@v1
-  #     with:
-  #       architecture: ${{ matrix.architecture }}
-  #   - name: Review
-  #     uses: diddlesnaps/snapcraft-review-tools-action@v1
-  #     with:
-  #       snap: ${{ steps.build.outputs.snap }}
-  #       isClassic: 'false'
-
-  windows:
-    runs-on: windows-2019
-    steps:
-    - name: Checkout
-      uses: actions/checkout@v2
-    - name: Setup Python
-      uses: actions/setup-python@v2
-      with:
-        python-version: '3.8.1'
-        architecture: 'x64'
-    - uses: Jimver/cuda-toolkit@v0.2.4
-      id: cuda-toolkit
-      with:
-        cuda: '11.4.0'
-    - name: Setup cmake
-      uses: jwlawson/actions-setup-cmake@v1.13
-      with:
-        cmake-version: '3.24.x'
-    - name: Install venv
-      run: |
-        python -m pip install virtualenv
-    - name: Build sources
-      run: |
-        python configure.py build
-    - name: Free up space
-      run: |
-        rmdir SuperBuild\download /s /q
-        rmdir SuperBuild\build /s /q
-      shell: cmd
-    - name: Create setup
-      run: |
-        python configure.py dist
-    - name: Upload Setup File
-      uses: actions/upload-artifact@v4
-      with:
-        name: Setup
-        path: dist\*.exe
diff --git a/contrib/pc2dem/pc2dem.py b/contrib/pc2dem/pc2dem.py
index dc9743fed..51b601a8f 100755
--- a/contrib/pc2dem/pc2dem.py
+++ b/contrib/pc2dem/pc2dem.py
@@ -47,6 +47,7 @@
         args.type,
         output_type='idw' if args.type == 'dtm' else 'max',
         radiuses=list(map(str, radius_steps)),
+        power=1,
         gapfill=args.gapfill_steps > 0,
         outdir=outdir,
         resolution=args.resolution,
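The `power=1` argument added here flows into PDAL's `writers.gdal` stage (see opendm/dem/pdal.py below), where `power` is the inverse-distance-weighting exponent. A minimal standalone sketch of the pipeline that gets built, with placeholder file names:

    # Sketch of the PDAL pipeline create_dem() assembles; paths are placeholders.
    import json, subprocess

    pipeline = {
        "pipeline": [
            "input.laz",
            {
                "type": "writers.gdal",
                "filename": "dtm.tif",
                "output_type": "idw",  # inverse distance weighting
                "resolution": 0.05,
                "radius": 0.56,
                "power": 1,            # IDW distance exponent (the new parameter)
                "data_type": "float"
            }
        ]
    }
    subprocess.run(["pdal", "pipeline", "--stdin"],
                   input=json.dumps(pipeline).encode(), check=True)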
diff --git a/opendm/align.py b/opendm/align.py
index a5643cedb..39e7e2349 100644
--- a/opendm/align.py
+++ b/opendm/align.py
@@ -81,6 +81,7 @@ def compute_alignment_matrix(input_laz, align_file, stats_dir):
             align_file = repr_func(align_file, input_crs)
             to_delete.append(align_file)
 
+        log.ODM_INFO("Running CODEM %s" % codem.__version__)
         conf = dataclasses.asdict(codem.CodemRunConfig(align_file, input_laz, OUTPUT_DIR=stats_dir))
         fnd_obj, aoi_obj = codem.preprocess(conf)
         fnd_obj.prep()
@@ -102,6 +103,10 @@ def compute_alignment_matrix(input_laz, align_file, stats_dir):
         )
 
         reg = app_reg.get_registration_transformation()
+        if isinstance(reg, dict):
+            reg_matrix = reg['matrix']
+        else:
+            reg_matrix = reg[0]
 
         # Write JSON to stats folder
         with open(os.path.join(stats_dir, "registration.json"), 'w') as f:
@@ -113,7 +118,7 @@ def compute_alignment_matrix(input_laz, align_file, stats_dir):
                 'fine': icp_reg.registration_parameters,
             }, indent=4))
 
-        matrix = np.fromstring(reg['matrix'], dtype=float, sep=' ').reshape((4, 4))
+        matrix = np.fromstring(reg_matrix, dtype=float, sep=' ').reshape((4, 4))
         return matrix
     finally:
         for f in to_delete:
diff --git a/opendm/config.py b/opendm/config.py
old mode 100755
new mode 100644
index b1d195e95..100ae2eeb
--- a/opendm/config.py
+++ b/opendm/config.py
@@ -69,7 +69,6 @@
     'pc_quality': 'opensfm',
     'pc_rectify': 'odm_georeferencing',
     'pc_sample': 'odm_filterpoints',
-    'pc_skip_geometric': 'openmvs',
     'primary_band': 'dataset',
     'project_path': None,
     'radiometric_calibration': 'opensfm',
@@ -127,7 +126,7 @@ def url_string(string):
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
        r'(?::\d+)?' # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
-    
+
    if re.match(regex, string) is None:
        raise argparse.ArgumentTypeError("%s is not a valid URL. The URL must be in the format: http(s)://host[:port]/[?token=]" % string)
    return string
@@ -164,7 +163,7 @@ def config(argv=None, parser=None):
        parser = SettingsParser(description='ODM is a command line toolkit to generate maps, point clouds, 3D models and DEMs from drone, balloon or kite images.',
                                usage='%s [options] <dataset name>' % usage_bin,
                                yaml_file=open(context.settings_path))
-    
+
    parser.add_argument('--project-path',
                        metavar='<path>',
                        action=StoreValue,
@@ -213,7 +212,7 @@ def config(argv=None, parser=None):
                             'More features can be useful for finding more matches between images, '
                             'potentially allowing the reconstruction of areas with little overlap or insufficient features. '
                             'More features also slow down processing. Default: %(default)s'))
-    
+
    parser.add_argument('--feature-type',
                        metavar='<string>',
                        action=StoreValue,
@@ -222,7 +221,7 @@ def config(argv=None, parser=None):
                        help=('Choose the algorithm for extracting keypoints and computing descriptors. '
                             'Can be one of: %(choices)s. Default: '
                             '%(default)s'))
-    
+
    parser.add_argument('--feature-quality',
                        metavar='<string>',
                        action=StoreValue,
@@ -231,7 +230,7 @@ def config(argv=None, parser=None):
                        help=('Set feature extraction quality. Higher quality generates better features, but requires more memory and takes longer. '
                             'Can be one of: %(choices)s. Default: '
                             '%(default)s'))
-    
+
    parser.add_argument('--matcher-type',
                        metavar='<string>',
                        action=StoreValue,
@@ -247,7 +246,7 @@ def config(argv=None, parser=None):
                        default=0,
                        type=int,
                        help='Perform image matching with the nearest images based on GPS exif data. Set to 0 to match by triangulation. Default: %(default)s')
-    
+
    parser.add_argument('--matcher-order',
                        metavar='<positive integer>',
                        action=StoreValue,
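config.py registers every option through custom actions (`StoreValue`, `StoreTrue`) so that settings.yaml defaults and CLI flags share a single code path. A rough sketch of the pattern, assuming a simplified `StoreValue` (the real class in opendm also tracks which options were set explicitly):

    import argparse

    class StoreValue(argparse.Action):
        # Simplified: store the value and remember that the user set it explicitly.
        def __call__(self, parser, namespace, values, option_string=None):
            setattr(namespace, self.dest, values)
            setattr(namespace, self.dest + '_is_set', True)

    parser = argparse.ArgumentParser()
    parser.add_argument('--matcher-neighbors', action=StoreValue, default=0, type=int)
    args = parser.parse_args(['--matcher-neighbors', '8'])
    print(args.matcher_neighbors, getattr(args, 'matcher_neighbors_is_set', False))  # 8 True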
@@ -295,6 +294,29 @@ def config(argv=None, parser=None):
                             'Can be one of: %(choices)s. Default: '
                             '%(default)s'))
 
+    parser.add_argument('--band-alignment-samples',
+                        metavar='<positive integer>',
+                        action=StoreValue,
+                        default=30,
+                        type=int,
+                        help='Randomly sample a subset of input images to find the best alignment transforms. Set to 0 to use all input images. Default: %(default)s')
+
+    parser.add_argument('--band-alignment-local-homography',
+                        action=StoreTrue,
+                        nargs=0,
+                        default=False,
+                        help=('Compute a local homography matrix for each individual raw image and use this matrix to warp the raw image. '
+                              'Otherwise, select the best homography matrix and use this global matrix to warp all raw images. Default: '
+                              '%(default)s'))
+
+    parser.add_argument('--band-alignment-rig-optimization',
+                        action=StoreTrue,
+                        nargs=0,
+                        default=False,
+                        help=('Use the enhanced correlation coefficient (ECC) algorithm to refine the warp matrix generated by the camera rig relatives. '
+                              'This option increases processing time. Default: '
+                              '%(default)s'))
+
    parser.add_argument('--max-concurrency',
                        metavar='<positive integer>',
                        action=StoreValue,
@@ -304,6 +326,15 @@ def config(argv=None, parser=None):
                             'processes. Peak memory requirement is ~1GB per '
                             'thread and 2 megapixel image resolution. Default: %(default)s'))
 
+    parser.add_argument('--depthmap-resolution',
+                        metavar='<positive float>',
+                        action=StoreValue,
+                        type=float,
+                        default=640,
+                        help=('Controls the density of the point cloud by setting the resolution of the depthmap images. Higher values take longer to compute '
+                              'but produce denser point clouds. Overrides the value calculated by --pc-quality. '
+                              'Default: %(default)s'))
+
    parser.add_argument('--use-hybrid-bundle-adjustment',
                        action=StoreTrue,
                        nargs=0,
@@ -326,12 +357,21 @@ def config(argv=None, parser=None):
                        default=False,
                        help='Do not attempt to merge partial reconstructions. This can happen when images do not have sufficient overlap or are isolated. Default: %(default)s')
 
+    parser.add_argument('--min-num-views',
+                        metavar='<integer>',
+                        action=StoreValue,
+                        default=3,
+                        type=int,
+                        help=('The minimum number of views that should reconstruct a point for it to be valid during the point densification process. '
+                              'Use lower values if the raw images have less overlap. Lower values result in denser point clouds but with more noise. '
+                              'Default: %(default)s'))
+
    parser.add_argument('--sky-removal',
                        action=StoreTrue,
                        nargs=0,
                        default=False,
                        help='Automatically compute image masks using AI to remove the sky. Experimental. Default: %(default)s')
-    
+
    parser.add_argument('--bg-removal',
                        action=StoreTrue,
                        nargs=0,
@@ -349,19 +389,19 @@ def config(argv=None, parser=None):
                        nargs=0,
                        default=False,
                        help='Skip generation of a full 3D model. This can save time if you only need 2D results such as orthophotos and DEMs. Default: %(default)s')
-    
+
    parser.add_argument('--skip-report',
                        action=StoreTrue,
                        nargs=0,
                        default=False,
                        help='Skip generation of PDF report. This can save time if you don\'t need a report. Default: %(default)s')
-    
+
    parser.add_argument('--skip-orthophoto',
                        action=StoreTrue,
                        nargs=0,
                        default=False,
                        help='Skip generation of the orthophoto. This can save time if you only need 3D results or DEMs. Default: %(default)s')
-    
+
    parser.add_argument('--ignore-gsd',
                        action=StoreTrue,
                        nargs=0,
@@ -371,13 +411,13 @@ def config(argv=None, parser=None):
                             'Ordinarily, GSD estimates are used to cap the maximum resolution of image outputs and resizes images when necessary, resulting in faster processing and lower memory usage. '
                             'Since GSD is an estimate, sometimes ignoring it can result in slightly better image output quality. '
                             'Never set --ignore-gsd to true unless you are positive you need it, and even then: do not use it. Default: %(default)s')
-    
+
    parser.add_argument('--no-gpu',
                        action=StoreTrue,
                        nargs=0,
                        default=False,
                        help='Do not use GPU acceleration, even if it\'s available. Default: %(default)s')
-    
+
    parser.add_argument('--mesh-size',
                        metavar='<positive integer>',
                        action=StoreValue,
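The three `--band-alignment-*` options above control how secondary multispectral bands are warped onto the primary band: either one global homography per band, or a local one per image. The warp step itself is plain OpenCV; a sketch assuming a 3x3 homography has already been computed:

    import cv2
    import numpy as np

    def warp_band(image, warp_matrix, dimension):
        # dimension is (width, height) of the primary band image
        return cv2.warpPerspective(image, warp_matrix.astype(np.float32), dimension)

    # Global mode: reuse one best matrix for every image of a band.
    # Local mode: look up the matrix computed for this specific filename instead.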
@@ -462,7 +502,7 @@ def config(argv=None, parser=None):
                        nargs=0,
                        default=False,
                        help='Export the georeferenced point cloud in CSV format. Default: %(default)s')
-    
+
    parser.add_argument('--pc-las',
                        action=StoreTrue,
                        nargs=0,
@@ -488,7 +528,7 @@ def config(argv=None, parser=None):
                        default=5,
                        help='Filters the point cloud by removing points that deviate more than N standard deviations from the local mean. Set to 0 to disable filtering. '
                             'Default: %(default)s')
-    
+
    parser.add_argument('--pc-sample',
                        metavar='<positive float>',
                        action=StoreValue,
@@ -497,11 +537,34 @@ def config(argv=None, parser=None):
                        help='Filters the point cloud by keeping only a single point around a radius N (in meters). This can be useful to limit the output resolution of the point cloud and remove duplicate points. Set to 0 to disable sampling. '
                             'Default: %(default)s')
 
-    parser.add_argument('--pc-skip-geometric',
+    parser.add_argument('--pc-tile',
+                        action=StoreTrue,
+                        nargs=0,
+                        default=False,
+                        help='Reduce the memory usage needed for depthmap fusion by splitting large scenes into tiles. Turn this on if your machine doesn\'t have much RAM and/or you\'ve set --pc-quality to high or ultra. Experimental. '
+                             'Default: %(default)s')
+
+    parser.add_argument('--pc-sharp',
+                        metavar='<positive integer>',
+                        action=StoreValue,
+                        default=5,
+                        type=int,
+                        help='Improve the accuracy of the point cloud by applying sharpening filters; this reduces the completeness of the reconstruction. '
+                             'Default: %(default)s')
+
+    parser.add_argument('--pc-geometric',
                        action=StoreTrue,
                        nargs=0,
                        default=False,
-                        help='Geometric estimates improve the accuracy of the point cloud by computing geometrically consistent depthmaps but may not be usable in larger datasets. This flag disables geometric estimates. '
+                        help='Improve the accuracy of the point cloud by computing geometrically consistent depthmaps. This increases processing time, but can improve results in urban scenes. '
+                             'Default: %(default)s')
+
+    parser.add_argument('--pc-ground-filter',
+                        metavar='<string>',
+                        action=StoreValue,
+                        default='smrf',
+                        choices=['smrf', 'pmf'],
+                        help='Set ground point classification filter. Can be one of: %(choices)s. '
                             'Default: %(default)s')
 
    parser.add_argument('--smrf-scalar',
@@ -519,7 +582,7 @@ def config(argv=None, parser=None):
                        default=0.15,
                        help='Simple Morphological Filter slope parameter (rise over run). '
                             'Default: %(default)s')
-    
+
    parser.add_argument('--smrf-threshold',
                        metavar='<positive float>',
                        action=StoreValue,
@@ -527,7 +590,7 @@ def config(argv=None, parser=None):
                        default=0.5,
                        help='Simple Morphological Filter elevation threshold parameter (meters). '
                             'Default: %(default)s')
-    
+
    parser.add_argument('--smrf-window',
                        metavar='<positive float>',
                        action=StoreValue,
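The `--smrf-*` values map one-to-one onto PDAL's `filters.smrf` stage. A sketch of the equivalent standalone pipeline, with placeholder paths; the `filters.range` stage keeps only points classified as ground (class 2):

    import json, subprocess

    pipeline = {
        "pipeline": [
            "point_cloud.laz",
            {
                "type": "filters.smrf",
                "scalar": 1.25,     # --smrf-scalar
                "slope": 0.15,      # --smrf-slope
                "threshold": 0.5,   # --smrf-threshold
                "window": 18.0      # --smrf-window
            },
            {"type": "filters.range", "limits": "Classification[2:2]"},
            "ground.laz"
        ]
    }
    subprocess.run(["pdal", "pipeline", "--stdin"],
                   input=json.dumps(pipeline).encode(), check=True)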
@@ -536,6 +599,40 @@ def config(argv=None, parser=None):
                        help='Simple Morphological Filter window radius parameter (meters). '
                             'Default: %(default)s')
 
+    parser.add_argument('--texturing-use-dtm',
+                        action=StoreTrue,
+                        nargs=0,
+                        default=False,
+                        help=('Use the 2.5D DTM instead of the DSM when projecting textures. Default: %(default)s'))
+
+    parser.add_argument('--texturing-data-term',
+                        metavar='<string>',
+                        action=StoreValue,
+                        default='gmi',
+                        choices=['gmi', 'area'],
+                        help=('When texturing the 3D mesh, for each triangle, choose to prioritize images with sharp features (gmi) or those that cover the largest area (area). Default: %(default)s'))
+
+    parser.add_argument('--texturing-outlier-removal-type',
+                        metavar='<string>',
+                        action=StoreValue,
+                        default='gauss_clamping',
+                        choices=['none', 'gauss_clamping', 'gauss_damping'],
+                        help=('Type of photometric outlier removal method. Can be one of: %(choices)s. Default: %(default)s'))
+
+    parser.add_argument('--thermal-texturing-data-term',
+                        metavar='<string>',
+                        action=StoreValue,
+                        default='gmi',
+                        choices=['gmi', 'area'],
+                        help=('Choose to prioritize thermal images with sharp features (gmi) or those that cover the largest area (area). Default: %(default)s'))
+
+    parser.add_argument('--thermal-texturing-outlier-removal-type',
+                        metavar='<string>',
+                        action=StoreValue,
+                        default='gauss_clamping',
+                        choices=['none', 'gauss_clamping', 'gauss_damping'],
+                        help=('Type of thermal photometric outlier removal method. Can be one of: %(choices)s. Default: %(default)s'))
+
    parser.add_argument('--texturing-skip-global-seam-leveling',
                        action=StoreTrue,
                        nargs=0,
@@ -586,7 +683,7 @@ def config(argv=None, parser=None):
                             'EPSG:<code> or <+proj definition>\n'
                             'image_name geo_x geo_y geo_z [yaw (degrees)] [pitch (degrees)] [roll (degrees)] [horz accuracy (meters)] [vert accuracy (meters)]\n'
                             'Default: %(default)s'))
-    
+
    parser.add_argument('--align',
                        metavar='<path string>',
                        action=StoreValue,
@@ -616,6 +713,24 @@ def config(argv=None, parser=None):
                        help='Use this tag to build a DSM (Digital Surface Model, ground + objects) using a progressive '
                             'morphological filter. Check the --dem* parameters for finer tuning. Default: %(default)s')
 
+    parser.add_argument('--dtm-interpolation',
+                        metavar='<string>',
+                        action=StoreValue,
+                        default='idw',
+                        choices=['min', 'max', 'mean', 'idw'],
+                        help=('Choose the DTM interpolation method to calculate a cell value using points within a given radius. '
+                              'Can be one of: %(choices)s. Default: '
+                              '%(default)s'))
+
+    parser.add_argument('--dsm-interpolation',
+                        metavar='<string>',
+                        action=StoreValue,
+                        default='idw',
+                        choices=['min', 'max', 'mean', 'idw'],
+                        help=('Choose the DSM interpolation method to calculate a cell value using points within a given radius. '
+                              'Can be one of: %(choices)s. Default: '
+                              '%(default)s'))
+
    parser.add_argument('--dem-gapfill-steps',
                        metavar='<positive integer>',
                        action=StoreValue,
@@ -642,7 +757,7 @@ def config(argv=None, parser=None):
                        type=int,
                        help='Decimate the points before generating the DEM. 1 is no decimation (full quality). '
                             '100 decimates ~99%% of the points. Useful for speeding up generation of DEM results in very large datasets. Default: %(default)s')
-    
+
    parser.add_argument('--dem-euclidean-map',
                        action=StoreTrue,
                        nargs=0,
@@ -675,13 +790,13 @@ def config(argv=None, parser=None):
                        default=False,
                        help='Set this parameter if you want to generate a PNG rendering of the orthophoto. '
                             'Default: %(default)s')
-    
+
    parser.add_argument('--orthophoto-kmz',
                        action=StoreTrue,
                        nargs=0,
                        default=False,
                        help='Set this parameter if you want to generate a Google Earth (KMZ) rendering of the orthophoto. '
-                             'Default: %(default)s') 
+                             'Default: %(default)s')
 
    parser.add_argument('--orthophoto-compression',
                        metavar='<string>',
@@ -690,7 +805,7 @@ def config(argv=None, parser=None):
                        choices=['JPEG', 'LZW', 'PACKBITS', 'DEFLATE', 'LZMA', 'NONE'],
                        default='DEFLATE',
                        help='Set the compression to use for orthophotos. Can be one of: %(choices)s. Default: %(default)s')
-    
+
    parser.add_argument('--orthophoto-cutline',
                        action=StoreTrue,
                        nargs=0,
@@ -729,7 +844,7 @@ def config(argv=None, parser=None):
                        action=StoreValue,
                        metavar='<positive integer>',
                        default=0,
-                        help='Override the rolling shutter readout time for your camera sensor (in milliseconds), instead of using the rolling shutter readout database. ' 
+                        help='Override the rolling shutter readout time for your camera sensor (in milliseconds), instead of using the rolling shutter readout database. '
                             'Note that not all cameras are present in the database. Set to 0 to use the database value. '
                             'Default: %(default)s')
 
@@ -768,7 +883,7 @@ def config(argv=None, parser=None):
                        default=4000,
                        metavar='<positive integer>',
                        help='The maximum output resolution of extracted video frames in pixels. Default: %(default)s')
-    
+
    parser.add_argument('--split',
                        type=int,
                        action=StoreValue,
@@ -833,7 +948,7 @@ def config(argv=None, parser=None):
                        help=('Use images\' GPS exif data for reconstruction, even if there are GCPs present.'
                              'This flag is useful if you have high precision GPS measurements. '
                              'If there are no GCPs, this flag does nothing. Default: %(default)s'))
-    
+
    parser.add_argument('--gps-accuracy',
                        type=float,
                        action=StoreValue,
@@ -868,7 +983,7 @@ def config(argv=None, parser=None):
                        default="auto",
                        type=str,
                        help=('When processing multispectral datasets, you can specify the name of the primary band that will be used for reconstruction. '
-                              'It\'s recommended to choose a band which has sharp details and is in focus. ' 
+                              'It\'s recommended to choose a band which has sharp details and is in focus. '
                              'Default: %(default)s'))
 
    parser.add_argument('--skip-band-alignment',
@@ -923,5 +1038,5 @@ def config(argv=None, parser=None):
    except exceptions.NodeConnectionError as e:
        log.ODM_ERROR("Cluster node seems to be offline: %s" % str(e))
        sys.exit(1)
-    
+
    return args
diff --git a/opendm/dem/codem/codem-0.25.3.dev0-py3-none-any.whl b/opendm/dem/codem/codem-0.25.3.dev0-py3-none-any.whl
new file mode 100644
index 000000000..c2ea93704
Binary files /dev/null and b/opendm/dem/codem/codem-0.25.3.dev0-py3-none-any.whl differ
diff --git a/opendm/dem/commands.py b/opendm/dem/commands.py
old mode 100755
new mode 100644
index 8113621e5..17f133560
--- a/opendm/dem/commands.py
+++ b/opendm/dem/commands.py
@@ -30,15 +30,16 @@
 except:
     pass
 
-def classify(lasFile, scalar, slope, threshold, window):
+def classify(lasFile, scalar, slope, threshold, window, filter='smrf'):
     start = datetime.now()
 
     try:
-        pdal.run_pdaltranslate_smrf(lasFile, lasFile, scalar, slope, threshold, window)
-    except:
-        log.ODM_WARNING("Error creating classified file %s" % lasFile)
+        pdal.run_pdal_translate(lasFile, lasFile, scalar, slope, threshold, window, filter)
+
+        log.ODM_INFO('Created %s in %s' % (lasFile, datetime.now() - start))
+    except Exception as e:
+        log.ODM_WARNING("Error creating classified file %s %s" % (lasFile, str(e)))
 
-    log.ODM_INFO('Created %s in %s' % (lasFile, datetime.now() - start))
     return lasFile
 
 def rectify(lasFile, reclassify_threshold=5, min_area=750, min_points=500):
@@ -61,12 +62,12 @@ def rectify(lasFile, reclassify_threshold=5, min_area=750, min_points=500):
 
 error = None
 
-def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56'], gapfill=True,
+def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56'], power=1, gapfill=True,
                 outdir='', resolution=0.1, max_workers=1, max_tile_size=4096,
                 decimation=None, with_euclidean_map=False,
                 apply_smoothing=True, max_tiles=None):
    """ Create DEM from multiple radii, and optionally gapfill """
-    
+
    start = datetime.now()
    kwargs = {
        'input': input_point_cloud,
@@ -148,15 +149,15 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
             '"{tiles_vrt}" "{geotiff_small}"'.format(**kwargs))
 
        # Fill scaled
-        gdal_fillnodata(['.', 
-                        '-co', 'NUM_THREADS=%s' % kwargs['threads'], 
+        gdal_fillnodata(['.',
+                        '-co', 'NUM_THREADS=%s' % kwargs['threads'],
                        '-co', 'BIGTIFF=IF_SAFER',
                        '-co', 'COMPRESS=DEFLATE',
                        '--config', 'GDAL_CACHE_MAX', str(kwargs['max_memory']) + '%',
                        '-b', '1',
                        '-of', 'GTiff',
                        kwargs['geotiff_small'], kwargs['geotiff_small_filled']])
-        
+
        # Merge filled scaled DEM with unfilled DEM using bilinear interpolation
        run('gdalbuildvrt -resolution highest -r bilinear "%s" "%s" "%s"' % (merged_vrt_path, geotiff_small_filled_path, tiles_vrt_path))
        run('gdal_translate '
@@ -185,7 +186,7 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
    if with_euclidean_map:
        emap_path = io.related_file_path(output_path, postfix=".euclideand")
        compute_euclidean_map(tiles_vrt_path, emap_path, overwrite=True)
-    
+
    for cleanup_file in [tiles_vrt_path, tiles_file_list, merged_vrt_path, geotiff_small_path, geotiff_small_filled_path]:
        if os.path.exists(cleanup_file): os.remove(cleanup_file)
 
@@ -212,7 +213,7 @@ def compute_euclidean_map(geotiff_path, output_path, overwrite=False):
 
        if gdal_proximity is not None:
            try:
-                gdal_proximity(['gdal_proximity.py', 
+                gdal_proximity(['gdal_proximity.py',
                                geotiff_path, output_path, '-values', str(nodata),
                                '-co', 'TILED=YES',
                                '-co', 'BIGTIFF=IF_SAFER',
@@ -227,13 +228,13 @@ def compute_euclidean_map(geotiff_path, output_path, overwrite=False):
                log.ODM_WARNING("Cannot compute euclidean distance file: %s" % output_path)
        else:
            log.ODM_WARNING("Cannot compute euclidean map, gdal_proximity is missing")
-            
+
    else:
        log.ODM_INFO("Found a euclidean distance map: %s" % output_path)
 
    return output_path
 
-def median_smoothing(geotiff_path, output_path, window_size=512, num_workers=1, radius=4):
+def median_smoothing(geotiff_path, output_path, window_size=512, num_workers=1, radius=2):
    """ Apply median smoothing """
    start = datetime.now()
 
@@ -262,5 +263,5 @@ def get_dem_radius_steps(stats_file, steps, resolution, multiplier = 1.0):
    radius_steps = [point_cloud.get_spacing(stats_file, resolution) * multiplier]
    for _ in range(steps - 1):
        radius_steps.append(radius_steps[-1] * math.sqrt(2))
-    
-    return radius_steps
\ No newline at end of file
+
+    return radius_steps
diff --git a/opendm/dem/pdal.py b/opendm/dem/pdal.py
index 2c0b8e4e1..b85a8a96a 100644
--- a/opendm/dem/pdal.py
+++ b/opendm/dem/pdal.py
@@ -49,7 +49,7 @@ def json_base():
    return {'pipeline': []}
 
-def json_gdal_base(filename, output_type, radius, resolution=1, bounds=None):
+def json_gdal_base(filename, output_type, radius, power=1, resolution=1, bounds=None):
    """ Create initial JSON for PDAL pipeline containing a Writer element """
    json = json_base()
@@ -57,6 +57,7 @@ def json_gdal_base(filename, output_type, radius, resolution=1, bounds=None):
        'type': 'writers.gdal',
        'resolution': resolution,
        'radius': radius,
+        'power': power, # IDW distance exponent
        'filename': filename,
        'output_type': output_type,
        'data_type': 'float'
@@ -75,7 +76,7 @@ def json_las_base(fout):
    json = json_base()
    json['pipeline'].insert(0, {
        'type': 'writers.las',
-        'filename': fout 
+        'filename': fout
    })
    return json
 
@@ -152,19 +153,34 @@ def run_pipeline(json):
    os.remove(jsonfile)
 
-def run_pdaltranslate_smrf(fin, fout, scalar, slope, threshold, window):
-    """ Run PDAL translate """
-    cmd = [
-        'pdal',
-        'translate',
-        '-i %s' % fin,
-        '-o %s' % fout,
-        'smrf',
-        '--filters.smrf.scalar=%s' % scalar,
-        '--filters.smrf.slope=%s' % slope,
-        '--filters.smrf.threshold=%s' % threshold,
-        '--filters.smrf.window=%s' % window,
-    ]
+def run_pdal_translate(fin, fout, scalar, slope, threshold, window, filter='smrf'):
+    """ Run PDAL translate with ground point classification filter """
+    if filter == 'pmf':
+        cmd = [
+            'pdal',
+            'translate',
+            '-i %s' % fin,
+            '-o %s' % fout,
+            'pmf',
+            '--filters.pmf.exponential=%s' % ('true' if scalar > 1 else 'false'),
+            '--filters.pmf.slope=%s' % slope,
+            '--filters.pmf.max_distance=%s' % threshold,
+            '--filters.pmf.max_window_size=%s' % (window * 2),
+        ]
+
+    # default to SMRF filter
+    else:
+        cmd = [
+            'pdal',
+            'translate',
+            '-i %s' % fin,
+            '-o %s' % fout,
+            'smrf',
+            '--filters.smrf.scalar=%s' % scalar,
+            '--filters.smrf.slope=%s' % slope,
+            '--filters.smrf.threshold=%s' % threshold,
+            '--filters.smrf.window=%s' % window,
+        ]
 
    system.run(' '.join(cmd))
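Note how the PMF branch reinterprets the SMRF-named arguments: `scalar > 1` toggles `exponential`, `threshold` becomes `max_distance`, and `window * 2` becomes `max_window_size`. A usage sketch of the wrapper (file names are placeholders):

    from opendm.dem import pdal

    # SMRF (default): arguments map one-to-one onto filters.smrf
    pdal.run_pdal_translate("dense.laz", "classified.laz", 1.25, 0.15, 0.5, 18.0)

    # PMF: the same arguments are re-interpreted for filters.pmf
    pdal.run_pdal_translate("dense.laz", "classified.laz", 1.25, 0.15, 0.5, 18.0, filter='pmf')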
diff --git a/opendm/gsd.py b/opendm/gsd.py
index ebfc99ef3..e46d4010c 100644
--- a/opendm/gsd.py
+++ b/opendm/gsd.py
@@ -81,7 +81,7 @@ def cap_resolution(resolution, reconstruction_json, gsd_error_estimate = 0.1, gs
    if ignore_gsd:
        return resolution
 
-    gsd = opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=has_gcp or ignore_resolution)
+    gsd = opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=has_gcp or ignore_resolution or gsd_error_estimate==0)
 
    if gsd is not None:
        gsd = gsd * (1 - gsd_error_estimate) * gsd_scaling
@@ -102,7 +102,7 @@ def opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=False):
    """
    Computes the average Ground Sampling Distance of an OpenSfM reconstruction.
    :param reconstruction_json path to OpenSfM's reconstruction.json
-    :return Ground Sampling Distance value (cm / pixel) or None if 
+    :return Ground Sampling Distance value (cm / pixel) or None if
        a GSD estimate cannot be compute
    """
    if not os.path.isfile(reconstruction_json):
@@ -132,17 +132,17 @@ def opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=False):
            if not focal_ratio:
                log.ODM_WARNING("Cannot parse focal values from %s. This is likely an unsupported camera model." % reconstruction_json)
                return None
-            
-            gsds.append(calculate_gsd_from_focal_ratio(focal_ratio, 
-                                                    shot_height - ground_height, 
+
+            gsds.append(calculate_gsd_from_focal_ratio(focal_ratio,
+                                                    shot_height - ground_height,
                                                    camera['width']))
-        
+
    if len(gsds) > 0:
        mean = np.mean(gsds)
        if mean < 0:
            log.ODM_WARNING("Negative GSD estimated, this might indicate a flipped Z-axis.")
        return abs(mean)
-    
+
    return None
 
@@ -160,8 +160,8 @@ def calculate_gsd(sensor_width, flight_height, focal_length, image_width):
    >>> calculate_gsd(13.2, 100, 8.8, 0)
    """
    if sensor_width != 0:
-        return calculate_gsd_from_focal_ratio(focal_length / sensor_width, 
-                                            flight_height, 
+        return calculate_gsd_from_focal_ratio(focal_length / sensor_width,
+                                            flight_height,
                                            image_width)
    else:
        return None
 
@@ -176,5 +176,5 @@ def calculate_gsd_from_focal_ratio(focal_ratio, flight_height, image_width):
    """
    if focal_ratio == 0 or image_width == 0:
        return None
-    
+
    return ((flight_height * 100) / image_width) / focal_ratio
\ No newline at end of file
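Since `calculate_gsd_from_focal_ratio` is just `((flight_height * 100) / image_width) / focal_ratio`, a worked example helps sanity-check the units (values are illustrative):

    from opendm.gsd import calculate_gsd

    # 13.2 mm sensor width, 8.8 mm focal length, 100 m flight height, 5472 px image width
    gsd = calculate_gsd(13.2, 100, 8.8, 5472)
    # focal_ratio = 8.8 / 13.2 ≈ 0.667
    # gsd = (100 * 100 / 5472) / 0.667 ≈ 2.74 cm / pixel
    print(round(gsd, 2))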
diff --git a/opendm/mesh.py b/opendm/mesh.py
index 584458bfb..997dec2af 100644
--- a/opendm/mesh.py
+++ b/opendm/mesh.py
@@ -9,7 +9,8 @@ from scipy import signal
 import numpy as np
 
-def create_25dmesh(inPointCloud, outMesh, radius_steps=["0.05"], dsm_resolution=0.05, depth=8, samples=1, maxVertexCount=100000, available_cores=None, method='gridded', smooth_dsm=True, max_tiles=None):
+def create_25dmesh(inPointCloud, outMesh, radius_steps=["0.05"], dsm_resolution=0.05, depth=8, samples=1, maxVertexCount=100000,
+                   available_cores=None, method='gridded', smooth_dsm=True, use_dtm=False, max_tiles=None):
    # Create DSM from point cloud
 
    # Create temporary directory
@@ -22,11 +23,15 @@ def create_25dmesh(inPointCloud, outMesh, radius_steps=["0.05"], dsm_resolution=
 
    log.ODM_INFO('Creating DSM for 2.5D mesh')
 
+    mesh_dem = 'mesh_dtm' if use_dtm else 'mesh_dsm'
+    output_type = 'idw' if use_dtm else 'max'
+
    commands.create_dem(
            inPointCloud,
-            'mesh_dsm',
-            output_type='max',
+            mesh_dem,
+            output_type=output_type,
            radiuses=radius_steps,
+            power=1,
            gapfill=True,
            outdir=tmp_directory,
            resolution=dsm_resolution,
@@ -36,12 +41,12 @@ def create_25dmesh(inPointCloud, outMesh, radius_steps=["0.05"], dsm_resolution=
        )
 
    if method == 'gridded':
-        mesh = dem_to_mesh_gridded(os.path.join(tmp_directory, 'mesh_dsm.tif'), outMesh, maxVertexCount, maxConcurrency=max(1, available_cores))
+        mesh = dem_to_mesh_gridded(os.path.join(tmp_directory, mesh_dem + '.tif'), outMesh, maxVertexCount, maxConcurrency=max(1, available_cores))
    elif method == 'poisson':
-        dsm_points = dem_to_points(os.path.join(tmp_directory, 'mesh_dsm.tif'), os.path.join(tmp_directory, 'dsm_points.ply'))
-        mesh = screened_poisson_reconstruction(dsm_points, outMesh, depth=depth, 
-                                                samples=samples, 
-                                                maxVertexCount=maxVertexCount, 
+        dsm_points = dem_to_points(os.path.join(tmp_directory, mesh_dem + '.tif'), os.path.join(tmp_directory, 'dsm_points.ply'))
+        mesh = screened_poisson_reconstruction(dsm_points, outMesh, depth=depth,
+                                                samples=samples,
+                                                maxVertexCount=maxVertexCount,
                                                threads=max(1, available_cores - 1)), # poissonrecon can get stuck on some machines if --threads == all cores
    else:
        raise 'Not a valid method: ' + method
@@ -85,7 +90,7 @@ def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, maxConcurrency=1):
    outMeshDirty = os.path.join(mesh_path, "{}.dirty{}".format(basename, ext))
 
-    # This should work without issues most of the times, 
+    # This should work without issues most of the times,
    # but just in case we lower maxConcurrency if it fails.
    while True:
        try:
@@ -112,7 +117,7 @@ def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, maxConcurrency=1):
                raise e
 
-    # Cleanup and reduce vertex count if necessary 
+    # Cleanup and reduce vertex count if necessary
    # (as dem2mesh cannot guarantee that we'll have the target vertex count)
    cleanupArgs = {
        'reconstructmesh': context.omvs_reconstructmesh_path,
@@ -146,7 +151,7 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples =
    outMeshDirty = os.path.join(mesh_path, "{}.dirty{}".format(basename, ext))
    if os.path.isfile(outMeshDirty):
        os.remove(outMeshDirty)
-    
+
    # Since PoissonRecon has some kind of a race condition on ppc64el, and this helps...
    if platform.machine() == 'ppc64le':
        log.ODM_WARNING("ppc64le platform detected, forcing single-threaded operation for PoissonRecon")
@@ -175,7 +180,7 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples =
                '--linearFit '.format(**poissonReconArgs))
        except Exception as e:
            log.ODM_WARNING(str(e))
-        
+
        if os.path.isfile(outMeshDirty):
            break # Done!
        else:
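With the new `use_dtm` flag, the 2.5D mesh can be rasterized from a terrain model instead of a surface model. A call sketch under assumed paths and radius steps:

    from opendm import mesh

    mesh.create_25dmesh(
        "odm_filterpoints/point_cloud.ply",   # placeholder input
        "odm_meshing/odm_25dmesh.ply",        # placeholder output
        radius_steps=["0.05", "0.07"],
        dsm_resolution=0.05,
        available_cores=4,
        method='gridded',
        use_dtm=True,   # rasterize mesh_dtm instead of mesh_dsm
    )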
diff --git a/opendm/multispectral.py b/opendm/multispectral.py
index 19767cd43..5d9fce529 100644
--- a/opendm/multispectral.py
+++ b/opendm/multispectral.py
@@ -6,15 +6,17 @@ import numpy as np
 from opendm import log
 from opendm.concurrency import parallel_map
+from opendm import thermal
 from opensfm.io import imread
 from skimage import exposure
 from skimage.morphology import disk
 from skimage.filters import rank, gaussian
+from skimage.util import img_as_ubyte
 
 # Loosely based on https://github.com/micasense/imageprocessing/blob/master/micasense/utils.py
 
-def dn_to_radiance(photo, image):
+def dn_to_radiance(photo, image, band_vignetting_coefficients=None):
    """
    Convert Digital Number values to Radiance values
    :param photo ODM_Photo
@@ -25,7 +28,7 @@ def dn_to_radiance(photo, image):
    image = image.astype("float32")
    if len(image.shape) != 3:
        raise ValueError("Image should have shape length of 3 (got: %s)" % len(image.shape))
-    
+
    # Thermal (this should never happen, but just in case..)
    if photo.is_thermal():
        return image
@@ -37,10 +40,24 @@ def dn_to_radiance(photo, image):
    exposure_time = photo.exposure_time
    gain = photo.get_gain()
    gain_adjustment = photo.gain_adjustment
+    photometric_exp = photo.get_photometric_exposure()
+
+    if a1 is None and photometric_exp is None:
+        log.ODM_WARNING("Cannot perform radiometric calibration, no FNumber/Exposure Time or Radiometric Calibration EXIF tags found in %s. Using Digital Number." % photo.filename)
+        return image
 
-    V, x, y = vignette_map(photo)
-    if x is None:
+    if a1 is None and photometric_exp is not None:
+        a1 = photometric_exp
+
+    if band_vignetting_coefficients is not None:
+        V = band_vignetting_coefficients
        x, y = np.meshgrid(np.arange(photo.width), np.arange(photo.height))
+    else:
+        V, x, y = vignette_map(photo)
+        if V is not None:
+            V = np.repeat(V[:, :, np.newaxis], image.shape[2], axis=2)
+        if x is None:
+            x, y = np.meshgrid(np.arange(photo.width), np.arange(photo.height))
 
    if dark_level is not None:
        image -= dark_level
@@ -51,34 +68,31 @@ def dn_to_radiance(photo, image):
        image /= bit_depth_max
    else:
        log.ODM_WARNING("Cannot normalize DN for %s, bit depth is missing" % photo.filename)
-    
+
+    # Vignette correction
    if V is not None:
-        # vignette correction
-        V = np.repeat(V[:, :, np.newaxis], image.shape[2], axis=2)
        image *= V
 
+    # Row gradient correction
    if exposure_time and a2 is not None and a3 is not None:
-        # row gradient correction
        R = 1.0 / (1.0 + a2 * y / exposure_time - a3 * y)
        R = np.repeat(R[:, :, np.newaxis], image.shape[2], axis=2)
        image *= R
-    
+
    # Floor any negative radiances to zero (can happen due to noise around blackLevel)
    if dark_level is not None:
        image[image < 0] = 0
-    
-    # apply the radiometric calibration - i.e. scale by the gain-exposure product and
+
+    # Apply the radiometric calibration - i.e. scale by the gain-exposure product and
    # multiply with the radiometric calibration coefficient
    if gain is not None and exposure_time is not None:
        image /= (gain * exposure_time)
-    
-    if a1 is not None:
-        # multiply with the radiometric calibration coefficient
-        image *= a1
 
-    if gain_adjustment is not None:
+    if photo.camera_make == "DJI" and gain_adjustment is not None:
        image *= gain_adjustment
+    else:
+        image *= a1
 
    return image
 
@@ -111,15 +125,21 @@ def vignette_map(photo):
            vignette = 1.0 / vignette
 
        return vignette, x, y
-    
+
    return None, None, None
 
-def dn_to_reflectance(photo, image, use_sun_sensor=True):
-    radiance = dn_to_radiance(photo, image)
-    irradiance = compute_irradiance(photo, use_sun_sensor=use_sun_sensor)
+def dn_to_reflectance(photo, image, band_irradiance, band_vignetting=None, use_sun_sensor=True):
+    radiance = dn_to_radiance(photo, image, band_vignetting)
+
+    if band_irradiance is not None and use_sun_sensor is not True:
+        irradiance = band_irradiance
+    else:
+        irradiance = compute_irradiance(photo, use_sun_sensor=use_sun_sensor)
+
    reflectance = radiance * math.pi / irradiance
    reflectance[reflectance < 0.0] = 0.0
    reflectance[reflectance > 1.0] = 1.0
+
    return reflectance
 
 def compute_irradiance(photo, use_sun_sensor=True):
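`dn_to_reflectance` applies the standard conversion reflectance = radiance × π / irradiance, clamped to [0, 1]. The same computation in compact form (values illustrative):

    import math
    import numpy as np

    radiance = np.array([[0.05, 0.12], [0.30, 0.01]], dtype=np.float32)  # W/m^2/sr/nm
    irradiance = 1.1                                                     # W/m^2/nm

    reflectance = np.clip(radiance * math.pi / irradiance, 0.0, 1.0)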
@@ -127,13 +147,14 @@
    if photo.is_thermal():
        return 1.0
 
-    # Some cameras (Micasense, DJI) store the value (nice! just return)
+    # Some cameras (e.g. Micasense and DJI) store the value in metadata
    hirradiance = photo.get_horizontal_irradiance()
    if hirradiance is not None:
        return hirradiance
 
    # TODO: support for calibration panels
 
+    # For general cases
    if use_sun_sensor and photo.get_sun_sensor():
        # Estimate it
        dls_orientation_vector = np.array([0,0,-1])
@@ -163,9 +184,26 @@ def compute_irradiance(photo, use_sun_sensor=True):
        return horizontal_irradiance
    elif use_sun_sensor:
        log.ODM_WARNING("No sun sensor values found for %s" % photo.filename)
-    
+
    return 1.0
 
+def radiometric_calibrate(photo, image, images_path, image_type='reflectance', irradiance_by_hand=None, vignetting_info=None, use_sun_sensor=True):
+    band_irradiance_mean = None
+    if irradiance_by_hand is not None:
+        band_irradiance_mean = irradiance_by_hand.get(photo.band_name)
+
+    band_vignette_map = None
+    if vignetting_info is not None:
+        band_vignette_map = vignetting_info.get(photo.band_name)
+
+    if not photo.is_thermal():
+        if image_type == 'reflectance':
+            return dn_to_reflectance(photo, image, band_irradiance_mean, band_vignette_map, use_sun_sensor)
+        else:
+            return dn_to_radiance(photo, image, band_vignette_map)
+    else:
+        return thermal.dn_to_temperature(photo, image, images_path)
+
 def get_photos_by_band(multi_camera, user_band_name):
    band_name = get_primary_band_name(multi_camera, user_band_name)
 
@@ -173,33 +211,31 @@ def get_photos_by_band(multi_camera, user_band_name):
        if band['name'] == band_name:
            return band['photos']
 
-
 def get_primary_band_name(multi_camera, user_band_name):
    if len(multi_camera) < 1:
        raise Exception("Invalid multi_camera list")
-    
+
    # Pick RGB, or Green, or Blue, in this order, if available, otherwise first band
    if user_band_name == "auto":
        for aliases in [['rgb', 'redgreenblue'], ['green', 'g'], ['blue', 'b']]:
            for band in multi_camera:
                if band['name'].lower() in aliases:
                    return band['name']
-        
+
        return multi_camera[0]['name']
 
    for band in multi_camera:
        if band['name'].lower() == user_band_name.lower():
            return band['name']
-    
+
    band_name_fallback = multi_camera[0]['name']
    log.ODM_WARNING("Cannot find band name \"%s\", will use \"%s\" instead" % (user_band_name, band_name_fallback))
    return band_name_fallback
 
-
 def compute_band_maps(multi_camera, primary_band):
    """
-    Computes maps of: 
+    Computes maps of:
     - { photo filename --> associated primary band photo } (s2p)
     - { primary band filename --> list of associated secondary band photos } (p2s)
    by looking at capture UUID, capture time or filenames as a fallback
@@ -210,7 +246,7 @@ def compute_band_maps(multi_camera, primary_band):
        if band['name'] == band_name:
            primary_band_photos = band['photos']
            break
-    
+
    # Try using capture time as the grouping factor
    try:
        unique_id_map = {}
@@ -221,13 +257,13 @@ def compute_band_maps(multi_camera, primary_band):
            uuid = p.get_capture_id()
            if uuid is None:
                raise Exception("Cannot use capture time (no information in %s)" % p.filename)
-            
+
            # Should be unique across primary band
            if unique_id_map.get(uuid) is not None:
                raise Exception("Unreliable UUID/capture time detected (duplicate)")
 
            unique_id_map[uuid] = p
-        
+
        for band in multi_camera:
            photos = band['photos']
 
@@ -235,7 +271,7 @@ def compute_band_maps(multi_camera, primary_band):
                uuid = p.get_capture_id()
                if uuid is None:
                    raise Exception("Cannot use UUID/capture time (no information in %s)" % p.filename)
-                
+
                # Should match the primary band
                if unique_id_map.get(uuid) is None:
                    raise Exception("Unreliable UUID/capture time detected (no primary band match)")
@@ -245,6 +281,8 @@ def compute_band_maps(multi_camera, primary_band):
                if band['name'] != band_name:
                    p2s.setdefault(unique_id_map[uuid].filename, []).append(p)
 
+                # log.ODM_INFO("File %s <-- Capture %s" % (p.filename, uuid))
+
        return s2p, p2s
    except Exception as e:
        # Fallback on filename conventions
@@ -281,7 +319,27 @@ def compute_band_maps(multi_camera, primary_band):
 
    return s2p, p2s
 
-def compute_alignment_matrices(multi_camera, primary_band_name, images_path, s2p, p2s, max_concurrency=1, max_samples=30):
+def compute_band_irradiances(multi_camera):
+    log.ODM_INFO("Computing band irradiance")
+
+    band_irradiance_info = {}
+    for band in multi_camera:
+        irradiances = []
+        band_irradiance_mean = 1.0
+        for p in get_photos_by_band(multi_camera, band['name']):
+            hirradiance = p.get_horizontal_irradiance()
+            if hirradiance is not None:
+                irradiances.append(hirradiance)
+        if len(irradiances) > 0:
+            band_irradiance_mean = sum(irradiances) / len(irradiances)
+
+        band_irradiance_info[band['name']] = band_irradiance_mean
+        log.ODM_INFO("%s band's mean irradiance: %s" % (band['name'], band_irradiance_mean))
+
+    return band_irradiance_info
+
+def compute_alignment_matrices(multi_camera, primary_band_name, images_path, s2p, p2s, max_concurrency=1, max_samples=30,
+                               irradiance_by_hand=None, use_sun_sensor=True, rig_optimization=False, use_local_homography=False):
    log.ODM_INFO("Computing band alignment")
 
    alignment_info = {}
@@ -289,76 +347,155 @@ def compute_alignment_matrices(multi_camera, primary_band_name, images_path, s2p
    # For each secondary band
    for band in multi_camera:
        if band['name'] != primary_band_name:
-            matrices = []
-
-            def parallel_compute_homography(p):
+            matrices_samples = []
+            use_local_warp_matrix = use_local_homography
+            max_samples = max_samples if not use_local_warp_matrix and max_samples > 0 and max_samples < len(band['photos']) else len(band['photos'])
+
+            # For MicaSense sensors, only apply use_local_warp_matrix and max_samples to the LWIR band
+            first_photo = band['photos'][0]
+            if first_photo is not None:
+                if first_photo.camera_make == 'MicaSense' and first_photo.band_name != 'LWIR':
+                    use_local_warp_matrix = False
+                    max_samples = 30
+
+            def parallel_compute_homography(photo):
+                filename = photo.filename
                try:
-                    if len(matrices) >= max_samples:
+                    # Calculate the best warp matrix using a few samples in favor of performance
+                    if not use_local_warp_matrix and len(matrices_samples) >= max_samples:
                        # log.ODM_INFO("Got enough samples for %s (%s)" % (band['name'], max_samples))
                        return
 
                    # Find good matrix candidates for alignment
-
-                    primary_band_photo = s2p.get(p['filename'])
+                    primary_band_photo = s2p.get(filename)
                    if primary_band_photo is None:
-                        log.ODM_WARNING("Cannot find primary band photo for %s" % p['filename'])
+                        log.ODM_WARNING("Cannot find primary band photo for %s" % filename)
                        return
 
-                    warp_matrix, dimension, algo = compute_homography(os.path.join(images_path, p['filename']),
-                                                                    os.path.join(images_path, primary_band_photo.filename))
-
+                    warp_matrix, dimension, algo = compute_homography(os.path.join(images_path, filename),
+                                                                      os.path.join(images_path, primary_band_photo.filename),
+                                                                      photo,
+                                                                      primary_band_photo,
+                                                                      images_path,
+                                                                      irradiance_by_hand,
+                                                                      use_sun_sensor,
+                                                                      rig_optimization)
+
                    if warp_matrix is not None:
-                        log.ODM_INFO("%s --> %s good match" % (p['filename'], primary_band_photo.filename))
+                        log.ODM_INFO("%s --> %s good match" % (filename, primary_band_photo.filename))
 
-                        matrices.append({
+                        matrices_samples.append({
+                            'filename': filename,
+                            'align_filename': primary_band_photo.filename,
                            'warp_matrix': warp_matrix,
                            'eigvals': np.linalg.eigvals(warp_matrix),
                            'dimension': dimension,
                            'algo': algo
                        })
                    else:
-                        log.ODM_INFO("%s --> %s cannot be matched" % (p['filename'], primary_band_photo.filename))
+                        log.ODM_INFO("%s --> %s cannot be matched" % (filename, primary_band_photo.filename))
                except Exception as e:
-                    log.ODM_WARNING("Failed to compute homography for %s: %s" % (p['filename'], str(e)))
+                    log.ODM_WARNING("Failed to compute homography for %s: %s" % (filename, str(e)))
 
-            parallel_map(parallel_compute_homography, [{'filename': p.filename} for p in band['photos']], max_concurrency, single_thread_fallback=False)
+            parallel_map(parallel_compute_homography, band['photos'], max_concurrency, single_thread_fallback=False)
 
-            # Find the matrix that has the most common eigvals
-            # among all matrices. That should be the "best" alignment.
-            for m1 in matrices:
-                acc = np.array([0.0,0.0,0.0])
-                e = m1['eigvals']
+            if use_local_warp_matrix or max_samples > 100:
+                # Method 1 (faster): Find the matrix that has the most common eigvals among all matrices. That should be the "best" alignment.
+                for m1 in matrices_samples:
+                    acc = np.array([0.0,0.0,0.0])
+                    e = m1['eigvals']
 
-                for m2 in matrices:
-                    acc += abs(e - m2['eigvals'])
+                    for m2 in matrices_samples:
+                        acc += abs(e - m2['eigvals'])
+
+                    m1['score'] = acc.sum()
+
+            else:
+                # Method 2 (slower): Find the matrix that has the most common projections
+                for m1 in matrices_samples:
+                    score = 0.0
+                    image_size = m1['warp_matrix']
+
+                    for m2 in matrices_samples:
+                        image_raw = imread(os.path.join(images_path, m2['filename']), unchanged=True, anydepth=True)
+                        photo_raw = next((p for p in band['photos'] if p.filename == m2['filename']), None)
+                        image_raw = radiometric_calibrate(photo_raw, image_raw, images_path, 'radiance', irradiance_by_hand, None, use_sun_sensor)
+                        if image_raw.shape[2] == 3:
+                            image_gray = to_8bit(cv2.cvtColor(image_raw, cv2.COLOR_BGR2GRAY))
+                        else:
+                            image_gray = to_8bit(image_raw[:,:,0])
+
+                        image_proj1 = align_image(image_gray, m1['warp_matrix'], m1['dimension'])
+                        image_proj2 = align_image(image_gray, m2['warp_matrix'], m2['dimension'])
+
+                        margin = 0.1 # use 80% of image area to compare
+                        h, w = image_proj1.shape
+                        x1 = int(w * margin)
+                        y1 = int(h * margin)
+                        x2 = int(w * (1-margin))
+                        y2 = int(h * (1-margin))
+                        image_size = (x2-x1+1, y2-y1+1)
+
+                        image_proj1_samples = image_proj1[y1:y2, x1:x2]
+                        image_proj2_samples = image_proj2[y1:y2, x1:x2]
+                        diff = abs(np.subtract(image_proj1_samples, image_proj2_samples))
+                        score += np.sum(diff) / (w*h)
+
+                    # log.ODM_INFO("Warp matrix: %s (score: %s, sample pixels: %s x %s)" % (m1, score, image_size[0], image_size[1]))
+                    m1['score'] = score
 
-                m1['score'] = acc.sum()
-            
            # Sort
-            matrices.sort(key=lambda x: x['score'], reverse=False)
-            
-            if len(matrices) > 0:
-                alignment_info[band['name']] = matrices[0]
-                log.ODM_INFO("%s band will be aligned using warp matrix %s (score: %s)" % (band['name'], matrices[0]['warp_matrix'], matrices[0]['score']))
+            matrices_samples.sort(key=lambda x: x['score'], reverse=False)
+
+            if len(matrices_samples) > 0:
+                best_candidate = matrices_samples[0]
+
+                # Alignment matrices for all shots
+                matrices_all = []
+
+                for photo in [{'filename': p.filename} for p in band['photos']]:
+                    primary_band_photo = s2p.get(photo['filename'])
+                    local_warp_matrix = next((item for item in matrices_samples if item['filename'] == photo['filename']), None) # matrices_samples is a list
+
+                    if use_local_warp_matrix and local_warp_matrix is not None:
+                        matrices_all.append(local_warp_matrix)
+                    else:
+                        matrices_all.append({
+                            'filename': photo['filename'],
+                            'align_filename': primary_band_photo.filename,
+                            'warp_matrix': best_candidate['warp_matrix'],
+                            'eigvals': best_candidate['eigvals'],
+                            'dimension': best_candidate['dimension'],
+                            'algo': best_candidate['algo']
+                        })
+
+                alignment_info[band['name']] = matrices_all
+
+                if use_local_warp_matrix:
+                    log.ODM_INFO("%s band will be aligned using local warp matrices %s" % (band['name'], matrices_all))
+                else:
+                    log.ODM_INFO("%s band will be aligned using global warp matrix %s (score: %s)" % (band['name'], best_candidate['warp_matrix'], best_candidate['score']))
            else:
                log.ODM_WARNING("Cannot find alignment matrix for band %s, The band might end up misaligned!" % band['name'])
 
    return alignment_info
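Method 1 scores each candidate homography by how far its eigenvalues sit from every other candidate's; the matrix with the smallest accumulated distance is the consensus choice. The core of that idea, isolated:

    import numpy as np

    def consensus_index(matrices):
        # matrices: list of 3x3 homographies; returns index of the most "typical" one
        eigs = [np.linalg.eigvals(m) for m in matrices]
        scores = [sum(np.abs(e - other).sum() for other in eigs) for e in eigs]
        return int(np.argmin(scores))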
-def compute_homography(image_filename, align_image_filename):
+def compute_homography(image_filename, align_image_filename, photo, align_photo, images_path, irradiance_by_hand=None, use_sun_sensor=True, rig_optimization=False):
    try:
        # Convert images to grayscale if needed
        image = imread(image_filename, unchanged=True, anydepth=True)
+        image = radiometric_calibrate(photo, image, images_path, 'radiance', irradiance_by_hand, None, use_sun_sensor)
        if image.shape[2] == 3:
            image_gray = to_8bit(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
        else:
            image_gray = to_8bit(image[:,:,0])
 
        max_dim = max(image_gray.shape)
-        if max_dim <= 320:
-            log.ODM_WARNING("Small image for band alignment (%sx%s), this might be tough to compute." % (image_gray.shape[1], image_gray.shape[0]))
+        # if max_dim <= 320:
+        #     log.ODM_WARNING("Small image for band alignment (%sx%s), this might be tough to compute." % (image_gray.shape[1], image_gray.shape[0]))
 
        align_image = imread(align_image_filename, unchanged=True, anydepth=True)
+        align_image = radiometric_calibrate(align_photo, align_image, images_path, 'radiance', irradiance_by_hand, None, use_sun_sensor)
        if align_image.shape[2] == 3:
            align_image_gray = to_8bit(cv2.cvtColor(align_image, cv2.COLOR_BGR2GRAY))
        else:
@@ -375,7 +512,7 @@ def compute_homography(image_filename, align_image_filename):
                return None, (None, None)
 
            det = np.linalg.det(h)
-            
+
            # Check #1 homography's determinant will not be close to zero
            if abs(det) < 0.25:
                return None, (None, None)
@@ -384,13 +521,13 @@ def compute_homography(image_filename, align_image_filename):
            svd = np.linalg.svd(h, compute_uv=False)
            if svd[-1] == 0:
                return None, (None, None)
-            
+
            ratio = svd[0] / svd[-1]
            if ratio > 100000:
                return None, (None, None)
 
            return h, (align_image_gray.shape[1], align_image_gray.shape[0])
-        
+
        warp_matrix = None
        dimension = None
        algo = None
@@ -398,21 +535,42 @@ def compute_using(algorithm):
        if max_dim > 320:
            algo = 'feat'
            result = compute_using(find_features_homography)
-            
+
            if result[0] is None:
                algo = 'ecc'
-                log.ODM_INFO("Can't use features matching, will use ECC (this might take a bit)")
+                log.ODM_INFO("Can't use features matching for %s, will use ECC (this might take a bit)" % photo.filename)
                result = compute_using(find_ecc_homography)
-                if result[0] is None:
-                    algo = None
-        else: # ECC only for low resolution images
-            algo = 'ecc'
-            log.ODM_INFO("Using ECC (this might take a bit)")
-            result = compute_using(find_ecc_homography)
-            if result[0] is None:
-                algo = None
-        
+
+        else: # for low resolution images
+            if photo.camera_make == 'MicaSense' and photo.band_name == 'LWIR':
+                algo = 'rig'
+                log.ODM_INFO("Using camera rig relatives to compute warp matrix for %s (rig relatives: %s)" % (photo.filename, str(photo.get_rig_relatives())))
+                warp_matrix_intrinsic = find_rig_homography(photo, align_photo, image_gray, align_image_gray)
+                if rig_optimization:
log.ODM_INFO("Using ECC to optimize the rig relatives warp matrix for %s" % photo.filename) + warp_matrix_ecc = find_ecc_homography(image_gray, align_image_gray, warp_matrix_init=warp_matrix_intrinsic) + if warp_matrix_ecc is not None: + algo = 'rig+ecc' + warp_matrix_optimized = np.array(np.dot(warp_matrix_ecc, warp_matrix_intrinsic)) + warp_matrix_optimized /= warp_matrix_optimized[2,2] + else: + warp_matrix_optimized = warp_matrix_intrinsic + log.ODM_WARNING("Cannot compute ECC warp matrix for %s, use the rig relatives warp matrix instead" % photo.filename) + + else: + warp_matrix_optimized = warp_matrix_intrinsic + result = warp_matrix_optimized, (align_image_gray.shape[1], align_image_gray.shape[0]) + + else: + algo = 'ecc' + log.ODM_INFO("Using ECC for %s (this might take a bit)" % photo.filename) + result = compute_using(find_ecc_homography) + + if result[0] is None: + algo = None + + # log.ODM_INFO("Warp matrix for %s --> %s: \n%s (algorithm: %s)" % (photo.filename, align_photo.filename, str(result[0]), algo)) + warp_matrix, dimension = result return warp_matrix, dimension, algo @@ -420,7 +578,9 @@ def compute_using(algorithm): log.ODM_WARNING("Compute homography: %s" % str(e)) return None, (None, None), None -def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=1000, termination_eps=1e-8, start_eps=1e-4): +def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=2000, termination_eps=1e-8, start_eps=1e-4, warp_matrix_init=None): + # Major props to Alexander Reynolds for his insight into the pyramided matching process found at + # https://stackoverflow.com/questions/45997891/cv2-motion-euclidean-for-the-warp-mode-in-ecc-image-alignment-method pyramid_levels = 0 h,w = image_gray.shape max_dim = max(h, w) @@ -437,34 +597,42 @@ def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=1000, min_dim = min(h, w) + if (min_dim <= 300): + number_of_iterations = 5000 + termination_eps = 1e-7 + gaussian_filter_size = 9 # a constant since there is only one pyramid level + else: + gaussian_filter_size = 5 # will be increased in each pyramid level iteration + while min_dim > 300: min_dim /= 2.0 pyramid_levels += 1 - + log.ODM_INFO("Pyramid levels: %s" % pyramid_levels) - - # Quick check on size - if align_image_gray.shape[0] != image_gray.shape[0]: - align_image_gray = to_8bit(align_image_gray) - image_gray = to_8bit(image_gray) - fx = image_gray.shape[1]/align_image_gray.shape[1] - fy = image_gray.shape[0]/align_image_gray.shape[0] + fx = align_image_gray.shape[1] / image_gray.shape[1] + fy = align_image_gray.shape[0] / image_gray.shape[0] + if warp_matrix_init is not None: # initial rough alignment + image_gray = align_image(image_gray, warp_matrix_init, (align_image_gray.shape[1], align_image_gray.shape[0]), + flags=(cv2.INTER_LINEAR if (fx < 1.0 and fy < 1.0) else cv2.INTER_CUBIC)) + else: + if align_image_gray.shape[0] != image_gray.shape[0]: + image_gray = cv2.resize(image_gray, None, + fx=fx, + fy=fy, + interpolation=(cv2.INTER_AREA if (fx < 1.0 and fy < 1.0) else cv2.INTER_LANCZOS4)) - align_image_gray = cv2.resize(align_image_gray, None, - fx=fx, - fy=fy, - interpolation=(cv2.INTER_AREA if (fx < 1.0 and fy < 1.0) else cv2.INTER_LANCZOS4)) + # Define the motion model, scale the initial warp matrix to smallest level + default_matrix = np.eye(3, 3, dtype=np.float32) + warp_matrix = default_matrix * np.array([[1,1,2],[1,1,2],[0.5,0.5,1]], dtype=np.float32)**(1-(pyramid_levels+1)) # Build pyramids image_gray_pyr = [image_gray] 
align_image_pyr = [align_image_gray] for level in range(pyramid_levels): - image_gray_pyr[0] = to_8bit(image_gray_pyr[0], force_normalize=True) image_gray_pyr.insert(0, cv2.resize(image_gray_pyr[0], None, fx=1/2, fy=1/2, interpolation=cv2.INTER_AREA)) - align_image_pyr[0] = to_8bit(align_image_pyr[0], force_normalize=True) align_image_pyr.insert(0, cv2.resize(align_image_pyr[0], None, fx=1/2, fy=1/2, interpolation=cv2.INTER_AREA)) @@ -472,29 +640,32 @@ def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=1000, warp_matrix = np.eye(3, 3, dtype=np.float32) for level in range(pyramid_levels+1): - ig = gradient(gaussian(image_gray_pyr[level])) - aig = gradient(gaussian(align_image_pyr[level])) + ig = gradient(gaussian(normalize(image_gray_pyr[level]))) + aig = gradient(gaussian(normalize(align_image_pyr[level]))) if level == pyramid_levels and pyramid_levels == 0: eps = termination_eps else: eps = start_eps - ((start_eps - termination_eps) / (pyramid_levels)) * level - + # Define termination criteria criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, eps) try: - log.ODM_INFO("Computing ECC pyramid level %s" % level) - _, warp_matrix = cv2.findTransformECC(ig, aig, warp_matrix, cv2.MOTION_HOMOGRAPHY, criteria, inputMask=None, gaussFiltSize=9) + gaussian_filter_size = gaussian_filter_size + level * 2 + log.ODM_INFO("Computing ECC pyramid level %s using Gaussian filter size %s" % (level, gaussian_filter_size)) + _, warp_matrix = cv2.findTransformECC(ig, aig, warp_matrix, cv2.MOTION_HOMOGRAPHY, criteria, inputMask=None, gaussFiltSize=gaussian_filter_size) except Exception as e: if level != pyramid_levels: log.ODM_INFO("Could not compute ECC warp_matrix at pyramid level %s, resetting matrix" % level) warp_matrix = np.eye(3, 3, dtype=np.float32) else: - raise e + # raise e + return None + - if level != pyramid_levels: + if level != pyramid_levels: warp_matrix = warp_matrix * np.array([[1,1,2],[1,1,2],[0.5,0.5,1]], dtype=np.float32) if downscale > 0: @@ -502,8 +673,7 @@ def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=1000, else: return warp_matrix - -def find_features_homography(image_gray, align_image_gray, feature_retention=0.7, min_match_count=10): +def find_features_homography(image_gray, align_image_gray, feature_retention=0.8, min_match_count=4): # Detect SIFT features and compute descriptors. 
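# Feature path: SIFT keypoints are detected in both bands, the strongest fraction of matches (feature_retention) is kept, and cv2.findHomography with RANSAC fits the warp; min_match_count sets the floor on usable matches before giving up.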
detector = cv2.SIFT_create(edgeThreshold=10, contrastThreshold=0.1) @@ -515,7 +685,7 @@ def find_features_homography(image_gray, align_image_gray, feature_retention=0.7 max_size = 4096 while max_dim / (2**downscale) > max_size: downscale += 1 - + if downscale > 0: f = 1 / (2**downscale) image_gray = cv2.resize(image_gray, None, fx=f, fy=f, interpolation=cv2.INTER_AREA) @@ -525,8 +695,8 @@ def find_features_homography(image_gray, align_image_gray, feature_retention=0.7 fx = image_gray.shape[1]/align_image_gray.shape[1] fy = image_gray.shape[0]/align_image_gray.shape[0] - align_image_gray = cv2.resize(align_image_gray, None, - fx=fx, + align_image_gray = cv2.resize(align_image_gray, None, + fx=fx, fy=fy, interpolation=(cv2.INTER_AREA if (fx < 1.0 and fy < 1.0) else cv2.INTER_LANCZOS4)) @@ -558,6 +728,7 @@ def find_features_homography(image_gray, align_image_gray, feature_retention=0.7 # Debug # imMatches = cv2.drawMatches(im1, kp_image, im2, kp_align_image, matches, None) # cv2.imwrite("matches.jpg", imMatches) + # log.ODM_INFO("Good feature matches: %s" % len(matches)) # Extract location of good matches points_image = np.zeros((len(matches), 2), dtype=np.float32) @@ -571,12 +742,46 @@ def find_features_homography(image_gray, align_image_gray, feature_retention=0.7 h, _ = cv2.findHomography(points_image, points_align_image, cv2.RANSAC) if h is None: return None - + if downscale > 0: return h * (np.array([[1,1,2],[1,1,2],[0.5,0.5,1]], dtype=np.float32) ** downscale) else: return h +def find_rig_homography(photo, align_photo, image_gray, align_image_gray): + image_undistorted_gray = photo.undistorted(image_gray) + align_image_undistorted_gray = align_photo.undistorted(align_image_gray) + + # compute homography matrices + M_ig = find_features_homography(image_gray, image_undistorted_gray) + M_aig = find_features_homography(align_image_gray, align_image_undistorted_gray) + M = photo.get_homography(align_photo) + + if M_ig is None: + M_ig = np.eye(3, 3, dtype=np.float32) + log.ODM_INFO("Cannot find feature homography between the raw image and undistorted image for %s, using identity matrix instead" % photo.filename) + + if M_aig is None: + M_aig = np.eye(3, 3, dtype=np.float32) + log.ODM_INFO("Cannot find feature homography between the raw image and undistorted image for %s, using identity matrix instead" % align_photo.filename) + + # log.ODM_INFO("%s --> %s transform matrices: M_src=%s, M_dst=%s, M_src_dst=%s" % (photo.filename, align_photo.filename, M_ig, M_aig, M)) + + warp_matrix = np.array(np.dot(np.linalg.inv(M_aig), np.dot(M, M_ig))) + warp_matrix /= warp_matrix[2,2] + return warp_matrix + +def normalize(im, min=None, max=None): + width, height = im.shape + norm = np.zeros((width, height), dtype=np.float32) + if min is not None and max is not None: + norm = (im - min) / (max-min) + else: + cv2.normalize(im, dst=norm, alpha=0.0, beta=1.0, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) + norm[norm<0.0] = 0.0 + norm[norm>1.0] = 1.0 + return norm + def gradient(im, ksize=5): im = local_normalize(im) grad_x = cv2.Sobel(im,cv2.CV_32F,1,0,ksize=ksize) @@ -585,22 +790,20 @@ def gradient(im, ksize=5): return grad def local_normalize(im): + norm = img_as_ubyte(normalize(im)) width, _ = im.shape disksize = int(width/5) if disksize % 2 == 0: disksize = disksize + 1 selem = disk(disksize) - im = rank.equalize(im, selem=selem) + im = rank.equalize(norm, selem=selem) return im - -def align_image(image, warp_matrix, dimension): - image = resize_match(image, dimension) - +def align_image(image, warp_matrix, 
dimension, flags=cv2.INTER_LINEAR): if warp_matrix.shape == (3, 3): - return cv2.warpPerspective(image, warp_matrix, dimension) + return cv2.warpPerspective(image, warp_matrix, dimension, flags=flags) else: - return cv2.warpAffine(image, warp_matrix, dimension) + return cv2.warpAffine(image, warp_matrix, dimension, flags=flags) def to_8bit(image, force_normalize=False): @@ -627,7 +830,7 @@ def to_8bit(image, force_normalize=False): return image - +# TODO: the use of this method needs further review def resize_match(image, dimension): h, w = image.shape[0], image.shape[1] mw, mh = dimension @@ -635,9 +838,30 @@ def resize_match(image, dimension): if w != mw or h != mh: fx = mw/w fy = mh/h - image = cv2.resize(image, None, - fx=fx, + image = cv2.resize(image, None, + fx=fx, fy=fy, interpolation=(cv2.INTER_AREA if (fx < 1.0 and fy < 1.0) else cv2.INTER_LANCZOS4)) return image + +###################################################################################################################### +# Custom image band vignetting handler +###################################################################################################################### +def compute_band_vignette_map(multi_camera): + band_vignette_map = {} + + for band in multi_camera: + photos = get_photos_by_band(multi_camera, band['name']) + ref_photo = photos[0] + band_vignette_map[band['name']] = None + + if ref_photo.camera_make == "Parrot" and ref_photo.camera_model == "Sequoia": + log.ODM_INFO("Computing %s band vignetting coefficients" % band['name']) + vignetting_coefs = ref_photo.get_vignetting_coefficients_sequoia() + if vignetting_coefs is not None: + band_vignette_map[band['name']] = vignetting_coefs + log.ODM_INFO("%s band's vignetting coefficients: %s" % (band['name'], band_vignette_map.get(band['name']))) + + return band_vignette_map diff --git a/opendm/orthophoto.py b/opendm/orthophoto.py index 319a504cb..3bd4a48bf 100644 --- a/opendm/orthophoto.py +++ b/opendm/orthophoto.py @@ -30,7 +30,7 @@ def get_orthophoto_vars(args): def build_overviews(orthophoto_file): log.ODM_INFO("Building Overviews") kwargs = {'orthophoto': orthophoto_file} - + # Run gdaladdo system.run('gdaladdo -r average ' '--config BIGTIFF_OVERVIEW IF_SAFER ' @@ -41,12 +41,13 @@ def generate_png(orthophoto_file, output_file=None, outsize=None): if output_file is None: base, ext = os.path.splitext(orthophoto_file) output_file = base + '.png' - + # See if we need to select top three bands bandparam = "" + scaleparam = "" gtif = gdal.Open(orthophoto_file) - if gtif.RasterCount > 4: + if gtif.RasterCount > 4: # multispectral bands = [] for idx in range(1, gtif.RasterCount+1): bands.append(gtif.GetRasterBand(idx).GetColorInterpretation()) @@ -57,25 +58,40 @@ def generate_png(orthophoto_file, output_file=None, outsize=None): green = bands.get(gdal.GCI_GreenBand) blue = bands.get(gdal.GCI_BlueBand) if red is None or green is None or blue is None: - raise Exception("Cannot find bands") + raise Exception("Cannot find RGB bands") - bandparam = "-b %s -b %s -b %s -a_nodata 0" % (red, green, blue) + alpha = bands.get(gdal.GCI_AlphaBand) + if alpha is not None: + bandparam = "-b %s -b %s -b %s -b %s -a_nodata 0" % (red, green, blue, alpha) + else: + bandparam = "-b %s -b %s -b %s -a_nodata 0" % (red, green, blue) except: bandparam = "-b 1 -b 2 -b 3 -a_nodata 0" + + scaleparam = "-scale 0 0.5 0 255" + + elif gtif.RasterCount > 0 and gtif.RasterCount <= 2: # grayscale + band1 = gtif.GetRasterBand(1) + band1.ComputeStatistics(0) + bandparam = "-a_nodata 0" + 
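+ # Stretch the single band to the full 8-bit range using the min/max statistics computed above, so low-dynamic-range rasters (e.g. thermal) stay visible in the PNG.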
scaleparam = "-scale %s %s 0 255" % (band1.GetMinimum(), band1.GetMaximum()) + gtif = None osparam = "" if outsize is not None: - osparam = "-outsize %s 0" % outsize + osparam = "-outsize %s 0 -ot Byte" % outsize + else: + osparam = "-outsize %s%% %s%% -ot Byte" % (10, 10) - system.run('gdal_translate -of png "%s" "%s" %s %s ' - '--config GDAL_CACHEMAX %s%% ' % (orthophoto_file, output_file, osparam, bandparam, get_max_memory())) + system.run('gdal_translate -of png "%s" "%s" %s %s %s ' + '--config GDAL_CACHEMAX %s%% ' % (orthophoto_file, output_file, osparam, bandparam, scaleparam, get_max_memory())) def generate_kmz(orthophoto_file, output_file=None, outsize=None): if output_file is None: base, ext = os.path.splitext(orthophoto_file) output_file = base + '.kmz' - + # See if we need to select top three bands bandparam = "" gtif = gdal.Open(orthophoto_file) @@ -83,8 +99,8 @@ def generate_kmz(orthophoto_file, output_file=None, outsize=None): bandparam = "-b 1 -b 2 -b 3 -a_nodata 0" system.run('gdal_translate -of KMLSUPEROVERLAY -co FORMAT=PNG "%s" "%s" %s ' - '--config GDAL_CACHEMAX %s%% ' % (orthophoto_file, output_file, bandparam, get_max_memory())) - + '--config GDAL_CACHEMAX %s%% ' % (orthophoto_file, output_file, bandparam, get_max_memory())) + def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_tiles_dir, resolution): if args.crop > 0 or args.boundary: Cropper.crop(bounds_file_path, orthophoto_file, get_orthophoto_vars(args), keep_original=not args.optimize_disk_space, warp_options=['-dstalpha']) @@ -94,7 +110,7 @@ def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_ti if args.orthophoto_png: generate_png(orthophoto_file) - + if args.orthophoto_kmz: generate_kmz(orthophoto_file) @@ -108,7 +124,7 @@ def compute_mask_raster(input_raster, vector_mask, output_raster, blend_distance if not os.path.exists(input_raster): log.ODM_WARNING("Cannot mask raster, %s does not exist" % input_raster) return - + if not os.path.exists(vector_mask): log.ODM_WARNING("Cannot mask raster, %s does not exist" % vector_mask) return @@ -130,7 +146,7 @@ def compute_mask_raster(input_raster, vector_mask, output_raster, blend_distance max_coords_feature = feature if max_coords_feature is not None: burn_features = [max_coords_feature] - + shapes = [feature["geometry"] for feature in burn_features] out_image, out_transform = mask(rast, shapes, nodata=0) @@ -157,7 +173,7 @@ def feather_raster(input_raster, output_raster, blend_distance=20): return log.ODM_INFO("Computing feather raster: %s" % output_raster) - + with rasterio.open(input_raster, 'r') as rast: out_image = rast.read() if blend_distance > 0: @@ -302,7 +318,7 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}): blended = temp[-1] / 255.0 * temp[b] + (1 - temp[-1] / 255.0) * dstarr[b] np.copyto(dstarr[b], blended, casting='unsafe', where=where) dstarr[-1][where] = 255.0 - + # check if dest has any nodata pixels available if np.count_nonzero(dstarr[-1]) == blocksize: break diff --git a/opendm/photo.py b/opendm/photo.py index 7549458fe..4d3e14d84 100644 --- a/opendm/photo.py +++ b/opendm/photo.py @@ -1,6 +1,9 @@ import logging import re import os +import base64 +import struct +import cv2 import math import exifread @@ -23,12 +26,12 @@ def find_mean_utc_time(photos): utc_times = [] - for p in photos: + for p in photos: if p.utc_time is not None: utc_times.append(p.utc_time / 1000.0) if len(utc_times) == 0: return None - + return np.mean(utc_times) @@ -43,7 +46,7 @@ def 
find_largest_photo_dims(photos): if mp > max_mp: max_mp = mp max_dims = (p.width, p.height) - + return max_dims def find_largest_photo_dim(photos): @@ -52,7 +55,7 @@ def find_largest_photo_dim(photos): if p.width is None: continue max_dim = max(max_dim, max(p.width, p.height)) - + return max_dim def find_largest_photo(photos): @@ -104,7 +107,7 @@ class ODM_Photo: def __init__(self, path_file): self.filename = os.path.basename(path_file) self.mask = None - + # Standard tags (virtually all photos have these) self.width = None self.height = None @@ -120,6 +123,7 @@ def __init__(self, path_file): # Multi-band fields self.band_name = 'RGB' self.band_index = 0 + self.rig_relatives = None self.capture_uuid = None # Multi-spectral fields @@ -135,10 +139,28 @@ def __init__(self, path_file): self.bits_per_sample = None self.vignetting_center = None self.vignetting_polynomial = None + self.vignetting_polynomial_power = None + self.vignetting_polynomial_coefficient = None + self.vignetting_coefficients = None + self.distortion_parameters = None + self.principal_point = None + self.focal_plane_resolution_x = None + self.focal_plane_resolution_y = None + self.utc_time = None + + # DLS + self.sun_sensor = None + self.dls_yaw = None + self.dls_pitch = None + self.dls_roll = None + + # Irradiance calibration + self.sensor_model_coefficient = None self.spectral_irradiance = None self.horizontal_irradiance = None self.irradiance_scale_to_si = None - self.utc_time = None + self.irradiance_list = None + self.irradiance_calibration = None # OPK angles self.yaw = None @@ -148,12 +170,6 @@ def __init__(self, path_file): self.phi = None self.kappa = None - # DLS - self.sun_sensor = None - self.dls_yaw = None - self.dls_pitch = None - self.dls_roll = None - # Aircraft speed self.speed_x = None self.speed_y = None @@ -173,13 +189,14 @@ def __init__(self, path_file): # Misc SFM self.camera_projection = 'brown' self.focal_ratio = 0.85 + self.focal_length = None # parse values from metadata self.parse_exif_values(path_file) def __str__(self): return '{} | camera: {} {} | dimensions: {} x {} | lat: {} | lon: {} | alt: {} | band: {} ({})'.format( - self.filename, self.camera_make, self.camera_model, self.width, self.height, + self.filename, self.camera_make, self.camera_model, self.width, self.height, self.latitude, self.longitude, self.altitude, self.band_name, self.band_index) def set_mask(self, mask): @@ -190,7 +207,7 @@ def update_with_geo_entry(self, geo_entry): self.longitude = geo_entry.x self.altitude = geo_entry.z if geo_entry.yaw is not None and geo_entry.pitch is not None and geo_entry.roll is not None: - self.yaw = geo_entry.yaw + self.yaw = geo_entry.yaw self.pitch = geo_entry.pitch self.roll = geo_entry.roll self.dls_yaw = geo_entry.yaw @@ -260,14 +277,14 @@ def parse_exif_values(self, _path_file): if 'EXIF FNumber' in tags: self.fnumber = self.float_value(tags['EXIF FNumber']) - + if 'EXIF ISOSpeed' in tags: self.iso_speed = self.int_value(tags['EXIF ISOSpeed']) elif 'EXIF PhotographicSensitivity' in tags: self.iso_speed = self.int_value(tags['EXIF PhotographicSensitivity']) elif 'EXIF ISOSpeedRatings' in tags: self.iso_speed = self.int_value(tags['EXIF ISOSpeedRatings']) - + if 'Image BitsPerSample' in tags: self.bits_per_sample = self.int_value(tags['Image BitsPerSample']) @@ -288,7 +305,7 @@ def parse_exif_values(self, _path_file): timezone = pytz.timezone('UTC') epoch = timezone.localize(datetime.utcfromtimestamp(0)) self.utc_time = (timezone.localize(utc_time) - epoch).total_seconds() * 1000.0 - + if 
'MakerNote SpeedX' in tags and \ 'MakerNote SpeedY' in tags and \ 'MakerNote SpeedZ' in tags: @@ -300,7 +317,16 @@ def parse_exif_values(self, _path_file): 'EXIF ExifImageLength' in tags: self.exif_width = self.int_value(tags['EXIF ExifImageWidth']) self.exif_height = self.int_value(tags['EXIF ExifImageLength']) - + + if 'EXIF FocalPlaneXResolution' in tags: + self.focal_plane_resolution_x = self.float_value(tags['EXIF FocalPlaneXResolution']) + + if 'EXIF FocalPlaneYResolution' in tags: + self.focal_plane_resolution_y = self.float_value(tags['EXIF FocalPlaneYResolution']) + + if 'EXIF FocalLength' in tags: + self.focal_length = self.float_value(tags['EXIF FocalLength']) + except Exception as e: log.ODM_WARNING("Cannot read extended EXIF tags for %s: %s" % (self.filename, str(e))) @@ -320,12 +346,20 @@ def parse_exif_values(self, _path_file): if band_name is not None: self.band_name = band_name.replace(" ", "") + self.set_attr_from_xmp_tag('sensor_model_coefficient', xtags, [ + 'Camera:SensorModel', + ]) + self.set_attr_from_xmp_tag('band_index', xtags, [ 'DLS:SensorId', # Micasense RedEdge '@Camera:RigCameraIndex', # Parrot Sequoia, Sentera 21244-00_3.2MP-GS-0001 'Camera:RigCameraIndex', # MicaSense Altum ]) + self.set_attr_from_xmp_tag('rig_relatives', xtags, [ + 'Camera:RigRelatives' # MicaSense Altum and RedEdge-MX + ]) + self.set_attr_from_xmp_tag('radiometric_calibration', xtags, [ 'MicaSense:RadiometricCalibration', ]) @@ -339,9 +373,30 @@ def parse_exif_values(self, _path_file): 'Camera:VignettingPolynomial', 'Sentera:VignettingPolynomial', ]) - + + self.set_attr_from_xmp_tag('vignetting_polynomial_power', xtags, [ + 'Camera:VignettingPolynomial2DName' # Parrot Sequoia + ]) + + self.set_attr_from_xmp_tag('vignetting_polynomial_coefficient', xtags, [ + 'Camera:VignettingPolynomial2D' # Parrot Sequoia + ]) + + self.set_attr_from_xmp_tag('principal_point', xtags, [ + 'Camera:PrincipalPoint' # Micasense Altum and RedEdge-MX + ]) + + self.set_attr_from_xmp_tag('distortion_parameters', xtags, [ + 'Camera:PerspectiveDistortion' # Micasense Altum and RedEdge-MX + ]) + + self.set_attr_from_xmp_tag('focal_length', xtags, [ + 'Camera:PerspectiveFocalLength' # Micasense Altum and RedEdge-MX + ], float) + self.set_attr_from_xmp_tag('horizontal_irradiance', xtags, [ - 'Camera:HorizontalIrradiance' + 'Camera:HorizontalIrradiance', + 'DLS:HorizontalIrradiance' # Micasense DLS2 ], float) self.set_attr_from_xmp_tag('irradiance_scale_to_si', xtags, [ @@ -352,9 +407,18 @@ def parse_exif_values(self, _path_file): 'Camera:SunSensor', ], float) + self.set_attr_from_xmp_tag('irradiance_calibration', xtags, [ + '@Camera:IrradianceCalibrationMeasurement', # Parrot Sequoia + ]) + + self.set_attr_from_xmp_tag('irradiance_list', xtags, [ + 'Camera:IrradianceList', # Parrot Sequoia + ]) + self.set_attr_from_xmp_tag('spectral_irradiance', xtags, [ 'Camera:SpectralIrradiance', 'Camera:Irradiance', + 'DLS:SpectralIrradiance' # Micasense DLS2 ], float) self.set_attr_from_xmp_tag('capture_uuid', xtags, [ @@ -398,7 +462,7 @@ def parse_exif_values(self, _path_file): y = float(self.get_xmp_tag(xtags, '@drone-dji:RtkStdLon')) x = float(self.get_xmp_tag(xtags, '@drone-dji:RtkStdLat')) self.gps_xy_stddev = max(x, y) - + if '@drone-dji:RtkStdHgt' in xtags: self.gps_z_stddev = float(self.get_xmp_tag(xtags, '@drone-dji:RtkStdHgt')) else: @@ -410,7 +474,7 @@ def parse_exif_values(self, _path_file): '@Camera:GPSZAccuracy', 'GPSZAccuracy' ], float) - + # DJI Speed tags if '@drone-dji:FlightXSpeed' in xtags and \ 
'@drone-dji:FlightYSpeed' in xtags and \ @@ -435,7 +499,7 @@ def parse_exif_values(self, _path_file): '@drone-dji:ExposureTime' ], float) self.exposure_time /= 1e6 # is in microseconds - + # Account for over-estimation if self.gps_xy_stddev is not None: self.gps_xy_stddev *= 2.0 @@ -446,7 +510,7 @@ def parse_exif_values(self, _path_file): self.set_attr_from_xmp_tag('dls_yaw', xtags, ['DLS:Yaw'], float) self.set_attr_from_xmp_tag('dls_pitch', xtags, ['DLS:Pitch'], float) self.set_attr_from_xmp_tag('dls_roll', xtags, ['DLS:Roll'], float) - + camera_projection = self.get_xmp_tag(xtags, ['@Camera:ModelType', 'Camera:ModelType']) if camera_projection is not None: camera_projection = camera_projection.lower() @@ -460,9 +524,9 @@ def parse_exif_values(self, _path_file): self.camera_projection = camera_projection # OPK - self.set_attr_from_xmp_tag('yaw', xtags, ['@drone-dji:FlightYawDegree', '@Camera:Yaw', 'Camera:Yaw'], float) - self.set_attr_from_xmp_tag('pitch', xtags, ['@drone-dji:GimbalPitchDegree', '@Camera:Pitch', 'Camera:Pitch'], float) - self.set_attr_from_xmp_tag('roll', xtags, ['@drone-dji:GimbalRollDegree', '@Camera:Roll', 'Camera:Roll'], float) + self.set_attr_from_xmp_tag('yaw', xtags, ['@drone-dji:FlightYawDegree', '@Camera:Yaw', 'Camera:Yaw', 'Camera:IrradianceYaw'], float) + self.set_attr_from_xmp_tag('pitch', xtags, ['@drone-dji:GimbalPitchDegree', '@Camera:Pitch', 'Camera:Pitch', 'Camera:IrradiancePitch'], float) + self.set_attr_from_xmp_tag('roll', xtags, ['@drone-dji:GimbalRollDegree', '@Camera:Roll', 'Camera:Roll', 'Camera:IrradianceRoll'], float) # Normalize YPR conventions (assuming nadir camera) # Yaw: 0 --> top of image points north @@ -474,7 +538,7 @@ def parse_exif_values(self, _path_file): if self.has_ypr(): if self.camera_make.lower() in ['dji', 'hasselblad']: self.pitch = 90 + self.pitch - + if self.camera_make.lower() == 'sensefly': self.roll *= -1 @@ -488,7 +552,7 @@ def parse_exif_values(self, _path_file): # self.set_attr_from_xmp_tag('bandwidth', xtags, [ # 'Camera:WavelengthFWHM' # ], float) - + # Special case band handling for AeroVironment Quantix images # for some reason, they don't store band information in EXIFs if self.camera_make.lower() == 'aerovironment' and \ @@ -512,7 +576,10 @@ def parse_exif_values(self, _path_file): def compute_focal(self, tags, xtags): try: - self.focal_ratio = self.extract_focal(self.camera_make, self.camera_model, tags, xtags) + _focal_ratio, _focal_length = self.extract_focal(self.camera_make, self.camera_model, tags, xtags) + self.focal_ratio = _focal_ratio + if self.focal_length is None: + self.focal_length = _focal_length except (IndexError, ValueError) as e: log.ODM_WARNING("Cannot extract focal ratio for %s: %s" % (self.filename, str(e))) @@ -520,7 +587,7 @@ def extract_focal(self, make, model, tags, xtags): if make != "unknown": # remove duplicate 'make' information in 'model' model = model.replace(make, "") - + sensor_string = (make.strip() + " " + model.strip()).strip().lower() sensor_width = None @@ -531,7 +598,7 @@ def extract_focal(self, make, model, tags, xtags): pixels_per_unit = self.float_value(tags["EXIF FocalPlaneXResolution"]) if pixels_per_unit <= 0 and "EXIF FocalPlaneYResolution" in tags: pixels_per_unit = self.float_value(tags["EXIF FocalPlaneYResolution"]) - + if pixels_per_unit > 0 and self.width is not None: units_per_pixel = 1 / pixels_per_unit sensor_width = self.width * units_per_pixel * mm_per_unit @@ -558,7 +625,7 @@ def extract_focal(self, make, model, tags, xtags): else: focal_ratio = 0.85 - 
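# 0.85 is the generic fallback focal ratio used when neither the sensor database nor the EXIF/XMP tags yield an estimate; the absolute focal length is returned alongside it so compute_focal() can store both on the photo.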
return focal_ratio + return focal_ratio, focal def set_attr_from_xmp_tag(self, attr, xmp_tags, tags, cast=None): v = self.get_xmp_tag(xmp_tags, tags) @@ -570,11 +637,11 @@ def set_attr_from_xmp_tag(self, attr, xmp_tags, tags, cast=None): if (cast == float or cast == int) and "/" in v: v = self.try_parse_fraction(v) setattr(self, attr, cast(v)) - + def get_xmp_tag(self, xmp_tags, tags): if isinstance(tags, str): tags = [tags] - + for tag in tags: if tag in xmp_tags: t = xmp_tags[tag] @@ -590,7 +657,7 @@ def get_xmp_tag(self, xmp_tags, tags): elif isinstance(t, int) or isinstance(t, float): return t - + # From https://github.com/mapillary/OpenSfM/blob/master/opensfm/exif.py def get_xmp(self, file): img_bytes = file.read() @@ -619,7 +686,7 @@ def get_xmp(self, file): def dms_to_decimal(self, dms, sign): """Converts dms coords to decimal degrees""" degrees, minutes, seconds = self.float_values(dms) - + if degrees is not None and minutes is not None and seconds is not None: return (-1 if sign.values[0] in 'SWsw' else 1) * ( degrees + @@ -674,17 +741,26 @@ def try_parse_fraction(self, val): return num / den if den != 0 else val except ValueError: pass - return val + return val + + def get_size(self): + return self.width, self.height def get_radiometric_calibration(self): + if self.camera_make == "Parrot" and self.camera_model == "Sequoia": + return self.get_radiometric_calibration_sequoia() + if isinstance(self.radiometric_calibration, str): parts = self.radiometric_calibration.split(" ") if len(parts) == 3: return list(map(float, parts)) return [None, None, None] - + def get_dark_level(self): + if self.camera_make == "Parrot" and self.camera_model == "Sequoia": + return self.get_dark_level_sequoia() + if self.black_level: levels = np.array([float(v) for v in self.black_level.split(" ")]) return levels.mean() @@ -715,6 +791,25 @@ def get_vignetting_polynomial(self): return coeffs + def get_principal_point(self): + if self.principal_point is not None: + parts = self.principal_point.split(",") + if len(parts) > 0: + return list(map(float, parts)) + return None + + def get_distortion_parameters(self): + if self.distortion_parameters is not None: + parts = self.distortion_parameters.split(" ") + if len(parts) > 0: + return list(map(float, parts)) + return None + + def get_focal_plane_resolution(self): + if self.focal_plane_resolution_x is not None and self.focal_plane_resolution_y is not None: + return self.focal_plane_resolution_x, self.focal_plane_resolution_y + return None + def get_utc_time(self): if self.utc_time: return datetime.fromtimestamp(self.utc_time / 1000, timezone.utc) @@ -725,29 +820,43 @@ def get_photometric_exposure(self): return self.exposure_time / (self.fnumber * self.fnumber) def get_horizontal_irradiance(self): + scale = self.get_irradiance_scale() if self.horizontal_irradiance is not None: - scale = 1.0 # Assumed - if self.irradiance_scale_to_si is not None: - scale = self.irradiance_scale_to_si - return self.horizontal_irradiance * scale - elif self.camera_make == "DJI" and self.spectral_irradiance is not None: - # Phantom 4 Multispectral saves this value in @drone-dji:Irradiance - return self.spectral_irradiance - + elif self.spectral_irradiance is not None: + return self.spectral_irradiance * scale + else: + return None + def get_sun_sensor(self): + if self.camera_make == "Parrot" and self.camera_model == "Sequoia": + return self.get_sun_sensor_sequoia() + if self.sun_sensor is not None: # TODO: Presence of XMP:SunSensorExposureTime # and 
XMP:SunSensorSensitivity might - # require additional logic. If these two tags are present, + # require additional logic. If these two tags are present, # then sun_sensor is not in physical units? - return self.sun_sensor / 65535.0 # normalize uint16 (is this correct?) + return self.sun_sensor / 65535.0 # normalize uint16 (assumed) elif self.spectral_irradiance is not None: - scale = 1.0 # Assumed - if self.irradiance_scale_to_si is not None: - scale = self.irradiance_scale_to_si - - return self.spectral_irradiance * scale + return self.spectral_irradiance * self.get_irradiance_scale() + else: + return None + + def get_irradiance_scale(self): + scale = 1.0 + if self.irradiance_scale_to_si is not None: + scale = self.irradiance_scale_to_si + elif self.camera_make == 'MicaSense': + # If a MicaSense DLS2 is missing the scale metadata, + # multiply the SpectralIrradiance and HorizontalIrradiance by 0.01 to get to W/m2/nm. + # See https://github.com/micasense/imageprocessing/issues/47 + scale = 0.01 + elif self.camera_make == 'DJI': + # Phantom 4 Multispectral saves irradiance in @drone-dji:Irradiance and Camera:Irradiance + scale = 1.0 / math.pi + + return scale def get_dls_pose(self): if self.dls_yaw is not None: @@ -765,6 +874,13 @@ def get_bit_depth_max(self): return None + def get_rig_relatives(self): + if self.rig_relatives is not None: + parts = self.rig_relatives.split(",") + if len(parts) > 0: + return list(map(float, parts)) + return None + def get_capture_id(self): # Use capture UUID first, capture time as fallback if self.capture_uuid is not None: @@ -798,7 +914,7 @@ def is_thermal(self): if(self.camera_make == "DJI" and self.camera_model == "ZH20T" and self.width == 640 and self.height == 512): return True return self.band_name.upper() in ["LWIR"] # TODO: more?
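# Illustrative check for get_irradiance_scale() above (numbers are hypothetical): a MicaSense DLS2 spectral_irradiance of 120 with no irradiance_scale_to_si value is scaled by 0.01, giving 1.2 W/m2/nm, per the micasense/imageprocessing convention linked above.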
- + def is_rgb(self): return self.band_name.upper() in ["RGB", "REDGREENBLUE"] @@ -819,7 +935,7 @@ def to_opensfm_exif(self, rolling_shutter = False, rolling_shutter_readout = 0): capture_time = 0.0 if self.utc_time is not None: capture_time = self.utc_time / 1000.0 - + gps = {} has_gps = self.latitude is not None and self.longitude is not None if has_gps: @@ -833,7 +949,7 @@ def to_opensfm_exif(self, rolling_shutter = False, rolling_shutter_readout = 0): dop = self.get_gps_dop() if dop is None: dop = 10.0 # Default - + gps['dop'] = dop d = { @@ -855,26 +971,26 @@ def to_opensfm_exif(self, rolling_shutter = False, rolling_shutter_readout = 0): 'phi': self.phi, 'kappa': self.kappa } - + # Speed is not useful without GPS if self.has_speed() and has_gps: d['speed'] = [self.speed_y, self.speed_x, self.speed_z] - + if rolling_shutter: d['rolling_shutter'] = get_rolling_shutter_readout(self, rolling_shutter_readout) - + return d def has_ypr(self): return self.yaw is not None and \ self.pitch is not None and \ self.roll is not None - + def has_opk(self): return self.omega is not None and \ self.phi is not None and \ self.kappa is not None - + def has_speed(self): return self.speed_x is not None and \ self.speed_y is not None and \ @@ -883,14 +999,14 @@ def has_speed(self): def has_geo(self): return self.latitude is not None and \ self.longitude is not None - + def compute_opk(self): if self.has_ypr() and self.has_geo(): y, p, r = math.radians(self.yaw), math.radians(self.pitch), math.radians(self.roll) - # Ref: New Calibration and Computing Method for Direct - # Georeferencing of Image and Scanner Data Using the - # Position and Angular Data of an Hybrid Inertial Navigation System + # Ref: New Calibration and Computing Method for Direct + # Georeferencing of Image and Scanner Data Using the + # Position and Angular Data of an Hybrid Inertial Navigation System # by Manfred Bäumker # YPR rotation matrix @@ -909,19 +1025,19 @@ def compute_opk(self): cbb = np.array([[0, 1, 0], [1, 0, 0], [0, 0, -1]]) - + delta = 1e-7 - + alt = self.altitude if self.altitude is not None else 0.0 p1 = np.array(ecef_from_lla(self.latitude + delta, self.longitude, alt)) p2 = np.array(ecef_from_lla(self.latitude - delta, self.longitude, alt)) xnp = p1 - p2 m = np.linalg.norm(xnp) - + if m == 0: log.ODM_WARNING("Cannot compute OPK angles, divider = 0") return - + # Unit vector pointing north xnp /= m @@ -948,6 +1064,187 @@ def get_capture_megapixels(self): return self.width * self.height / 1e6 else: return 0.0 - + def is_make_model(self, make, model): return self.camera_make.lower() == make.lower() and self.camera_model.lower() == model.lower() + + ###################################################################################################################### + # Adapting MicaSense image processing script for thermal band alignment + # - https://github.com/micasense/imageprocessing/blob/236cd23978116fd4cdfc79e4cd1cdc68e1b004d8/micasense/imageutils.py + # - https://github.com/micasense/imageprocessing/blob/236cd23978116fd4cdfc79e4cd1cdc68e1b004d8/micasense/image.py + ###################################################################################################################### + def principal_point_px(self): + principal_point = self.get_principal_point() + focal_plane_resolution = self.get_focal_plane_resolution() + center_x = principal_point[0] * focal_plane_resolution[0] + center_y = principal_point[1] * focal_plane_resolution[1] + return (center_x, center_y) + + def 
cv2_camera_matrix(self): + center_x, center_y = self.principal_point_px() + focal_plane_resolution = self.get_focal_plane_resolution() + + # set up camera matrix for cv2 + cam_mat = np.zeros((3, 3)) + cam_mat[0, 0] = self.focal_length * focal_plane_resolution[0] + cam_mat[1, 1] = self.focal_length * focal_plane_resolution[1] + cam_mat[2, 2] = 1.0 + cam_mat[0, 2] = center_x + cam_mat[1, 2] = center_y + + return cam_mat + + def cv2_distortion_coeff(self): + return np.array(self.get_distortion_parameters())[[0, 1, 3, 4, 2]] + + def get_homography(self, ref, R=None, T=None, undistorted=True): + ''' get the homography that maps from this image to the reference image ''' + + # convert euler angles to a rotation matrix + def rotations_degrees_to_rotation_matrix(rotation_degrees): + cx = np.cos(np.deg2rad(rotation_degrees[0])) + cy = np.cos(np.deg2rad(rotation_degrees[1])) + cz = np.cos(np.deg2rad(rotation_degrees[2])) + sx = np.sin(np.deg2rad(rotation_degrees[0])) + sy = np.sin(np.deg2rad(rotation_degrees[1])) + sz = np.sin(np.deg2rad(rotation_degrees[2])) + + Rx = np.mat([ 1, 0, 0, + 0, cx,-sx, + 0, sx, cx]).reshape(3,3) + Ry = np.mat([ cy, 0, sy, + 0, 1, 0, + -sy, 0, cy]).reshape(3,3) + Rz = np.mat([ cz,-sz, 0, + sz, cz, 0, + 0, 0, 1]).reshape(3,3) + R = Rx*Ry*Rz + return R + + if R is None: + R = rotations_degrees_to_rotation_matrix(self.get_rig_relatives()) + if T is None: + T =np.zeros(3) + R_ref = rotations_degrees_to_rotation_matrix(ref.get_rig_relatives()) + A = np.zeros((4,4)) + A[0:3,0:3]=np.dot(R_ref.T,R) + A[0:3,3]=T + A[3,3]=1. + + C, _ = cv2.getOptimalNewCameraMatrix(self.cv2_camera_matrix(), + self.cv2_distortion_coeff() if undistorted else None, + self.get_size(), alpha=1) + Cr, _ = cv2.getOptimalNewCameraMatrix(ref.cv2_camera_matrix(), + ref.cv2_distortion_coeff() if undistorted else None, + ref.get_size(), alpha=1) + CC = np.zeros((4,4)) + CC[0:3,0:3] = C + CC[3,3]=1. + CCr = np.zeros((4,4)) + CCr[0:3,0:3] = Cr + CCr[3,3]=1. 
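+ # Compose the rig homography: unproject from this camera (inv(CC)), apply the relative rotation between rig bands (A), then reproject into the reference camera (CCr). The 4x4 embedding keeps the translation in the fourth column until B[:,2] -= B[:,3] folds it back into the 3x3 projective part, and the final division by B[2,2] normalizes the result.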
+ + B = np.array(np.dot(CCr,np.dot(A,np.linalg.inv(CC)))) + B[:,2]=B[:,2]-B[:,3] + B = B[0:3,0:3] + B = B/B[2,2] + return np.array(B) + + def undistorted(self, image): + ''' return the undistorted image from input image ''' + new_cam_mat, _ = cv2.getOptimalNewCameraMatrix(self.cv2_camera_matrix(), + self.cv2_distortion_coeff(), + self.get_size(), + alpha=1) + map1, map2 = cv2.initUndistortRectifyMap(self.cv2_camera_matrix(), + self.cv2_distortion_coeff(), + np.eye(3), + new_cam_mat, + self.get_size(), + cv2.CV_32F) + # compute the undistorted 16 bit image + return cv2.remap(image, map1, map2, cv2.INTER_LINEAR) + + ###################################################################################################################### + # Parrot Sequoia image handler + # - https://github.com/dobedobedo/Parrot_Sequoia_Image_Handler + # - https://github.com/OpenDroneMap/ODM/commit/70b2913ec0377e72591288af0500de7c153b3656 + ###################################################################################################################### + + def get_dark_level_sequoia(self): + if self.sensor_model_coefficient is None: # Exif SensorModel is missing + return None + + sensor_coefs = np.array([float(v.strip(".")) for v in self.sensor_model_coefficient.split(",")]) + return sensor_coefs[1] + + def get_radiometric_calibration_sequoia(self): + if self.sensor_model_coefficient is None: # Exif SensorModel is missing + return [None, None, None] + + gain_scale = 100 * self.get_gain() * self.exposure_time + sensor_coefs = np.array([float(v.strip(".")) for v in self.sensor_model_coefficient.split(",")]) + a1 = gain_scale * math.pow(self.fnumber, 2) / ((sensor_coefs[0] * self.exposure_time * self.iso_speed + sensor_coefs[2]) / 65536.0) + return [a1, None, None] + + def get_sun_sensor_sequoia(self): + if self.irradiance_calibration is None: # Exif IrradianceCalibrationMeasurement is missing + return None + + irrad_cal_pm = np.array([float(v) for v in self.irradiance_calibration.split(",")]) + # GainIndex IntegTime CH0 CH1 + # irrad_cal_pm[0]=0 irrad_cal_pm[1] irrad_cal_pm[2] irrad_cal_pm[3] + # irrad_cal_pm[4]=1 irrad_cal_pm[5] irrad_cal_pm[6] irrad_cal_pm[7] + # irrad_cal_pm[8]=2 irrad_cal_pm[9] irrad_cal_pm[10] irrad_cal_pm[11] + # irrad_cal_pm[12]=3 irrad_cal_pm[13] irrad_cal_pm[14] irrad_cal_pm[15] + irrad_cal_0 = irrad_cal_pm[2] * (600.0 / irrad_cal_pm[1]) + irrad_cal_1 = irrad_cal_pm[6] * (600.0 / irrad_cal_pm[5]) + irrad_cal_2 = irrad_cal_pm[10] * (600.0 / irrad_cal_pm[9]) + irrad_cal_3 = irrad_cal_pm[14] * (600.0 / irrad_cal_pm[13]) + irrad_cal_fac = [irrad_cal_0, irrad_cal_1, irrad_cal_2, irrad_cal_3] + + irrad_binary = base64.b64decode(self.irradiance_list) + + irrad_last3 = None + irrad_last2 = None + irrad_last1 = None + + for irrad_pm in struct.iter_unpack("QHHHHfff", irrad_binary): + # Q=uint64 H=uint16 f=float32 + # irrad_pm[0]=TimeStamp irrad_pm[1]=CH0 count irrad_pm[2]=CH1 count + # irrad_pm[3]=GainIndex irrad_pm[4]=IntegTime + # irrad_pm[5]=Yaw irrad_pm[6]=Pitch irrad_pm[7]=Roll + # + irrad_fac = irrad_cal_fac[1]/irrad_cal_fac[irrad_pm[3]] * 600.0/irrad_pm[4] + irrad_last3 = irrad_last2 + irrad_last2 = irrad_last1 + irrad_last1 = irrad_pm[1] * irrad_fac + + if irrad_last3 is None: # No. 
of records less than 3 + return None + else: + irrad_avg = (irrad_last1 + irrad_last2 + irrad_last3) / 3.0 + return irrad_avg / irrad_cal_1 + + def get_vignetting_coefficients_sequoia(self): + if self.vignetting_polynomial_power is None or self.vignetting_polynomial_coefficient is None: + return None + + # Create powers and coefficients tuple list + powers = self.vignetting_polynomial_power.split(',') + coefficients = self.vignetting_polynomial_coefficient.split(',') + powers_coefficients = list() + for i in range(0, len(powers), 2): + powers_coefficients.append((int(powers[i]), int(powers[i+1]), float(coefficients[int(i/2)]))) + + # Compute vignetting coefficients + rows = self.height + cols = self.width + vig_array = np.ones((rows, cols)) + power_array = np.array(powers_coefficients) + for y, x in [(y, x) for y in range(rows) for x in range(cols)]: + vig_array[y, x] = (power_array[:, 2] * np.power(x/cols, power_array[:, 0]) * np.power(y/rows, power_array[:, 1])).sum() + + self.vignetting_coefficients = 1.0 / vig_array + + return 1.0 / vig_array diff --git a/opendm/point_cloud.py b/opendm/point_cloud.py index 79b7083d1..1d88c8c92 100644 --- a/opendm/point_cloud.py +++ b/opendm/point_cloud.py @@ -37,7 +37,7 @@ def ply_info(input_ply): i += 1 if i > 100: raise IOError("Cannot find end_header field. Invalid PLY?") - + return { 'has_normals': has_normals, @@ -62,7 +62,7 @@ def split(input_point_cloud, outdir, filename_template, capacity, dims=None): sys.exit(1) cmd = 'pdal split -i "%s" -o "%s" --capacity %s ' % (input_point_cloud, os.path.join(outdir, filename_template), capacity) - + if filename_template.endswith(".ply"): cmd += ("--writers.ply.sized_types=false " "--writers.ply.storage_mode=\"little endian\" ") @@ -91,12 +91,13 @@ def filter(input_point_cloud, output_point_cloud, output_stats, standard_deviati log.ODM_INFO("Sampling points around a %sm radius" % sample_radius) args.append('--radius %s' % sample_radius) - meank = 16 - log.ODM_INFO("Filtering {} (statistical, meanK {}, standard deviation {})".format(input_point_cloud, meank, standard_deviation)) - args.append('--meank %s' % meank) - args.append('--std %s' % standard_deviation) - args.append('--stats "%s"' % output_stats) - + if standard_deviation > 0: + meank = 16 + log.ODM_INFO("Filtering {} (statistical, meanK {}, standard deviation {})".format(input_point_cloud, meank, standard_deviation)) + args.append('--meank %s' % meank) + args.append('--std %s' % standard_deviation) + args.append('--stats "%s"' % output_stats) + if boundary is not None: log.ODM_INFO("Boundary {}".format(boundary)) fd, boundary_json_file = tempfile.mkstemp(suffix='.boundary.json') @@ -118,7 +119,7 @@ def fallback(): if not os.path.isfile(stats_file): return fallback() - + with open(stats_file, 'r') as f: j = json.loads(f.read()) if "spacing" in j: @@ -140,7 +141,7 @@ def export_summary_json(pointcloud_path, summary_file_path): def get_extent(input_point_cloud): fd, json_file = tempfile.mkstemp(suffix='.json') os.close(fd) - + # Get point cloud extent fallback = False @@ -159,7 +160,7 @@ def get_extent(input_point_cloud): bounds = {} with open(json_file, 'r') as f: result = json.loads(f.read()) - + if not fallback: summary = result.get('summary') if summary is None: raise Exception("Cannot compute summary for %s (summary key missing)" % input_point_cloud) @@ -174,7 +175,7 @@ def get_extent(input_point_cloud): bounds = native.get('bbox') if bounds is None: raise Exception("Cannot compute bounds for %s (bounds key missing)" % input_point_cloud) - + if 
bounds.get('maxx', None) is None or \ bounds.get('minx', None) is None or \ bounds.get('maxy', None) is None or \ @@ -182,7 +183,7 @@ def get_extent(input_point_cloud): bounds.get('maxz', None) is None or \ bounds.get('minz', None) is None: raise Exception("Cannot compute bounds for %s (invalid keys) %s" % (input_point_cloud, str(bounds))) - + os.remove(json_file) return bounds @@ -203,7 +204,7 @@ def merge(input_point_cloud_files, output_file, rerun=False): } system.run('lasmerge -i {all_inputs} -o "{output}"'.format(**kwargs)) - + def fast_merge_ply(input_point_cloud_files, output_file): # Assumes that all input files share the same header/content format @@ -213,11 +214,11 @@ def fast_merge_ply(input_point_cloud_files, output_file): if num_files == 0: log.ODM_WARNING("No input point cloud files to process") return - + if io.file_exists(output_file): log.ODM_WARNING("Removing previous point cloud: %s" % output_file) os.remove(output_file) - + vertex_count = sum([ply_info(pcf)['vertex_count'] for pcf in input_point_cloud_files]) master_file = input_point_cloud_files[0] with open(output_file, "wb") as out: @@ -229,7 +230,7 @@ def fast_merge_ply(input_point_cloud_files, output_file): i = 0 while line.strip().lower() != "end_header": line = fhead.readline() - + # Intercept element vertex field if line.lower().startswith("element vertex "): out.write(("element vertex %s\n" % vertex_count).encode('utf8')) @@ -239,7 +240,7 @@ def fast_merge_ply(input_point_cloud_files, output_file): i += 1 if i > 100: raise IOError("Cannot find end_header field. Invalid PLY?") - + for ipc in input_point_cloud_files: i = 0 with open(ipc, "rb") as fin: @@ -251,10 +252,10 @@ def fast_merge_ply(input_point_cloud_files, output_file): i += 1 if i > 100: raise IOError("Cannot find end_header field. 
Invalid PLY?") - + # Write fields out.write(fin.read()) - + return output_file @@ -281,16 +282,17 @@ def post_point_cloud_steps(args, tree, rerun=False): pc_classify_marker = os.path.join(tree.odm_georeferencing, 'pc_classify_done.txt') if not io.file_exists(pc_classify_marker) or rerun: - log.ODM_INFO("Classifying {} using Simple Morphological Filter (1/2)".format(tree.odm_georeferencing_model_laz)) + log.ODM_INFO("Classifying {} using morphological filter".format(tree.odm_georeferencing_model_laz)) commands.classify(tree.odm_georeferencing_model_laz, - args.smrf_scalar, - args.smrf_slope, - args.smrf_threshold, - args.smrf_window + args.smrf_scalar, + args.smrf_slope, + args.smrf_threshold, + args.smrf_window, + filter=args.pc_ground_filter ) - log.ODM_INFO("Classifying {} using OpenPointClass (2/2)".format(tree.odm_georeferencing_model_laz)) - classify(tree.odm_georeferencing_model_laz, args.max_concurrency) + # log.ODM_INFO("Classifying {} using OpenPointClass".format(tree.odm_georeferencing_model_laz)) + # classify(tree.odm_georeferencing_model_laz, args.max_concurrency) with open(pc_classify_marker, 'w') as f: f.write('Classify: smrf\n') @@ -298,14 +300,14 @@ def post_point_cloud_steps(args, tree, rerun=False): f.write('Slope: {}\n'.format(args.smrf_slope)) f.write('Threshold: {}\n'.format(args.smrf_threshold)) f.write('Window: {}\n'.format(args.smrf_window)) - + if args.pc_rectify: - commands.rectify(tree.odm_georeferencing_model_laz) + commands.rectify(tree.odm_georeferencing_model_laz, reclassify_threshold=args.smrf_threshold*1.5) # 50% more than the input SMRF elevation threshold # XYZ point cloud output if args.pc_csv: log.ODM_INFO("Creating CSV file (XYZ format)") - + if not io.file_exists(tree.odm_georeferencing_xyz_file) or rerun: system.run("pdal translate -i \"{}\" " "-o \"{}\" " @@ -320,7 +322,7 @@ def post_point_cloud_steps(args, tree, rerun=False): # LAS point cloud output if args.pc_las: log.ODM_INFO("Creating LAS file") - + if not io.file_exists(tree.odm_georeferencing_model_las) or rerun: system.run("pdal translate -i \"{}\" " "-o \"{}\" ".format( diff --git a/opendm/thermal.py b/opendm/thermal.py index f0ad20991..6e0c83817 100644 --- a/opendm/thermal.py +++ b/opendm/thermal.py @@ -15,8 +15,8 @@ def resize_to_match(image, match_photo = None): if match_photo is not None: h, w, _ = image.shape if w != match_photo.width or h != match_photo.height: - image = cv2.resize(image, None, - fx=match_photo.width/w, + image = cv2.resize(image, None, + fx=match_photo.width/w, fy=match_photo.height/h, interpolation=cv2.INTER_LANCZOS4) return image @@ -64,7 +64,6 @@ def dn_to_temperature(photo, image, images_path): image = image.astype("float32") return image else: - image = image.astype("float32") + # image = image.astype("float32") log.ODM_WARNING("Tried to radiometrically calibrate a non-thermal image with temperature values (%s)" % photo.filename) return image - diff --git a/opendm/types.py b/opendm/types.py index cc5889125..c36794314 100644 --- a/opendm/types.py +++ b/opendm/types.py @@ -28,7 +28,7 @@ def __init__(self, photos): self.gcp = None self.multi_camera = self.detect_multi_camera() self.filter_photos() - + def detect_multi_camera(self): """ Looks at the reconstruction photos and determines if this @@ -44,9 +44,9 @@ def detect_multi_camera(self): band_indexes[p.band_name] = str(p.band_index) band_photos[p.band_name].append(p) - + bands_count = len(band_photos) - + # Band name with the minimum number of photos max_band_name = None max_photos = -1 @@ -59,17 +59,17 @@ def 
detect_multi_camera(self): # Validate that all bands have the same number of images, # otherwise this is not a multi-camera setup img_per_band = len(band_photos[max_band_name]) - + mc = [] for band_name in band_indexes: mc.append({'name': band_name, 'photos': band_photos[band_name]}) - + filter_missing = False for band in band_photos: if len(band_photos[band]) < img_per_band: log.ODM_WARNING("Multi-camera setup detected, but band \"%s\" (identified from \"%s\") has only %s images (instead of %s), perhaps images are missing or are corrupted." % (band, band_photos[band][0].filename, len(band_photos[band]), len(band_photos[max_band_name]))) filter_missing = True - + if filter_missing: # Calculate files to ignore _, p2s = multispectral.compute_band_maps(mc, max_band_name) @@ -88,7 +88,7 @@ def detect_multi_camera(self): self.photos = [p for p in self.photos if p != photo] for i in range(len(mc)): mc[i]['photos'] = [p for p in mc[i]['photos'] if p != photo] - + log.ODM_INFO("New image count: %s" % len(self.photos)) # We enforce a normalized band order for all bands that we can identify @@ -97,22 +97,23 @@ def detect_multi_camera(self): 'RGB': '0', 'REDGREENBLUE': '0', - 'RED': '1', - 'R': '1', + 'BLUE': '1', + 'B': '1', 'GREEN': '2', 'G': '2', - 'BLUE': '3', - 'B': '3', + 'RED': '3', + 'R': '3', - 'NIR': '4', - 'N': '4', + 'REDEDGE': '4', + 'RE': '4', - 'REDEDGE': '5', - 'RE': '5', + 'NIR': '5', + 'N': '5', 'PANCHRO': '6', + 'P': '6', 'LWIR': '7', 'L': '7', @@ -127,7 +128,7 @@ def detect_multi_camera(self): for c, d in enumerate(mc): log.ODM_INFO(f"Band {c + 1}: {d['name']}") - + return mc return None @@ -135,7 +136,7 @@ def detect_multi_camera(self): def filter_photos(self): if not self.multi_camera: return # Nothing to do, use all images - + else: # Sometimes people might try process both RGB + Blue/Red/Green bands # because these are the contents of the SD card from a drone (e.g. DJI P4 Multispectral) @@ -143,18 +144,18 @@ def filter_photos(self): bands = {} for b in self.multi_camera: bands[b['name'].lower()] = b['name'] - + bands_to_remove = [] if 'rgb' in bands or 'redgreenblue' in bands: if 'red' in bands and 'green' in bands and 'blue' in bands: bands_to_remove.append(bands['rgb'] if 'rgb' in bands else bands['redgreenblue']) - + # Mavic 3M's RGB camera lens are too different than the multispectral ones # so we drop the RGB channel instead elif self.photos[0].is_make_model("DJI", "M3M") and 'red' in bands and 'green' in bands: bands_to_remove.append(bands['rgb'] if 'rgb' in bands else bands['redgreenblue']) - + else: for b in ['red', 'green', 'blue']: if b in bands: @@ -176,13 +177,13 @@ def is_georeferenced(self): def has_gcp(self): return self.is_georeferenced() and self.gcp is not None and self.gcp.exists() - + def has_geotagged_photos(self): for photo in self.photos: if photo.latitude is None and photo.longitude is None: return False - return True + return True def georeference_with_gcp(self, gcp_file, output_coords_file, output_gcp_file, output_model_txt_geo, rerun=False): if not io.file_exists(output_coords_file) or not io.file_exists(output_gcp_file) or rerun: @@ -190,25 +191,25 @@ def georeference_with_gcp(self, gcp_file, output_coords_file, output_gcp_file, o if gcp.exists(): if gcp.entries_count() == 0: raise RuntimeError("This GCP file does not have any entries. Are the entries entered in the proper format?") - + gcp.check_entries() # Convert GCP file to a UTM projection since the rest of the pipeline # does not handle other SRS well. 
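# GCP entries whose images are not part of this reconstruction are collected in rejected_entries by create_utm_copy and reported as warnings below rather than aborting the run.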
rejected_entries = [] utm_gcp = GCPFile(gcp.create_utm_copy(output_gcp_file, filenames=[p.filename for p in self.photos], rejected_entries=rejected_entries, include_extras=True)) - + if not utm_gcp.exists(): raise RuntimeError("Could not project GCP file to UTM. Please double check your GCP file for mistakes.") - + for re in rejected_entries: log.ODM_WARNING("GCP line ignored (image not found): %s" % str(re)) - + if utm_gcp.entries_count() > 0: log.ODM_INFO("%s GCP points will be used for georeferencing" % utm_gcp.entries_count()) else: raise RuntimeError("A GCP file was provided, but no valid GCP entries could be used. Note that the GCP file is case sensitive (\".JPG\" is not the same as \".jpg\").") - + self.gcp = utm_gcp # Compute RTC offsets from GCP points @@ -223,7 +224,7 @@ def georeference_with_gcp(self, gcp_file, output_coords_file, output_gcp_file, o f.write(coords_header + "\n") f.write("{} {}\n".format(x_off, y_off)) log.ODM_INFO("Generated coords file from GCP: %s" % coords_header) - + # Deprecated: This is mostly for backward compatibility and should # be removed at some point shutil.copyfile(output_coords_file, output_model_txt_geo) @@ -235,7 +236,7 @@ def georeference_with_gcp(self, gcp_file, output_coords_file, output_gcp_file, o log.ODM_INFO("Coordinates file already exists: %s" % output_coords_file) log.ODM_INFO("GCP file already exists: %s" % output_gcp_file) self.gcp = GCPFile(output_gcp_file) - + self.georef = ODM_GeoRef.FromCoordsFile(output_coords_file) return self.georef @@ -245,7 +246,7 @@ def georeference_with_gps(self, images_path, output_coords_file, output_model_tx location.extract_utm_coords(self.photos, images_path, output_coords_file) else: log.ODM_INFO("Coordinates file already exists: %s" % output_coords_file) - + # Deprecated: This is mostly for backward compatibility and should # be removed at some point if not io.file_exists(output_model_txt_geo) or rerun: with open(output_coords_file) as f: with open(output_model_txt_geo, 'w+') as w: w.write(f.readline()) # Offset else: log.ODM_INFO("Model geo file already exists: %s" % output_model_txt_geo) - + self.georef = ODM_GeoRef.FromCoordsFile(output_coords_file) except: log.ODM_WARNING('Could not generate coordinates file.
The orthophoto will not be georeferenced.') @@ -264,7 +265,7 @@ def georeference_with_gps(self, images_path, output_coords_file, output_model_tx return self.georef def save_proj_srs(self, file): - # Save proj to file for future use (unless this + # Save proj to file for future use (unless this # dataset is not georeferenced) if self.is_georeferenced(): with open(file, 'w') as f: @@ -273,7 +274,7 @@ def save_proj_srs(self, file): def get_proj_srs(self): if self.is_georeferenced(): return self.georef.proj4() - + def get_proj_offset(self): if self.is_georeferenced(): return (self.georef.utm_east_offset, self.georef.utm_north_offset) @@ -284,7 +285,7 @@ def get_photo(self, filename): for p in self.photos: if p.filename == filename: return p - + class ODM_GeoRef(object): @staticmethod def FromCoordsFile(coords_file): @@ -318,10 +319,10 @@ def __init__(self, srs, utm_east_offset, utm_north_offset): def proj4(self): return self.srs.to_proj4() - + def utm_offset(self): return (self.utm_east_offset, self.utm_north_offset) - + class ODM_Tree(object): def __init__(self, root_path, gcp_file = None, geo_file = None, align_file = None): # root path to the project @@ -362,6 +363,7 @@ def __init__(self, root_path, gcp_file = None, geo_file = None, align_file = Non # filter points self.filtered_point_cloud = os.path.join(self.odm_filterpoints, "point_cloud.ply") + self.filtered_point_cloud_classified = os.path.join(self.odm_filterpoints, "point_cloud_classified.ply") self.filtered_point_cloud_stats = os.path.join(self.odm_filterpoints, "point_cloud_stats.json") # odm_meshing @@ -381,7 +383,7 @@ def __init__(self, root_path, gcp_file = None, geo_file = None, align_file = Non self.odm_georeferencing_gcp_utm = os.path.join(self.odm_georeferencing, 'gcp_list_utm.txt') self.odm_geo_file = geo_file or io.find('geo.txt', self.root_path) self.odm_align_file = align_file or io.find('align.laz', self.root_path) or io.find('align.las', self.root_path) or io.find('align.tif', self.root_path) - + self.odm_georeferencing_proj = 'proj.txt' self.odm_georeferencing_model_txt_geo = os.path.join( self.odm_georeferencing, 'odm_georeferencing_model_geo.txt') @@ -405,7 +407,7 @@ def __init__(self, root_path, gcp_file = None, geo_file = None, align_file = Non # tiles self.orthophoto_tiles = os.path.join(self.root_path, "orthophoto_tiles") - # Split-merge + # Split-merge self.submodels_path = os.path.join(self.root_path, 'submodels') # Tiles @@ -439,13 +441,13 @@ def rerun(self): return (self.args.rerun is not None and self.args.rerun == self.name) or \ (self.args.rerun_all) or \ (self.args.rerun_from is not None and self.name in self.args.rerun_from) - + def run(self, outputs = {}): start_time = system.now_raw() log.logger.log_json_stage_run(self.name, start_time) log.ODM_INFO('Running %s stage' % self.name) - + self.process(self.args, outputs) # The tree variable should always be populated at this point @@ -474,7 +476,7 @@ def delta_progress(self): return max(0.0, self.progress - self.prev_stage.progress) else: return max(0.0, self.progress) - + def previous_stages_progress(self): if self.prev_stage: return max(0.0, self.prev_stage.progress) @@ -486,7 +488,7 @@ def update_progress_end(self): def update_progress(self, progress): progress = max(0.0, min(100.0, progress)) - progressbc.send_update(self.previous_stages_progress() + + progressbc.send_update(self.previous_stages_progress() + (self.delta_progress() / 100.0) * float(progress)) def last_stage(self): @@ -494,7 +496,7 @@ def last_stage(self): return 
self.next_stage.last_stage() else: return self - + def process(self, args, outputs): raise NotImplementedError diff --git a/opendm/utils.py b/opendm/utils.py index 71d7ca3de..075694ac6 100644 --- a/opendm/utils.py +++ b/opendm/utils.py @@ -14,6 +14,10 @@ def default(self, obj): def get_depthmap_resolution(args, photos): + if 'depthmap_resolution_is_set' in args: + # Override pc-quality + return int(args.depthmap_resolution) + max_dims = find_largest_photo_dims(photos) min_dim = 320 # Never go lower than this @@ -24,17 +28,17 @@ def get_depthmap_resolution(args, photos): megapixels = (w * h) / 1e6 multiplier = 1 - if megapixels < 6: - multiplier = 2 - elif megapixels > 42: + if megapixels > 42: + multiplier = 0.25 + elif megapixels > 6: multiplier = 0.5 pc_quality_scale = { - 'ultra': 0.5, - 'high': 0.25, - 'medium': 0.125, - 'low': 0.0675, - 'lowest': 0.03375 + 'ultra': 1.0, + 'high': 0.5, + 'medium': 0.25, + 'low': 0.125, + 'lowest': 0.0625 } return max(min_dim, int(max_dim * pc_quality_scale[args.pc_quality] * multiplier)) diff --git a/requirements.txt b/requirements.txt index 844668f15..d310b3080 100644 --- a/requirements.txt +++ b/requirements.txt @@ -33,7 +33,7 @@ fpdf2==2.4.6 Shapely==1.7.1 onnxruntime==1.12.1 pygltflib==1.15.3 -codem==0.24.0 trimesh==3.17.1 pandas==1.5.2 piexif==1.1.3 +https://github.com/AutoModality/ODM/raw/develop/opendm/dem/codem/codem-0.25.3.dev0-py3-none-any.whl diff --git a/stages/mvstex.py b/stages/mvstex.py index 6b4b64b12..91a70610e 100644 --- a/stages/mvstex.py +++ b/stages/mvstex.py @@ -36,6 +36,7 @@ def add_run(nvm_file, primary=True, band=None): 'model': tree.odm_mesh, 'nadir': False, 'primary': primary, + 'band': band, 'nvm_file': nvm_file, 'labeling_file': os.path.join(tree.odm_texturing, "odm_textured_model_geo_labeling.vec") if subdir else None }] @@ -46,6 +47,7 @@ def add_run(nvm_file, primary=True, band=None): 'model': tree.odm_25dmesh, 'nadir': True, 'primary': primary, + 'band': band, 'nvm_file': nvm_file, 'labeling_file': os.path.join(tree.odm_25dtexturing, "odm_textured_model_geo_labeling.vec") if subdir else None }] @@ -80,6 +82,8 @@ def add_run(nvm_file, primary=True, band=None): os.unlink(unaligned_obj) # Format arguments to fit Mvs-Texturing app + dataTerm = args.texturing_data_term + outlierRemovalType = args.texturing_outlier_removal_type skipGlobalSeamLeveling = "" keepUnseenFaces = "" nadir = "" @@ -91,13 +95,21 @@ def add_run(nvm_file, primary=True, band=None): if (r['nadir']): nadir = '--nadir_mode' + # thermal band + if (r['band'] == 'lwir'): + dataTerm = args.thermal_texturing_data_term + outlierRemovalType = args.thermal_texturing_outlier_removal_type + skipGlobalSeamLeveling = "" + skipLocalSeamLeveling = "" + keepUnseenFaces = "" + # mvstex definitions kwargs = { 'bin': context.mvstex_path, 'out_dir': os.path.join(r['out_dir'], "odm_textured_model_geo"), 'model': r['model'], - 'dataTerm': 'gmi', - 'outlierRemovalType': 'gauss_clamping', + 'dataTerm': dataTerm, + 'outlierRemovalType': outlierRemovalType, 'skipGlobalSeamLeveling': skipGlobalSeamLeveling, 'keepUnseenFaces': keepUnseenFaces, 'toneMapping': 'none', diff --git a/stages/odm_dem.py b/stages/odm_dem.py index 6964e036d..2868fe63a 100755 --- a/stages/odm_dem.py +++ b/stages/odm_dem.py @@ -12,6 +12,7 @@ from opendm import pseudogeo from opendm.tiles.tiler import generate_dem_tiles from opendm.cogeo import convert_to_cogeo +from opendm.photo import find_largest_photo_dims class ODMDEMStage(types.ODM_Stage): def process(self, args, outputs): @@ -22,14 +23,31 @@
def process(self, args, outputs): pc_model_found = io.file_exists(dem_input) ignore_resolution = False pseudo_georeference = False - + if not reconstruction.is_georeferenced(): log.ODM_WARNING("Not georeferenced, using ungeoreferenced point cloud...") ignore_resolution = True pseudo_georeference = True - resolution = gsd.cap_resolution(args.dem_resolution, tree.opensfm_reconstruction, - gsd_scaling=1.0, + # calculate GSD scaling based on point cloud density + pc_quality_scale = { + 'ultra': 1.0, + 'high': 2.0, + 'medium': 4.0, + 'low': 8.0, + 'lowest': 16.0 + } + gsd_scaling = pc_quality_scale[args.pc_quality] + if 'depthmap_resolution_is_set' in args: + max_dims = find_largest_photo_dims(reconstruction.photos) + if max_dims is not None: + w, h = max_dims + max_dim = max(w, h) + gsd_scaling = max_dim / args.depthmap_resolution + + resolution = gsd.cap_resolution(args.dem_resolution, tree.opensfm_reconstruction, + gsd_error_estimate=0, + gsd_scaling=gsd_scaling, ignore_gsd=args.ignore_gsd, ignore_resolution=ignore_resolution and args.ignore_gsd, has_gcp=reconstruction.has_gcp()) @@ -66,14 +84,16 @@ def process(self, args, outputs): commands.create_dem( dem_input, product, - output_type='idw' if product == 'dtm' else 'max', + output_type=args.dtm_interpolation if product == 'dtm' else args.dsm_interpolation, radiuses=list(map(str, radius_steps)), + power=1, gapfill=args.dem_gapfill_steps > 0, outdir=odm_dem_root, resolution=resolution / 100.0, decimation=args.dem_decimation, max_workers=args.max_concurrency, with_euclidean_map=args.dem_euclidean_map, + apply_smoothing=True, max_tiles=None if reconstruction.has_geotagged_photos() else math.ceil(len(reconstruction.photos) / 2) ) @@ -89,7 +109,7 @@ def process(self, args, outputs): if args.tiles: generate_dem_tiles(dem_geotiff_path, tree.path("%s_tiles" % product), args.max_concurrency, resolution) - + if args.cog: convert_to_cogeo(dem_geotiff_path, max_workers=args.max_concurrency) diff --git a/stages/odm_georeferencing.py b/stages/odm_georeferencing.py index de16c374b..bfcb03a5d 100644 --- a/stages/odm_georeferencing.py +++ b/stages/odm_georeferencing.py @@ -107,7 +107,7 @@ def process(self, args, outputs): with open(gcp_geojson_export_file, 'w') as f: f.write(json.dumps(geojson, indent=4)) - + with zipfile.ZipFile(gcp_geojson_zip_export_file, 'w', compression=zipfile.ZIP_LZMA) as f: f.write(gcp_geojson_export_file, arcname=os.path.basename(gcp_geojson_export_file)) @@ -278,6 +278,3 @@ def transform_textured_model(obj): if args.optimize_disk_space and io.file_exists(tree.odm_georeferencing_model_laz) and io.file_exists(tree.filtered_point_cloud): os.remove(tree.filtered_point_cloud) - - - diff --git a/stages/odm_meshing.py b/stages/odm_meshing.py index 6f5f603e6..42096c5e1 100644 --- a/stages/odm_meshing.py +++ b/stages/odm_meshing.py @@ -8,6 +8,8 @@ from opendm import gsd from opendm import types from opendm.dem import commands +from opendm.dem import pdal +from opendm.photo import find_largest_photo_dims class ODMeshingStage(types.ODM_Stage): def process(self, args, outputs): @@ -19,49 +21,83 @@ def process(self, args, outputs): # Create full 3D model unless --skip-3dmodel is set if not args.skip_3dmodel: - if not io.file_exists(tree.odm_mesh) or self.rerun(): - log.ODM_INFO('Writing ODM Mesh file in: %s' % tree.odm_mesh) + if not io.file_exists(tree.odm_mesh) or self.rerun(): + log.ODM_INFO('Writing ODM Mesh file in: %s' % tree.odm_mesh) + + mesh.screened_poisson_reconstruction(tree.filtered_point_cloud, + tree.odm_mesh, + 
+                                  depth=self.params.get('oct_tree'),
+                                  samples=self.params.get('samples'),
+                                  maxVertexCount=self.params.get('max_vertex'),
+                                  pointWeight=self.params.get('point_weight'),
+                                  threads=max(1, self.params.get('max_concurrency') - 1)) # poissonrecon can get stuck on some machines if --threads == all cores
+            else:
+                log.ODM_WARNING('Found a valid ODM Mesh file in: %s' %
+                    tree.odm_mesh)
-                mesh.screened_poisson_reconstruction(tree.filtered_point_cloud,
-                                  tree.odm_mesh,
-                                  depth=self.params.get('oct_tree'),
-                                  samples=self.params.get('samples'),
-                                  maxVertexCount=self.params.get('max_vertex'),
-                                  pointWeight=self.params.get('point_weight'),
-                                  threads=max(1, self.params.get('max_concurrency') - 1)), # poissonrecon can get stuck on some machines if --threads == all cores
-            else:
-                log.ODM_WARNING('Found a valid ODM Mesh file in: %s' %
-                    tree.odm_mesh)
-
         self.update_progress(50)
 
-        # Always generate a 2.5D mesh
+        # Always generate a 2.5D mesh for texturing and orthophoto projection
         # unless --use-3dmesh is set.
         if not args.use_3dmesh:
-            if not io.file_exists(tree.odm_25dmesh) or self.rerun():
+            if not io.file_exists(tree.odm_25dmesh) or self.rerun():
 
-                log.ODM_INFO('Writing ODM 2.5D Mesh file in: %s' % tree.odm_25dmesh)
+                log.ODM_INFO('Writing ODM 2.5D Mesh file in: %s' % tree.odm_25dmesh)
 
-                multiplier = math.pi / 2.0
-                radius_steps = commands.get_dem_radius_steps(tree.filtered_point_cloud_stats, 3, args.orthophoto_resolution, multiplier=multiplier)
-                dsm_resolution = radius_steps[0] / multiplier
+                # calculate GSD scaling based on point cloud density
+                pc_quality_scale = {
+                    'ultra': 1.0,
+                    'high': 2.0,
+                    'medium': 4.0,
+                    'low': 8.0,
+                    'lowest': 16.0
+                }
+                gsd_scaling = pc_quality_scale[args.pc_quality]
+                if 'depthmap_resolution_is_set' in args:
+                    max_dims = find_largest_photo_dims(reconstruction.photos)
+                    if max_dims is not None:
+                        w, h = max_dims
+                        max_dim = max(w, h)
+                        gsd_scaling = max_dim / args.depthmap_resolution
 
-                log.ODM_INFO('ODM 2.5D DSM resolution: %s' % dsm_resolution)
-
-                if args.fast_orthophoto:
-                    dsm_resolution *= 8.0
+                dem_resolution = gsd.cap_resolution(args.dem_resolution, tree.opensfm_reconstruction,
+                                                    gsd_error_estimate=0,
+                                                    gsd_scaling=gsd_scaling,
+                                                    ignore_gsd=args.ignore_gsd,
+                                                    ignore_resolution=(not reconstruction.is_georeferenced()) and args.ignore_gsd,
+                                                    has_gcp=reconstruction.has_gcp()) / 100.0
+                if args.fast_orthophoto:
+                    dem_resolution *= 2.0
 
-                mesh.create_25dmesh(tree.filtered_point_cloud, tree.odm_25dmesh,
-                        radius_steps,
-                        dsm_resolution=dsm_resolution,
-                        depth=self.params.get('oct_tree'),
-                        maxVertexCount=self.params.get('max_vertex'),
-                        samples=self.params.get('samples'),
-                        available_cores=args.max_concurrency,
-                        method='poisson' if args.fast_orthophoto else 'gridded',
-                        smooth_dsm=True,
-                        max_tiles=None if reconstruction.has_geotagged_photos() else math.ceil(len(reconstruction.photos) / 2))
-            else:
-                log.ODM_WARNING('Found a valid ODM 2.5D Mesh file in: %s' %
-                    tree.odm_25dmesh)
+                radius_steps = [str(dem_resolution * math.sqrt(2)), str(dem_resolution * 2)]
+
+                log.ODM_INFO('ODM 2.5D DEM resolution: %s' % dem_resolution)
+
+                dem_input = tree.filtered_point_cloud
+                if args.texturing_use_dtm:
+                    pdal.run_pdal_translate(tree.filtered_point_cloud,
+                                            tree.filtered_point_cloud_classified,
+                                            args.smrf_scalar,
+                                            args.smrf_slope,
+                                            args.smrf_threshold,
+                                            args.smrf_window,
+                                            filter=args.pc_ground_filter)
+                    dem_input = tree.filtered_point_cloud_classified
+
+                mesh.create_25dmesh(dem_input, tree.odm_25dmesh,
+                        radius_steps=radius_steps,
+                        dsm_resolution=dem_resolution,
+                        depth=self.params.get('oct_tree'),
+                        maxVertexCount=self.params.get('max_vertex'),
+                        samples=self.params.get('samples'),
+                        available_cores=args.max_concurrency,
+                        method='poisson' if args.fast_orthophoto else 'gridded',
+                        smooth_dsm=True,
+                        use_dtm=args.texturing_use_dtm,
+                        max_tiles=None if reconstruction.has_geotagged_photos() else math.ceil(len(reconstruction.photos) / 2))
+                if io.file_exists(tree.filtered_point_cloud_classified):
+                    os.remove(tree.filtered_point_cloud_classified)
+            else:
+                log.ODM_WARNING('Found a valid ODM 2.5D Mesh file in: %s' %
+                    tree.odm_25dmesh)
diff --git a/stages/odm_postprocess.py b/stages/odm_postprocess.py
index 1f1da8da3..34098d79f 100644
--- a/stages/odm_postprocess.py
+++ b/stages/odm_postprocess.py
@@ -1,12 +1,14 @@
 import os
+import numpy as np
 import rasterio
 
 from datetime import datetime
 
-from osgeo import gdal
+from osgeo import gdal, gdal_array
 from opendm import io
 from opendm import log
 from opendm import types
 from opendm import photo
+from opendm.cogeo import convert_to_cogeo
 from opendm.utils import copy_paths, get_processing_results_paths
 from opendm.ogctiles import build_3dtiles
@@ -19,14 +21,15 @@ def process(self, args, outputs):
 
         rasters = [tree.odm_orthophoto_tif,
                    tree.path("odm_dem", "dsm.tif"),
-                   tree.path("odm_dem", "dtm.tif")]
+                   tree.path("odm_dem", "dtm.tif"),
+                   tree.path("odm_dem", "ndsm.tif")]
 
+        # Add TIFF tags
         mean_capture_time = photo.find_mean_utc_time(reconstruction.photos)
         mean_capture_dt = None
         if mean_capture_time is not None:
             mean_capture_dt = datetime.fromtimestamp(mean_capture_time).strftime('%Y:%m:%d %H:%M:%S') + '+00:00'
 
-        # Add TIFF tags
         for product in rasters:
             if os.path.isfile(product):
                 log.ODM_INFO("Adding TIFFTAGs to {}".format(product))
@@ -65,12 +68,61 @@ def process(self, args, outputs):
             else:
                 log.ODM_WARNING("Cannot open %s for writing, skipping GCP embedding" % product)
 
+        # Generate normalized DSM if both DSM and DTM exist
+        dsm = tree.path("odm_dem", "dsm.tif")
+        dtm = tree.path("odm_dem", "dtm.tif")
+        ndsm = tree.path("odm_dem", "ndsm.tif")
+        if os.path.isfile(dsm) and os.path.isfile(dtm):
+            try:
+                log.ODM_INFO("Generating normalized DSM: %s" % ndsm)
+                dsm_ds = gdal.Open(dsm)
+                dsm_band = dsm_ds.GetRasterBand(1)
+                dsm_array = np.ma.masked_equal(dsm_band.ReadAsArray(), dsm_band.GetNoDataValue())
+
+                dtm_ds = gdal.Open(dtm)
+                dtm_band = dtm_ds.GetRasterBand(1)
+                dtm_array = np.ma.masked_equal(dtm_band.ReadAsArray(), dtm_band.GetNoDataValue())
+
+                # Check and align the shapes of both arrays
+                if dsm_array.shape[0] < dtm_array.shape[0]:
+                    dtm_array = dtm_array[0:dsm_array.shape[0], :]
+                elif dsm_array.shape[0] > dtm_array.shape[0]:
+                    dsm_array = dsm_array[0:dtm_array.shape[0], :]
+
+                if dsm_array.shape[1] < dtm_array.shape[1]:
+                    dtm_array = dtm_array[:, 0:dsm_array.shape[1]]
+                elif dsm_array.shape[1] > dtm_array.shape[1]:
+                    dsm_array = dsm_array[:, 0:dtm_array.shape[1]]
+
+                # nDSM = DSM - DTM (fill masked cells with the DSM nodata value)
+                no_data = dsm_band.GetNoDataValue()
+                ndsm_data = (dsm_array - dtm_array).filled(no_data)
+                ndsm_ds = gdal_array.SaveArray(ndsm_data, ndsm, "GTIFF", dsm_ds)
+
+                # set same nodata value as DSM
+                ndsm_ds.GetRasterBand(1).SetNoDataValue(no_data)
+
+                # close the tiff files
+                dsm_ds = None
+                dtm_ds = None
+                ndsm_ds = None
+
+                if os.path.isfile(ndsm):
+                    log.ODM_INFO("Generating normalized DSM finished.")
+                    convert_to_cogeo(ndsm)
+                else:
+                    log.ODM_WARNING("Generating normalized DSM failed.")
+
+            except Exception as e:
+                log.ODM_WARNING("Cannot generate normalized DSM. %s" % str(e))
+
+        # Generate OGC 3D tiles outputs
         if getattr(args, '3d_tiles'):
             build_3dtiles(args, tree, reconstruction, self.rerun())
 
+        # Copy output results to a designated folder
         if args.copy_to:
             try:
                 copy_paths([os.path.join(args.project_path, p) for p in get_processing_results_paths()], args.copy_to, self.rerun())
             except Exception as e:
                 log.ODM_WARNING("Cannot copy to %s: %s" % (args.copy_to, str(e)))
-
diff --git a/stages/openmvs.py b/stages/openmvs.py
index b02c42f4c..19cd73dfb 100644
--- a/stages/openmvs.py
+++ b/stages/openmvs.py
@@ -37,7 +37,7 @@ def process(self, args, outputs):
             octx.run(cmd)
         else:
             log.ODM_WARNING("Found existing %s" % openmvs_scene_file)
-        
+
         self.update_progress(10)
 
         depthmaps_dir = os.path.join(tree.openmvs, "depthmaps")
@@ -57,22 +57,24 @@ def process(self, args, outputs):
             resolution_level = int(round(math.log(outputs['undist_image_max_size'] / float(depthmap_resolution)) / math.log(2)))
 
         log.ODM_INFO("Running dense reconstruction. This might take a while.")
-        
+
         log.ODM_INFO("Estimating depthmaps")
-        number_views_fuse = 2
+        number_views = args.min_num_views - 1 # The number of views that OpenMVS considers for depthmap estimation
+        number_views_fuse = 2 # The minimum number of images that must agree with an estimate during fusion for it to be considered an inlier
         densify_ini_file = os.path.join(tree.openmvs, 'Densify.ini')
-        subres_levels = 2 # The number of lower resolutions to process before estimating output resolution depthmap.
+        subres_levels = 0 # The number of lower-resolution levels to process before estimating the output-resolution depthmap (0 = disabled)
         filter_point_th = -20
 
         config = [
            "--resolution-level %s" % int(resolution_level),
            '--dense-config-file "%s"' % densify_ini_file,
+           "--min-resolution %s" % depthmap_resolution,
            "--max-resolution %s" % int(outputs['undist_image_max_size']),
            "--max-threads %s" % args.max_concurrency,
            "--number-views-fuse %s" % number_views_fuse,
            "--sub-resolution-levels %s" % subres_levels,
            "--archive-type 3",
-           '-w "%s"' % depthmaps_dir, 
+           '-w "%s"' % depthmaps_dir, "-v 0"
         ]
@@ -85,22 +87,20 @@ def process(self, args, outputs):
 
         extra_config = []
 
-        if args.pc_skip_geometric:
-            extra_config.append("--geometric-iters 0")
-
         masks_dir = os.path.join(tree.opensfm, "undistorted", "masks")
         masks = os.path.exists(masks_dir) and len(os.listdir(masks_dir)) > 0
         if masks:
             extra_config.append("--ignore-mask-label 0")
 
+        sharp = 7 if args.pc_geometric else (0 if args.pc_filter == 0 else args.pc_sharp)
         with open(densify_ini_file, 'w+') as f:
-            f.write("Optimize = 7\nMin Views Filter = 1\n")
+            f.write("Optimize = %s\nMin Views Filter = %s\n" % (sharp, number_views))
 
         def run_densify():
-            system.run('"%s" "%s" %s' % (context.omvs_densify_path, 
+            system.run('"%s" "%s" %s' % (context.omvs_densify_path,
                               openmvs_scene_file, ' '.join(config + gpu_config + extra_config)))
-        
+
         try:
             run_densify()
         except system.SubprocessException as e:
@@ -127,24 +127,24 @@ def run_densify():
             subscene_densify_ini_file = os.path.join(tree.openmvs, 'subscene-config.ini')
             with open(subscene_densify_ini_file, 'w+') as f:
-                f.write("Optimize = 0\nEstimation Geometric Iters = 0\nMin Views Filter = 1\n")
+                f.write("Optimize = 0\nEstimation Geometric Iters = 0\nMin Views Filter = %s\n" % (number_views))
 
             config = [
                 "--sub-scene-area 660000", # 8000
                 "--max-threads %s" % args.max_concurrency,
-                '-w "%s"' % depthmaps_dir, 
+                '-w "%s"' % depthmaps_dir, "-v 0",
             ]
-            system.run('"%s" "%s" %s' % (context.omvs_densify_path, 
+            system.run('"%s" "%s" %s' % (context.omvs_densify_path,
                        openmvs_scene_file, ' '.join(config + gpu_config)))
-            
+
             scene_files = glob.glob(os.path.join(tree.openmvs, "scene_[0-9][0-9][0-9][0-9].mvs"))
             if len(scene_files) == 0:
                 raise system.ExitException("No OpenMVS scenes found. This could be a bug, or the reconstruction could not be processed.")
 
             log.ODM_INFO("Fusing depthmaps for %s scenes" % len(scene_files))
-            
+
             scene_ply_files = []
 
             for sf in scene_files:
@@ -160,6 +160,7 @@ def run_densify():
                 # Fuse
                 config = [
                     '--resolution-level %s' % int(resolution_level),
+                    "--min-resolution %s" % depthmap_resolution,
                     '--max-resolution %s' % int(outputs['undist_image_max_size']),
                     "--sub-resolution-levels %s" % subres_levels,
                     '--dense-config-file "%s"' % subscene_densify_ini_file,
diff --git a/stages/run_opensfm.py b/stages/run_opensfm.py
index 850c6ce83..081f7c734 100644
--- a/stages/run_opensfm.py
+++ b/stages/run_opensfm.py
@@ -2,6 +2,8 @@
 import os
 import shutil
 import glob
+import cv2
+import numpy as np
 
 from opendm import log
 from opendm import io
@@ -65,18 +67,18 @@ def cleanup_disk_space():
                 # being replaced below. It's an isolated use case.
                 octx.export_stats(self.rerun())
-            
+
             self.update_progress(75)
 
             # We now switch to a geographic CRS
             if reconstruction.is_georeferenced() and (not io.file_exists(tree.opensfm_topocentric_reconstruction) or self.rerun()):
-                octx.run('export_geocoords --reconstruction --proj "%s" --offset-x %s --offset-y %s' % 
+                octx.run('export_geocoords --reconstruction --proj "%s" --offset-x %s --offset-y %s' %
                     (reconstruction.georef.proj4(), reconstruction.georef.utm_east_offset, reconstruction.georef.utm_north_offset))
                 shutil.move(tree.opensfm_reconstruction, tree.opensfm_topocentric_reconstruction)
                 shutil.move(tree.opensfm_geocoords_reconstruction, tree.opensfm_reconstruction)
             else:
                 log.ODM_WARNING("Will skip exporting %s" % tree.opensfm_geocoords_reconstruction)
-            
+
             self.update_progress(80)
 
             updated_config_flag_file = octx.path('updated_config.txt')
@@ -94,14 +96,24 @@ def cleanup_disk_space():
 
             # Undistorted images will be used for texturing / MVS
 
-            alignment_info = None
             primary_band_name = None
+            irradiance_info = None
+            vignetting_info = None
+            alignment_info = None
             largest_photo = None
             undistort_pipeline = []
 
             def undistort_callback(shot_id, image):
-                for func in undistort_pipeline:
-                    image = func(shot_id, image)
+                # for func in undistort_pipeline:
+                #     image = func(shot_id, image)
+
+                if args.radiometric_calibration != "none":
+                    image = radiometric_calibrate(shot_id, image)
+                if reconstruction.multi_camera and not args.skip_band_alignment:
+                    image = align_to_primary_band(shot_id, image)
+
+                # image = normalize_float_to_uint16(shot_id, image)
+
                 return image
 
             def resize_thermal_images(shot_id, image):
@@ -113,11 +125,14 @@ def resize_thermal_images(shot_id, image):
 
             def radiometric_calibrate(shot_id, image):
                 photo = reconstruction.get_photo(shot_id)
+
                 if photo.is_thermal():
                     return thermal.dn_to_temperature(photo, image, tree.dataset_raw)
                 else:
-                    return multispectral.dn_to_reflectance(photo, image, use_sun_sensor=args.radiometric_calibration=="camera+sun")
-            
+                    band_irradiance_mean = irradiance_info.get(photo.band_name)
+                    # log.ODM_INFO("Horizontal irradiance for %s: %s (mean: %s)" % (photo.filename, photo.get_horizontal_irradiance(), band_irradiance_mean))
+                    band_vignette_map = vignetting_info.get(photo.band_name)
+                    return multispectral.dn_to_reflectance(photo, image, band_irradiance_mean, band_vignette_map, use_sun_sensor=args.radiometric_calibration=="camera+sun")
 
             def align_to_primary_band(shot_id, image):
                 photo = reconstruction.get_photo(shot_id)
@@ -130,24 +145,47 @@ def align_to_primary_band(shot_id, image):
                 if photo.band_name == primary_band_name:
                     return image
 
-                ainfo = alignment_info.get(photo.band_name)
-                if ainfo is not None:
-                    return multispectral.align_image(image, ainfo['warp_matrix'], ainfo['dimension'])
+                ainfo_band = alignment_info.get(photo.band_name)
+                if ainfo_band is not None:
+                    ainfo_shot = next((item for item in ainfo_band if item['filename'] == shot_id), None) # alignment_info maps each band name to a list of per-shot records
+                    # log.ODM_INFO("Aligning %s to %s (warp matrix: %s)" % (ainfo_shot['filename'], ainfo_shot['align_filename'], ainfo_shot['warp_matrix']))
+                    if ainfo_shot is not None:
+                        aligned_image = multispectral.align_image(image, ainfo_shot['warp_matrix'], ainfo_shot['dimension'])
+                        return aligned_image
+                    else:
+                        log.ODM_WARNING("Cannot align %s, no alignment matrix could be computed. Band alignment quality might be affected." % (shot_id))
                 else:
                     log.ODM_WARNING("Cannot align %s, no alignment matrix could be computed. Band alignment quality might be affected." % (shot_id))
 
                 return image
 
+            def normalize_float_to_uint16(shot_id, image):
+                photo = reconstruction.get_photo(shot_id)
+                if photo.camera_make == 'MicaSense':
+                    if not photo.is_thermal():
+                        image[image<0] = 0
+                        image[image>2] = 2
+                        image *= 32768
+                    else:
+                        image *= 100
+                        image += 27315
+
+                image[image<0] = 0
+                image[image>65535] = 65535
+                return image.astype(np.uint16)
+
             if reconstruction.multi_camera:
-                largest_photo = find_largest_photo([p for p in photos])
+                largest_photo = find_largest_photo(photos)
+                irradiance_info = multispectral.compute_band_irradiances(reconstruction.multi_camera)
+                vignetting_info = multispectral.compute_band_vignette_map(reconstruction.multi_camera)
                 undistort_pipeline.append(resize_thermal_images)
 
             if args.radiometric_calibration != "none":
                 undistort_pipeline.append(radiometric_calibrate)
-            
+
             image_list_override = None
             if reconstruction.multi_camera:
-                
+
                 # Undistort only secondary bands
                 primary_band_name = multispectral.get_primary_band_name(reconstruction.multi_camera, args.primary_band)
                 image_list_override = [os.path.join(tree.dataset_raw, p.filename) for p in photos if p.band_name.lower() != primary_band_name.lower()]
@@ -163,15 +201,21 @@ def align_to_primary_band(shot_id, image):
 
                 if not io.file_exists(added_shots_file) or self.rerun():
                     s2p, p2s = multispectral.compute_band_maps(reconstruction.multi_camera, primary_band_name)
-                    
+
                     if not args.skip_band_alignment:
-                        alignment_info = multispectral.compute_alignment_matrices(reconstruction.multi_camera, primary_band_name, tree.dataset_raw, s2p, p2s, max_concurrency=args.max_concurrency)
+                        alignment_info = multispectral.compute_alignment_matrices(reconstruction.multi_camera, primary_band_name, tree.dataset_raw, s2p, p2s,
+                                                                                  max_concurrency=args.max_concurrency,
+                                                                                  max_samples=args.band_alignment_samples,
+                                                                                  irradiance_by_hand=irradiance_info,
+                                                                                  use_sun_sensor=args.radiometric_calibration=="camera+sun",
+                                                                                  rig_optimization=args.band_alignment_rig_optimization,
+                                                                                  use_local_homography=args.band_alignment_local_homography)
                     else:
                         log.ODM_WARNING("Skipping band alignment")
                         alignment_info = {}
-                    
+
                     log.ODM_INFO("Adding shots to reconstruction")
-                    
+
                     octx.backup_reconstruction()
                     octx.add_shots_to_reconstruction(p2s)
                     octx.touch(added_shots_file)
@@ -185,7 +229,7 @@ def align_to_primary_band(shot_id, image):
             if reconstruction.multi_camera:
                 octx.restore_reconstruction_backup()
 
-                # Undistort primary band and write undistorted 
+                # Undistort primary band and write undistorted
                 # reconstruction.json, tracks.csv
                 octx.convert_and_undistort(self.rerun(), undistort_callback, runId='primary')
@@ -194,7 +238,7 @@ def align_to_primary_band(shot_id, image):
 
         else:
             log.ODM_WARNING('Found a valid OpenSfM NVM reconstruction file in: %s' % tree.opensfm_reconstruction_nvm)
-        
+
         if reconstruction.multi_camera:
             log.ODM_INFO("Multiple bands found")
@@ -209,9 +253,9 @@ def align_to_primary_band(shot_id, image):
                 primary_band_name = multispectral.get_primary_band_name(reconstruction.multi_camera, args.primary_band)
                 if p2s is None:
                     s2p, p2s = multispectral.compute_band_maps(reconstruction.multi_camera, primary_band_name)
-                
+
                 for fname in p2s:
-                    
+
                     # Primary band maps to itself
                     if band['name'] == primary_band_name:
                         img_map[add_image_format_extension(fname, 'tif')] = add_image_format_extension(fname, 'tif')
@@ -226,7 +270,7 @@ def align_to_primary_band(shot_id, image):
                 nvm.replace_nvm_images(tree.opensfm_reconstruction_nvm, img_map, nvm_file)
             else:
                 log.ODM_WARNING("Found existing NVM file %s" % nvm_file)
-        
+
         # Skip dense reconstruction if necessary and export
         # sparse reconstruction instead
         if args.fast_orthophoto:
@@ -256,4 +300,4 @@ def align_to_primary_band(shot_id, image):
             ]
             for f in files:
                 if os.path.exists(f):
-                    os.remove(f)
\ No newline at end of file
+                    os.remove(f)
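
Note on the resolution changes above: the opendm/utils.py, stages/odm_dem.py and stages/odm_meshing.py hunks apply one shared rule. --pc-quality selects a fraction of the largest image dimension for depthmap estimation, and the same setting (or an explicit --depthmap-resolution, flagged via depthmap_resolution_is_set) drives the GSD scaling handed to gsd.cap_resolution() for DEMs and the 2.5D mesh. A minimal standalone sketch of that rule follows; the constant names, helper names and the example camera are illustrative assumptions, not code from this patch.

```python
# Illustrative sketch, not part of the patch. Mirrors the scaling rule used in
# opendm/utils.py (depthmap size) and stages/odm_dem.py / stages/odm_meshing.py
# (GSD scaling). All names here are assumptions chosen for the example.

DEPTHMAP_SCALE = {  # fraction of the largest image dimension (opendm/utils.py)
    'ultra': 1.0, 'high': 0.5, 'medium': 0.25, 'low': 0.125, 'lowest': 0.0625}
GSD_SCALE = {       # multiplier on the GSD cap (odm_dem.py / odm_meshing.py)
    'ultra': 1.0, 'high': 2.0, 'medium': 4.0, 'low': 8.0, 'lowest': 16.0}

def depthmap_resolution(max_dim, megapixels, pc_quality, explicit=None):
    # An explicit --depthmap-resolution overrides --pc-quality entirely
    if explicit is not None:
        return int(explicit)
    # Very large sensors get an extra reduction so depthmaps stay tractable
    multiplier = 0.25 if megapixels > 42 else (0.5 if megapixels > 6 else 1)
    return max(320, int(max_dim * DEPTHMAP_SCALE[pc_quality] * multiplier))

def dem_gsd_scaling(max_dim, pc_quality, explicit=None):
    # Denser depthmaps permit a finer DEM resolution cap, and vice versa
    if explicit is not None:
        return max_dim / explicit
    return GSD_SCALE[pc_quality]

# Example: an 8192x5460 (~45 MP) camera at --pc-quality medium yields
# max(320, int(8192 * 0.25 * 0.25)) == 512 px depthmaps and a 4.0x GSD cap.
```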
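The nDSM step added to stages/odm_postprocess.py reduces to: read DSM and DTM, mask nodata, crop both rasters to a common shape, subtract, and save using the DSM as the georeferencing prototype. Below is a compact standalone version; it is a sketch under the assumption that both rasters share the same projection and nodata conventions, and the paths are illustrative.

```python
import numpy as np
from osgeo import gdal, gdal_array

def make_ndsm(dsm_path, dtm_path, out_path):
    # Read both bands and mask out their nodata cells
    dsm_ds = gdal.Open(dsm_path)
    dtm_ds = gdal.Open(dtm_path)
    dsm_band = dsm_ds.GetRasterBand(1)
    dtm_band = dtm_ds.GetRasterBand(1)
    dsm = np.ma.masked_equal(dsm_band.ReadAsArray(), dsm_band.GetNoDataValue())
    dtm = np.ma.masked_equal(dtm_band.ReadAsArray(), dtm_band.GetNoDataValue())

    # The rasters can differ by a pixel along each axis; crop to the common extent
    rows = min(dsm.shape[0], dtm.shape[0])
    cols = min(dsm.shape[1], dtm.shape[1])

    # nDSM = DSM - DTM; the mask union keeps cells that are nodata in either input masked
    ndsm = dsm[:rows, :cols] - dtm[:rows, :cols]

    # Write masked cells as the DSM nodata value; geotransform/projection come from the DSM
    no_data = dsm_band.GetNoDataValue()
    out_ds = gdal_array.SaveArray(ndsm.filled(no_data), out_path, "GTIFF", dsm_ds)
    out_ds.GetRasterBand(1).SetNoDataValue(no_data)
    out_ds = None  # close and flush to disk

# e.g. make_ndsm("odm_dem/dsm.tif", "odm_dem/dtm.tif", "odm_dem/ndsm.tif")
```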
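For readers following the stages/run_opensfm.py change: alignment_info previously held one warp matrix per band, and after this patch it holds a list of per-shot records per band, so align_to_primary_band() must look up the record for the current shot. A sketch of that lookup; the record shape is inferred from the keys used in the diff ('filename', 'warp_matrix', 'dimension') and the sample values are placeholders.

```python
# Assumed shape after this patch: band name -> list of per-shot records.
alignment_info = {
    "nir": [
        {"filename": "IMG_0001_4", "warp_matrix": None, "dimension": (2064, 1544)},
        {"filename": "IMG_0002_4", "warp_matrix": None, "dimension": (2064, 1544)},
    ],
}

def lookup_alignment(band_name, shot_id):
    records = alignment_info.get(band_name)
    if records is None:
        return None  # no alignment was computed for this band
    # Linear scan over the band's records, as in the patched align_to_primary_band()
    return next((r for r in records if r["filename"] == shot_id), None)
```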