diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 000000000..0086358db --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1 @@ +blank_issues_enabled: true diff --git a/.github/ISSUE_TEMPLATE/project.yml b/.github/ISSUE_TEMPLATE/project.yml new file mode 100644 index 000000000..b1768a6ed --- /dev/null +++ b/.github/ISSUE_TEMPLATE/project.yml @@ -0,0 +1,122 @@ +name: Project +description: Issue template to streamline the creation of project pages. +title: "Project: " +labels: ["project", "event:PW45_2026_Boston"] +assignees: + - sjh26 + +body: + +- type: dropdown + id: draft-status + attributes: + label: Draft Status + description: Select whether the project is ready to be created once this issue is submitted. + options: + - Draft - team will hold off on page creation + - Ready - team will start page creating immediately + + validations: + required: true + +- type: markdown + attributes: + value: | + _Please enter a project name in the title bar above this text. Avoid special characters and parentheses. Format: "Project: Project Name"._ + +- type: dropdown + attributes: + label: Category + description: Select a category that best describes your project. This will help others to quickly understand the focus of your project. 
+ options: + - DICOM + - VR/AR and Rendering + - IGT and Training + - Segmentation / Classification / Landmarking + - Quantification and Computation + - Cloud / Web + - Infrastructure + - Other + validations: + required: true + +- type: markdown + attributes: + value: | + _If you are unable to find a category that is suitable for your project, or believe that a specific category is missing, please discuss it with the organizers._ + +- type: textarea + attributes: + label: Key Investigators + description: | + *Please note the formatting on the Key Investigators list:* + `- Firstname Lastname (Affiliation, Country)` + placeholder: | + - FirstName1 LastName1 (Affiliation, Country) + - FirstName2 LastName2 (Affiliation, Country) + - FirstName3 LastName3 (Affiliation, Country) + validations: + required: true + +- type: textarea + attributes: + label: Project Description + description: Add a short paragraph describing the project. + validations: + required: false + +- type: textarea + attributes: + label: Objective + description: Describe here WHAT you would like to achieve (what you will have as end result). + placeholder: | + 1. Objective A. Describe **what you plan to achieve** in 1-2 sentences. + 1. Objective B. ... + 1. Objective C. ... + value: | + 1. Objective A. Describe **what you plan to achieve** in 1-2 sentences. + validations: + required: false + +- type: textarea + attributes: + label: Approach and Plan + description: Describe here HOW you would like to achieve the objectives stated above. + placeholder: | + 1. Describe specific steps of **what you plan to do** to achieve the above described objectives. + 1. ... + 1. ... + value: | + 1. Describe specific steps of **what you plan to do** to achieve the above described objectives. + validations: + required: false + +- type: textarea + attributes: + label: Progress and Next Steps + description: Update this section as you make progress, describing of what you have ACTUALLY DONE. 
If there are specific steps that you could not complete then you can describe them here, too. + placeholder: | + 1. Describe specific steps you **have actually done**. + 1. ... + 1. ... + value: | + 1. Describe specific steps you **have actually done**. + validations: + required: false + +- type: textarea + attributes: + label: Illustrations + description: Add pictures and links to videos that demonstrate what has been accomplished. + placeholder: | + ![Description of picture](Example2.jpg) + ![Some more images](Example2.jpg) + validations: + required: false + +- type: textarea + attributes: + label: Background and References + description: If you developed any software, include link to the source code repository. If possible, also add links to sample data, and to any relevant publications. + validations: + required: false diff --git a/.github/advanced-issue-labeler.yml b/.github/advanced-issue-labeler.yml new file mode 100644 index 000000000..4d2732576 --- /dev/null +++ b/.github/advanced-issue-labeler.yml @@ -0,0 +1,9 @@ +policy: + - section: + - id: [draft-status] + block-list: ['None', 'Other'] + label: + - name: 'draft' + keys: ['Draft - team will hold off on page creation'] + - name: 'ready' + keys: ['Ready - team will start page creating immediately'] diff --git a/.github/workflows/draft-to-ready.yml b/.github/workflows/draft-to-ready.yml new file mode 100644 index 000000000..ffa6544ad --- /dev/null +++ b/.github/workflows/draft-to-ready.yml @@ -0,0 +1,26 @@ +name: Apply ready label on draft label removal +on: + issues: + types: [ unlabeled ] + +permissions: + contents: read + +jobs: + apply-ready: + if: github.event.label.name == 'draft' + runs-on: ubuntu-latest + permissions: + issues: write + + steps: + - name: Add "ready" label + uses: actions/github-script@v6 + with: + script: | + github.rest.issues.addLabels({ + issue_number: context.payload.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + labels: ["ready"] + }) diff --git 
a/.github/workflows/issue-labeler.yml b/.github/workflows/issue-labeler.yml new file mode 100644 index 000000000..91c81e6ab --- /dev/null +++ b/.github/workflows/issue-labeler.yml @@ -0,0 +1,36 @@ +name: Issue labeler +on: + issues: + types: [ opened ] + +permissions: + contents: read + +jobs: + label-component: + if: contains(github.event.issue.labels.*.name, 'project') + runs-on: ubuntu-latest + + permissions: + # required for all workflows + issues: write + + + steps: + - uses: actions/checkout@v3 + + - name: Parse issue form + uses: stefanbuck/github-issue-parser@v3 + id: issue-parser + with: + template-path: .github/ISSUE_TEMPLATE/project.yml + + - name: Set labels based on draft status field + uses: redhat-plumbers-in-action/advanced-issue-labeler@v2 + with: + issue-form: ${{ steps.issue-parser.outputs.jsonString }} + section: draft-status + block-list: | + None + Other + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/project-page-pull-request.yml b/.github/workflows/project-page-pull-request.yml new file mode 100644 index 000000000..dde85c0b4 --- /dev/null +++ b/.github/workflows/project-page-pull-request.yml @@ -0,0 +1,265 @@ +name: Project Page Pull Request Creation + +on: + issues: + types: [ labeled ] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build: + if: github.event.label.name == 'project:create' + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + + steps: + - name: Fetch the project issue info + id: project_issue + env: + GH_TOKEN: ${{ github.token }} + run: | + title=$(gh issue view ${{ github.event.issue.number }} --repo ${{ github.repository }} --json title --jq '.title') + echo "title [$title]" + + # If any, remove"Project:" prefix + title=$(echo "$title" | sed 's/^Project:\s*//') + echo "title [$title]" + + echo "title=$title" >> $GITHUB_OUTPUT + + - name: Find project issue comment + uses: peter-evans/find-comment@v2.4.0 + id: fc + with: 
+ issue-number: ${{ github.event.issue.number }} + comment-author: 'github-actions[bot]' + body-includes: Project Page Pull Request Creation + + - name: Create or update comment ⌛ + id: couc + uses: peter-evans/create-or-update-comment@v3 + with: + comment-id: ${{ steps.fc.outputs.comment-id }} + issue-number: ${{ github.event.issue.number }} + body: | + ## Project Page Pull Request Creation + :hourglass: **IN PROGRESS**: ![Project Page Pull Request Creation](${{ github.server_url }}/${{ github.repository }}/actions/workflows/project-page-pull-request.yml/badge.svg) + edit-mode: replace + + - name: Extract event info + id: event_info + run: | + # TODO: Extract event name from "event:NAME" label + event_name="PW45_2026_Boston" + echo "name=$event_name" >> $GITHUB_OUTPUT + + - uses: actions/checkout@v3 + + + - name: Generate project directory name + id: project_directory + run: | + title=$(echo ${{ steps.project_issue.outputs.title }}) + echo "title [$title]" + + # Convert to title case + title_cased=$(echo "$title" | sed 's/.*/\L&/; s/[a-z]*/\u&/g') + echo "title_cased [$title_cased]" + + # Sanitize string to use as a directory name + directory_name=$(echo "$title_cased" | tr -cs '[:alnum:]' '_' | sed 's/_//g') + echo "directory_name [$directory_name]" + + echo "name=$directory_name" >> $GITHUB_OUTPUT + + - name: Check if project already exists + id: check_project_exists + run: | + event_name=${{ steps.event_info.outputs.name }} + project_directory=${{ steps.event_info.outputs.name }}/Projects/${{ steps.project_directory.outputs.name }} + if [ -d "$project_directory" ]; then + echo "::error::Project already exist" + exit 1 + fi + + - name: Create or update comment 🛑 + if: ${{ failure() && steps.check_project_exists.outcome == 'failure' }} + uses: peter-evans/create-or-update-comment@v3 + with: + comment-id: ${{ steps.couc.outputs.comment-id }} + issue-number: ${{ github.event.issue.number }} + body: | + ## Project Page Pull Request Creation + :stop_sign: 
**STOPPED**: Project already exists + edit-mode: replace + + - name: Create project directory + run: | + event_name=${{ steps.event_info.outputs.name }} + directory_name=${{ steps.project_directory.outputs.name }} + mkdir $event_name/Projects/$directory_name + + - name: Bulk issue body + id: bulk + env: + GH_TOKEN: ${{ github.token }} + run: | + body=$(gh issue view ${{ github.event.issue.number }} --repo ${{ github.repository }} --json body --jq '.body') + echo "$body" > ${{ runner.temp }}/bulk.txt + + - name: Display bulk.txt + run: | + cat ${{ runner.temp }}/bulk.txt + + - name: Split bulk + run: | + csplit ${{ runner.temp }}/bulk.txt \ + '/^### Project Description/' \ + '/^### Objective/' \ + '/^### Approach and Plan/' \ + '/^### Progress and Next Steps/' \ + '/^### Illustrations/' \ + '/^### Background and References/' \ + -f ${{ runner.temp }}/project_pull_request_part_ -b "%02d.md" + + - name: Cleanup files + run: | + cat ${{ runner.temp }}/project_pull_request_part_01.md | sed "1 d" > ${{ runner.temp }}/description.md + cat ${{ runner.temp }}/project_pull_request_part_02.md | sed "1 d" > ${{ runner.temp }}/objective.md + cat ${{ runner.temp }}/project_pull_request_part_03.md | sed "1 d" > ${{ runner.temp }}/approach.md + cat ${{ runner.temp }}/project_pull_request_part_04.md | sed "1 d" > ${{ runner.temp }}/progress.md + cat ${{ runner.temp }}/project_pull_request_part_05.md | sed "1 d" > ${{ runner.temp }}/illustrations.md + cat ${{ runner.temp }}/project_pull_request_part_06.md | sed "1 d" > ${{ runner.temp }}/background.md + + - name: Display clean files + run: | + echo "Description:" + cat ${{ runner.temp }}/description.md + echo "Objective:" + cat ${{ runner.temp }}/objective.md + echo "Approach:" + cat ${{ runner.temp }}/approach.md + echo "Progress:" + cat ${{ runner.temp }}/progress.md + echo "Illustrations:" + cat ${{ runner.temp }}/illustrations.md + echo "Background:" + cat ${{ runner.temp }}/background.md + + + - name: Issue Forms Body Parser + 
id: parse + uses: zentered/issue-forms-body-parser@v2.0.0 + + - name: Extract issue fields + id: extract + run: | + echo ${{ toJSON(steps.parse.outputs.data) }} | \ + jq --arg title "${{ steps.project_issue.outputs.title }}" \ + --rawfile description "${{ runner.temp }}/description.md" \ + --rawfile objective "${{ runner.temp }}/objective.md" \ + --rawfile illustrations "${{ runner.temp }}/illustrations.md" \ + --rawfile approach "${{ runner.temp }}/approach.md" \ + --rawfile progress "${{ runner.temp }}/progress.md" \ + --rawfile background "${{ runner.temp }}/background.md" '{ + "title": $title, + "category": .category.text, + "description": $description, + "objective": $objective, + "approach": $approach, + "progress": $progress, + "illustrations": $illustrations, + "background": $background, + "investigators": [.["key-investigators"].list[].text | + capture("(?<name>[^\\s]+(?:\\s[^\\s]+)*?)\\s*\\((?<affiliation>[^,]*)(?:,\\s*(?<country>[^)]*))?\\)") | + {name, affiliation: (.affiliation // ""), country: (.country // "")}] + }' > ${{ runner.temp }}/template-data.json + + - name: Display template-data.json + run: | + cat ${{ runner.temp }}/template-data.json + + - uses: actions/setup-python@v4.6.0 + with: + python-version: "3.x" + + - name: Install jinja2 + run: | + python -m pip install jinja2 + + # Provide the cli interface + python -m pip install jinja2_cli + + # Install the package providing the "to_yaml" and "regex_replace" filters.
+ python -m pip install jinja2-ansible-filters + + - name: Generate Project README.md + run: | + jinja2 \ + ${{ steps.event_info.outputs.name }}/Projects/Template/README.md.j2 \ + ${{ runner.temp }}/template-data.json \ + --format json \ + -e "jinja2_ansible_filters.AnsibleCoreFiltersExtension" \ + -o ${{ steps.event_info.outputs.name }}/Projects/${{ steps.project_directory.outputs.name }}/README.md + + - uses: tibdex/github-app-token@v1 + id: generate-token + with: + app_id: ${{ secrets.APP_ID }} + private_key: ${{ secrets.APP_PRIVATE_KEY }} + + - name: Create Pull Request + id: cpr + uses: peter-evans/create-pull-request@v5 + with: + token: ${{ steps.generate-token.outputs.token }} + commit-message: | + ${{ steps.event_info.outputs.name }}: Add project ${{ steps.project_directory.outputs.name }} + committer: GitHub + author: ${{ github.actor }} <${{ github.actor }}@users.noreply.github.com> + branch: ${{ steps.event_info.outputs.name }}/${{ steps.project_directory.outputs.name }} + delete-branch: true + title: Add project "${{ steps.project_issue.outputs.title }}" to "${{ steps.event_info.outputs.name }}" + body: | + Fixes #${{ github.event.issue.number }} + labels: | + project + event:${{ steps.event_info.outputs.name }} + + - name: Create or update comment ❌ + if: ${{ failure() && steps.check_project_exists.outcome == 'success' }} + uses: peter-evans/create-or-update-comment@v3 + with: + comment-id: ${{ steps.couc.outputs.comment-id }} + issue-number: ${{ github.event.issue.number }} + body: | + ## Project Page Pull Request Creation + :x: **FAILED**: See ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + edit-mode: replace + + - name: Create or update comment ✅ + if: ${{ success() }} + uses: peter-evans/create-or-update-comment@v3 + with: + comment-id: ${{ steps.couc.outputs.comment-id }} + issue-number: ${{ github.event.issue.number }} + body: | + ## Project Page Pull Request Creation + :white_check_mark: 
**COMPLETED**: See ${{ steps.cpr.outputs.pull-request-url }} + edit-mode: replace + + - name: Remove "project:create" label + if: ${{ always() }} + uses: actions/github-script@v6 + with: + script: | + github.rest.issues.removeLabel({ + issue_number: context.payload.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + name: "project:create" + }) diff --git a/.gitignore b/.gitignore index 8225a6ac0..051e5db44 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,8 @@ _site/ Gemfile.lock -*~ \ No newline at end of file +*~ + +.DS_Store + +.idea/ +.vscode/ diff --git a/Gemfile b/Gemfile index 37f5eaa42..5acc62226 100644 --- a/Gemfile +++ b/Gemfile @@ -1,2 +1,4 @@ source 'https://rubygems.org' +ruby ">=3.3" gem 'github-pages', group: :jekyll_plugins +gem 'jemoji' diff --git a/MAINTAINERS.md b/MAINTAINERS.md new file mode 100644 index 000000000..ebf665cbf --- /dev/null +++ b/MAINTAINERS.md @@ -0,0 +1,12 @@ +# Creating the main page for a new project week + +Use changes in these commits as guides: +- https://github.com/NA-MIC/ProjectWeek/commit/9fb0d047ad1ca9a7692edcfe7ac9903b646ec15c +- https://github.com/NA-MIC/ProjectWeek/commit/f9163343e86c4cb8ea51801c27244f2045568a1a + +# Updating the project page creation process + +- create a label for the project week, see https://github.com/NA-MIC/ProjectWeek/labels +- Make changes as in these commits: + - https://github.com/NA-MIC/ProjectWeek/commit/8b53715c34665c8b05f788f39abaf562c3dfba95 + - https://github.com/NA-MIC/ProjectWeek/commit/5c660237c6e3779d8a6190453dcb41d5fd3d8f89 diff --git a/PW26_2017_London_Canada/README.md b/PW26_2017_London_Canada/README.md index 7ec3c6320..d625d5b79 100644 --- a/PW26_2017_London_Canada/README.md +++ b/PW26_2017_London_Canada/README.md @@ -1,13 +1,13 @@ ![PW26](PW26.png) ## Welcome to the web page for the 26th Project Week!
-It is a pleasure to announce that the Western Slicer Project Week will be held in London, Ontario, Canada on July 17-21, 2017. This is the first extension Slicer project week that grew organically from a proposed visit of Dr. Nobuhiko Hata to Western. Enough interest existed to propose a small Slicer hackfest, which quickly grew into a full blown project week. With the expansion of the hackfest into a full project week, an additional meeting was arranged in conjunction with SPWW to discuss and develop image-guided therapy specific goals. Thus, this is also the inaugural Slicer IGT Project Week. +It is a pleasure to announce that the Western Slicer Project Week will be held in London, Ontario, Canada on July 17-21, 2017. This is the first extension Slicer project week that grew organically from a proposed visit of Dr. Nobuhiko Hata to Western. Enough interest existed to propose a small Slicer hackfest, which quickly grew into a full blown project week. With the expansion of the hackfest into a full project week, an additional meeting was arranged in conjunction with SPWW to discuss and develop image-guided therapy specific goals. Thus, this is also the inaugural Slicer IGT Project Week. This project week is an event endorsed by the MICCAI Society. ![GroupShot](GroupShot.jpg) ## Local Organizing Committee - + - Host: [Terry Peters](http://www.robarts.ca/terry-peters), Robarts Scientist; Professor Medical Imaging; Medical Biophysics; Biomedical Engineering - Email Local Organizing Committee: @@ -15,10 +15,10 @@ This project week is an event endorsed by the MICCAI Society. ## Resources -This is a collection of resources submitted by attendees +This is a collection of resources submitted by attendees - Welcome presentation by Andras Lasso - Historical overview by Nobuhiko Hata -- Python resources (thanks John Drozd!) +- Python resources (thanks John Drozd!) 
- http://cscircles.cemc.uwaterloo.ca/5-input/ - https://www.jetbrains.com/pycharm/ - http://www.datacarpentry.org/python-ecology-lesson/ @@ -77,16 +77,16 @@ Automated Segmentation of the human skull, face and airways from MRI dicom image ### Breakout sessions -+ 3D Slicer introduction ++ 3D Slicer introduction + 3D Slicer features overview (Andras Lasso) -+ Segmentation ++ Segmentation + Segment editor tutorials (video tutorial and slides) + Segment editor reference manual -+ Slicelets ++ Slicelets + Customizing 3D Slicer: slicelets/guidelets (Andras Lasso) + Slicer programming tutorial + VASST Lab Slicelet Template (Thanks Ying Li!) -+ SlicerIGT ++ SlicerIGT + PerkLab videos ## Registrants @@ -206,5 +206,3 @@ Automated Segmentation of the human skull, face and airways from MRI dicom image 57 John Baxter (Robarts Research Institute) 58 Jean-Christophe Fillion-Robin (Kitware, Inc.) - - diff --git a/PW27_2018_Boston/BreakoutSessions/AR-VR.md b/PW27_2018_Boston/BreakoutSessions/AR-VR.md index 66b00c0c3..20795a822 100644 --- a/PW27_2018_Boston/BreakoutSessions/AR-VR.md +++ b/PW27_2018_Boston/BreakoutSessions/AR-VR.md @@ -32,7 +32,7 @@ Back to [Breakout Sessions List](../README.md#BreakoutSessions) - By simulating mouse interactions all the effects could be used - New effects - Surface deformation: Grow ROI on segment surface then push or pull with modification function (VR possibly makes it actually usable by allowing quick evaluation of the result while changing the input, and seeing all in real 3D with depth) - + - **Discussion** - Sam Jang recommendations: Keep focus on the model, not fly around; rotate or walk around; keep actions very consistent. - If elbow and wrist rests on some gel pad then fine manipulation is feasible @@ -51,7 +51,7 @@ Back to [Breakout Sessions List](../README.md#BreakoutSessions) - From @curtislisle: During the VR interaction session at the recent Project Week, we discussed interaction modes. 
I was reminded of early research by some former colleagues at Univ. of Central Florida. Here is a URL and Bibliography to some of the work that might be of interest: https://www.mitpressjournals.org/doi/abs/10.1162/pres.1995.4.4.403 -This is the work I described about selecting a nearby object to establish a coordinate system transformation and move the world with respect to the eyepoint instead of flying the eyepoint towards the target object. The point was made about simulation sickness when moving the eyepoint, but our lab's work indicated this paradigm of direct manipulation of the virtual environment was effective. +This is the work I described about selecting a nearby object to establish a coordinate system transformation and move the world with respect to the eyepoint instead of flying the eyepoint towards the target object. The point was made about simulation sickness when moving the eyepoint, but our lab's work indicated this paradigm of direct manipulation of the virtual environment was effective. Some literature search of the PRESENCE journal might help us take advantage of some of the earlier work. It is nice to see that rendering and VR technology is finally more widely accessible. @@ -68,7 +68,7 @@ Some literature search of the PRESENCE journal might help us take advantage of s - Orientation marker ### Proposed changes - + - Excluding VR view from main layout management: Node reference parentLayoutNodeID in view nodes - None by default, meaning main layout. Set to node (e.g. 
itself) to indicate it's standalone and should not be managed - Abstract layout node class diff --git a/PW27_2018_Boston/BreakoutSessions/FDA-and-3D-Slicer.md b/PW27_2018_Boston/BreakoutSessions/FDA-and-3D-Slicer.md index 16bf44033..4b48e28d5 100644 --- a/PW27_2018_Boston/BreakoutSessions/FDA-and-3D-Slicer.md +++ b/PW27_2018_Boston/BreakoutSessions/FDA-and-3D-Slicer.md @@ -23,7 +23,7 @@ The Slicer license allows commerial use: > The license does not impose restrictions on the use of the software. > 3D Slicer is NOT FDA approved. It is the users responsibility to ensure compliance with applicable rules and regulations. -Open source software can be included in the package being registered, it's helpful to isolate modules/classes that are used. Testing has to incorporate verifying the functionality of that software. +Open source software can be included in the package being registered, it's helpful to isolate modules/classes that are used. Testing has to incorporate verifying the functionality of that software. ### 510(k) @@ -46,7 +46,7 @@ In the Software section of the 510(k) application you have to provide: * Requirements Specification: developed from Clinical User Needs and Design Input * Software Design Document * Includes module level descriptions for the software - * Includes third party libraries: + * Includes third party libraries: | Name | Description | How Used in the Software | | ------- | ----------- | ------------------------ | diff --git a/PW27_2018_Boston/PreparatoryMeetingsNotes.md b/PW27_2018_Boston/PreparatoryMeetingsNotes.md index b96db92ac..705b0e205 100644 --- a/PW27_2018_Boston/PreparatoryMeetingsNotes.md +++ b/PW27_2018_Boston/PreparatoryMeetingsNotes.md @@ -47,7 +47,7 @@ These are notes from PW#27 Preparation Hangouts held weekly on Tuesdays at 10am * Main topic: Gran Canaria Team projects * Administrivia: should we use this to help people copy the template page to make their own project page? 
https://www.mediawiki.org/wiki/Extension:Duplicator -* Discourse sub community? +* Discourse sub community? * Subscribe, see the welcome page below. google, github, Facebook are all supported authentication options so people can one-click use their existing accounts if they want: * https://discourse.slicer.org/t/welcome-to-the-3d-slicer-forum/8 * After logging in, visit: https://discourse.slicer.org/c/community/project-week, and click the “Watching” button to enable notifications: diff --git a/PW27_2018_Boston/Projects/3DPrintedProstateCancerModels/README.md b/PW27_2018_Boston/Projects/3DPrintedProstateCancerModels/README.md index 4327a172f..1fd2a3d08 100644 --- a/PW27_2018_Boston/Projects/3DPrintedProstateCancerModels/README.md +++ b/PW27_2018_Boston/Projects/3DPrintedProstateCancerModels/README.md @@ -9,7 +9,7 @@ Key Investigators - William Huang (NYU School of Medicine) - Andrey Fedorov (BWH, HMS) - Danielle Pace (MIT) (has put together a pipeline for preparing 3d printed models before, happy to share experience) -- Anneke Meyer (University of Magdeburg, Germany) +- Anneke Meyer (University of Magdeburg, Germany) # Project Description @@ -34,7 +34,7 @@ urethra,and rectal wall. ## Progress and Next Steps -1. Learned and tested 3D Slicer segmentation tools. Compared surface cut tool to manual segmentation and grow from seeds. +1. Learned and tested 3D Slicer segmentation tools. Compared surface cut tool to manual segmentation and grow from seeds. 2. Continue to work on workflow for segmentation and printing- discuss with Danielle Pace. @@ -47,5 +47,3 @@ urethra,and rectal wall. * Wake N, Chandarana H, Huang WC, Taneja SS, Rosenkrantz AB. Application of anatomically accurate, patient-specific 3D printed models from MRI data in urological oncology. Clin Radiol. 2016;71(6):610-4. http://dx.doi.org/10.1016/j.crad.2016.02.012. http://www.clinicalradiologyonline.net/article/S0009-9260(16)00087-8/fulltext * Wake N, Rude T, Kang SK, et al. 
3D printed renal cancer models derived from MRI data: application in pre-surgical planning. Abdom Radiol (NY). 2017;42(5):1501-9. http://dx.doi.org/10.1007/s00261-016-1022-2. https://link.springer.com/article/10.1007/s00261-016-1022-2 - - diff --git a/PW27_2018_Boston/Projects/AffordableIGTSimulatorsWithSlicerIGTAndPLUS/README.md b/PW27_2018_Boston/Projects/AffordableIGTSimulatorsWithSlicerIGTAndPLUS/README.md index 6f230018e..d406526f8 100644 --- a/PW27_2018_Boston/Projects/AffordableIGTSimulatorsWithSlicerIGTAndPLUS/README.md +++ b/PW27_2018_Boston/Projects/AffordableIGTSimulatorsWithSlicerIGTAndPLUS/README.md @@ -29,7 +29,7 @@ In this project we aim at creating and integrating an Image Guided Therapy (IGT) ## Approach and Plan 1. Define a proper ArUco optical tracker system (web camera and markers selection and distribution). -2. Program the integration of the tracked probe in the ultrasound simulator device included in PLUS Toolkit. +2. Program the integration of the tracked probe in the ultrasound simulator device included in PLUS Toolkit. 3. Implement strategies for track accuracy improvement. 4. Replace the default model simulated in the PLUS device with a custom arm model. 5. Test and verify the overall system. @@ -38,12 +38,12 @@ In this project we aim at creating and integrating an Image Guided Therapy (IGT) -1. The system setup (camera and trackers, probe, needle, phantom block, etc.) was tested and successfully completed. +1. The system setup (camera and trackers, probe, needle, phantom block, etc.) was tested and successfully completed. 2. We confirm that the camera auto-focus is not suitable for tracking. Webcam models without this facility should be considered in the future. 3. The pivot and spin calibrations for needle-like tools and the fiducial calibration of bodies yield an accuracy good enough for training applications and no strategies for track accuracy improvement was needed. 
Without actual ultrasound images, manual calibration of probe was required. 4. Instead of the arm model, we create a very simple one based on basic geometrical shapes. However, the proof of concept for customizing any particular case is done. 5. Unfortunately, the calibration of the US simulator failed. The prescribed spatial model seems to mismatch with the actual and virtual scenarios. Therefore the US image does not correspond to the expected one. This should be checked and fixed in the next few days. - + # Illustrations diff --git a/PW27_2018_Boston/Projects/CHRIS-slicer/README.md b/PW27_2018_Boston/Projects/CHRIS-slicer/README.md index 5887ee078..4b0659aff 100644 --- a/PW27_2018_Boston/Projects/CHRIS-slicer/README.md +++ b/PW27_2018_Boston/Projects/CHRIS-slicer/README.md @@ -38,7 +38,7 @@ Back to [Projects List](../../README.md#ProjectsList) pfurl --verb POST --raw --http ${HOST_IP}:5005/api/v1/cmd \ --httpResponseBodyParse --jsonwrapper 'payload' \ --msg \ -'{ +'{ "action": "coordinate", "threadAction": true, "meta-store": { diff --git a/PW27_2018_Boston/Projects/CIPDeepLearningLungSegmentation/README.md b/PW27_2018_Boston/Projects/CIPDeepLearningLungSegmentation/README.md index 701a7de68..c6839471f 100644 --- a/PW27_2018_Boston/Projects/CIPDeepLearningLungSegmentation/README.md +++ b/PW27_2018_Boston/Projects/CIPDeepLearningLungSegmentation/README.md @@ -8,12 +8,12 @@ Back to [Projects List](../../README.md#ProjectsList) - Raúl San José (BWH) # Project Description -Integrate a lung segmentation algorithm based on Deep Learning (Keras+Tensorflow) into the Chest Imaging Platform. +Integrate a lung segmentation algorithm based on Deep Learning (Keras+Tensorflow) into the Chest Imaging Platform. The goal is to make available in Slicer this and other similar tools based on Deep Learning. ## Objective -1. Integrate a Lung Segmentation algorithm based on Deep Learning in the Chest Imaging Platform. +1. 
Integrate a Lung Segmentation algorithm based on Deep Learning in the Chest Imaging Platform. 1. Make available these and other similar tools in Slicer ## Approach and Plan diff --git a/PW27_2018_Boston/Projects/CompressedVideoSaving/README.md b/PW27_2018_Boston/Projects/CompressedVideoSaving/README.md index 5efa646be..dd32a93bc 100644 --- a/PW27_2018_Boston/Projects/CompressedVideoSaving/README.md +++ b/PW27_2018_Boston/Projects/CompressedVideoSaving/README.md @@ -63,4 +63,3 @@ Next steps - [Ipad for image-guided neurosurgery](http://digital-library.theiet.org/content/journals/10.1049/htl.2017.0062?crawler=true&mimetype=application/pdf&tags=noindex) - diff --git a/PW27_2018_Boston/Projects/DICOMforQuantitativeImaging/README.md b/PW27_2018_Boston/Projects/DICOMforQuantitativeImaging/README.md index b87058b32..60dc50df6 100644 --- a/PW27_2018_Boston/Projects/DICOMforQuantitativeImaging/README.md +++ b/PW27_2018_Boston/Projects/DICOMforQuantitativeImaging/README.md @@ -22,7 +22,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## Approach and Plan -Issues to fix: +Issues to fix: * https://github.com/QIICR/QuantitativeReporting/issues/201 * https://github.com/QIICR/QuantitativeReporting/issues/209 * https://github.com/QIICR/QuantitativeReporting/issues/210 diff --git a/PW27_2018_Boston/Projects/EM_trackers_magnetics_and_algorithms/README.md b/PW27_2018_Boston/Projects/EM_trackers_magnetics_and_algorithms/README.md index 4b97fe680..61d897903 100644 --- a/PW27_2018_Boston/Projects/EM_trackers_magnetics_and_algorithms/README.md +++ b/PW27_2018_Boston/Projects/EM_trackers_magnetics_and_algorithms/README.md @@ -71,4 +71,3 @@ No new ones; see Background and References for old ones. 
- [traneus's breadboard 6DOF EM tracker](https://web.archive.org/web/20151002101401/http://home.comcast.net/~traneus/dry_emtrackertricoil.htm) - [traneus's PhD dissertation on an EM tracker](https://web.archive.org/web/20151002101400/http://home.comcast.net/~traneus/thesis.pdf) - [four figures in traneus's dissertation](https://web.archive.org/web/20151002101400/http://home.comcast.net/~traneus/thesifig.pdf) - diff --git a/PW27_2018_Boston/Projects/ESLD_DSS/README.md b/PW27_2018_Boston/Projects/ESLD_DSS/README.md index 9795a0447..a75ccc406 100644 --- a/PW27_2018_Boston/Projects/ESLD_DSS/README.md +++ b/PW27_2018_Boston/Projects/ESLD_DSS/README.md @@ -31,7 +31,7 @@ We are using Partner's image database for a corpus of imaging data (liver diseas 1. Understand standard/established clinical scores and effects/representation in medical imaging. 1. Familiarize with the data from this study. -1. Review and discuss current literature/ feature extraction approaches. +1. Review and discuss current literature/ feature extraction approaches. 1. This is kind of a Project Kick-Off: Create a work plan how to approach this problem also beyond the scope of the project week. ## Approach and Plan @@ -41,14 +41,14 @@ We are using Partner's image database for a corpus of imaging data (liver diseas ## Progress -1. We had a first team meeting to bring together computer scientists and clinicians. +1. We had a first team meeting to bring together computer scientists and clinicians. 1. Dr. Wall reviewed her progress in selecting a small set of optimal diseased and control patients. This process has been challenging because many people with liver disease have had surgery or tumor ablation that changes the liver morphology. It is also not possible to select only patients on 3T scanner before BWH began using EPIC (2015). 1. Alireza Ziaei, Raul San Jose, and Randy Gollub are assisting with RPDR querying and image retrieval. 1. 
Jennifer worked on CITI training for IRB clearance to access the data. And talked with experts using PyRadiomics on MRI Data and their approaches on evaluating features (Michael Schwier and Joost van Griethuysen). ## Next Steps -1. Lock down the image queying and retrieval pipeline. +1. Lock down the image querying and retrieval pipeline. 1. Get deidentified data to University of Bremen team. 1. Think hard about segmentation, machine learning, and analysis techniques for the data. diff --git a/PW27_2018_Boston/Projects/ExtensionsWithCUDA/README.md b/PW27_2018_Boston/Projects/ExtensionsWithCUDA/README.md index f0af83304..fb3f02f8c 100644 --- a/PW27_2018_Boston/Projects/ExtensionsWithCUDA/README.md +++ b/PW27_2018_Boston/Projects/ExtensionsWithCUDA/README.md @@ -21,7 +21,7 @@ Provide an easy path for distributing extensions that use CUDA. 1. Create sample CUDA extension 1. Create simple CUDA extension "Slicer CUDA Probe" - 1. Perform manual build, upload, and test + 1. Perform manual build, upload, and test 1. https://www.slicer.org/wiki/Documentation/Nightly/Developers/Tutorials/BuildTestPackageDistributeExtensions 1. https://www.slicer.org/wiki/Documentation/Nightly/Developers/Build_ExtensionsIndex 1. Set up VM that matches factory, but with CUDA installed diff --git a/PW27_2018_Boston/Projects/GirderWebCloud/README.md b/PW27_2018_Boston/Projects/GirderWebCloud/README.md index 638cd4f7c..3403eab1e 100644 --- a/PW27_2018_Boston/Projects/GirderWebCloud/README.md +++ b/PW27_2018_Boston/Projects/GirderWebCloud/README.md @@ -21,7 +21,7 @@ My expertise is in Girder and scalable cloud based processing. I will give a ~15 Commercial cloud services are good for experimentation without long term commitment, and are useful when you need to have dynamic and elastic scaling. The providers are constantly rolling out new services, and there is a large amount of expertise encoded into these services (e.g.
compare the cost of using AWS Elastic Load Balancer versus the time to gain the expertise of knowing how to run a load balancer), but the accounting model may have a mismatch with grant funded research (e.g. it may be easier to pay for an hour of someone's time to build a service versus paying for an hour of a cloud based service, even though the cloud based service is much cheaper in this comparison). -To realize the full power of the cloud, a different mindset is in order compared to purchased hardware and software. Think about using extremely powerful and expensive cloud resources for a very short period of time, or using many more resources in the short term than you would otherwise. +To realize the full power of the cloud, a different mindset is in order compared to purchased hardware and software. Think about using extremely powerful and expensive cloud resources for a very short period of time, or using many more resources in the short term than you would otherwise. * Use a new and expensive GPU instance, but only for an hour. * Use a powerful instance for a day to do all of your memory intensive processing. @@ -39,7 +39,7 @@ To realize the full power of the cloud, a different mindset is in order compared * There are specialized providers in addition to the large players * Great bandwidth and unlimited data storage are both just an API call away * Because you don't own anything, there is no cost to trying out new HW when it becomes available, and you haven't paid for HW that is now obsolete - + ### Disadvantages * Can be expensive, especially for a predictable, stable usage of resources @@ -124,5 +124,3 @@ Task Job record after execution. 
- [Girder Worker source code](https://github.com/girder/girder_worker) - [Girder Ansible Galaxy role](https://galaxy.ansible.com/girder/girder/) - [Girder Worker Ansible Galaxy role](https://galaxy.ansible.com/girder/girder-worker/) - - diff --git a/PW27_2018_Boston/Projects/HarmonusIGT/README.md b/PW27_2018_Boston/Projects/HarmonusIGT/README.md index 8b557ccd0..e77085c0c 100644 --- a/PW27_2018_Boston/Projects/HarmonusIGT/README.md +++ b/PW27_2018_Boston/Projects/HarmonusIGT/README.md @@ -60,4 +60,3 @@ Harmonus has developed Slicer extensions for MR guided prostate biopsy, supporti Edit this page on GitHub - diff --git a/PW27_2018_Boston/Projects/IntegrationOfMedicalImagingSimulatorsInSlicer/README.md b/PW27_2018_Boston/Projects/IntegrationOfMedicalImagingSimulatorsInSlicer/README.md index cc028c12a..67962615e 100644 --- a/PW27_2018_Boston/Projects/IntegrationOfMedicalImagingSimulatorsInSlicer/README.md +++ b/PW27_2018_Boston/Projects/IntegrationOfMedicalImagingSimulatorsInSlicer/README.md @@ -31,7 +31,7 @@ Since the project is aimed to obtain simulated objective data that allow testing ## Progress and Next Steps -1. We have followed the approach plan in order to reached some objectives as the plan and prototype +1. We have followed the approach plan in order to reached some objectives as the plan and prototype 1. We have gather valuable information to develop some of the modules and we have a simple loadable module that can extract the information that we need. diff --git a/PW27_2018_Boston/Projects/InteractiveCHDSegmentation/README.md b/PW27_2018_Boston/Projects/InteractiveCHDSegmentation/README.md index 1454d04d3..a8e423e48 100644 --- a/PW27_2018_Boston/Projects/InteractiveCHDSegmentation/README.md +++ b/PW27_2018_Boston/Projects/InteractiveCHDSegmentation/README.md @@ -13,7 +13,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## Objective -1. 
Aim: segment all cardiac chambers and great vessels from cardiac MRI, for children with congenital heart disease. +1. Aim: segment all cardiac chambers and great vessels from cardiac MRI, for children with congenital heart disease. 2. 20 training cases + large anatomical variability - remains a challenge for automatic segmentation. 3. Approach: Integrate some interaction from the user, e.g. scribbles or landmarks. @@ -37,4 +37,3 @@ Back to [Projects List](../../README.md#ProjectsList) - HVSMR Challenge Data: (http://segchd.csail.mit.edu) - diff --git a/PW27_2018_Boston/Projects/MedicalInfraredImagingwithSlicer/README.md b/PW27_2018_Boston/Projects/MedicalInfraredImagingwithSlicer/README.md index 8a9be2edd..32629c51f 100644 --- a/PW27_2018_Boston/Projects/MedicalInfraredImagingwithSlicer/README.md +++ b/PW27_2018_Boston/Projects/MedicalInfraredImagingwithSlicer/README.md @@ -1,6 +1,6 @@ Back to [Projects List](../../FIXME.md#ProjectsList) -# Medical Infrared Imaging with Slicer +# Medical Infrared Imaging with Slicer ## Key Investigators - Jorge Quintero-Nehrkorn (IACTEC - MACbioIDi) @@ -22,7 +22,7 @@ This project is a research collaboration between the public research institute I ## Approach and Plan -1. Create a new Slicer module for processing thermal infrared images. +1. Create a new Slicer module for processing thermal infrared images. 2. Review segmentation, registration and other image processing techniques for foot ulcer detection with infrared images. 3. Testing. 4. Assessment of live video streaming using ffmpeg. @@ -33,12 +33,12 @@ This project is a research collaboration between the public research institute I making progress.--> 1. We have finished the integration of the Thermal Seek Pro camera in Plus Toolkit. -2. We have continued the development of the thermal infrared images module. +2. We have continued the development of the thermal infrared images module. 3. The next objetives are : - To finish the images registration. 
- To integrate new infrared cameras, like Thermal Expert Q1 camera. - - To add some different segmentation methods in order to perform a comparison. + - To add some different segmentation methods in order to perform a comparison. # Illustrations diff --git a/PW27_2018_Boston/Projects/ModelFittingTools/README.md b/PW27_2018_Boston/Projects/ModelFittingTools/README.md index 766fa05e4..e180144f9 100644 --- a/PW27_2018_Boston/Projects/ModelFittingTools/README.md +++ b/PW27_2018_Boston/Projects/ModelFittingTools/README.md @@ -10,7 +10,7 @@ Back to [Projects List](../../README.md#ProjectsList) - Andrew Beers (MGH/HST) - Hans Meine (MEVIS) -# Project Description +# Project Description ## Objective @@ -56,7 +56,7 @@ Back to [Projects List](../../README.md#ProjectsList) -- Projects considered to be (possibly) part of the refactored extension : +- Projects considered to be (possibly) part of the refactored extension : - https://github.com/millerjv/PkModeling - https://github.com/QIICR/T1Mapping - https://github.com/SlicerProstate/SlicerProstate/tree/master/DWModeling diff --git a/PW27_2018_Boston/Projects/NorMIT-Plan/README.md b/PW27_2018_Boston/Projects/NorMIT-Plan/README.md index a5a5b325a..bd209956b 100644 --- a/PW27_2018_Boston/Projects/NorMIT-Plan/README.md +++ b/PW27_2018_Boston/Projects/NorMIT-Plan/README.md @@ -9,7 +9,7 @@ Back to [Projects List](../../README.md#ProjectsList) - Louise Oram (The Intervention Centre, Oslo University Hospital; Oslo and Akershus University College) # Project Description - + NorMIT-Plan is part of the **Nor**wegian centre for **M**inimally **I**nvasive Guided **T**hrerapy (NorMIT). The centre offers resources for medical technology research in minimally invasive therapies. NorMIT-Plan is a software package developed as a 3D Slicer extension which will provide tools for segmentation, 3D modeling and surgical plannif for liver resection procedures. 
## Objective diff --git a/PW27_2018_Boston/Projects/OpenAnatomyBrowser/README.md b/PW27_2018_Boston/Projects/OpenAnatomyBrowser/README.md index b765b07fa..d79a3fefb 100644 --- a/PW27_2018_Boston/Projects/OpenAnatomyBrowser/README.md +++ b/PW27_2018_Boston/Projects/OpenAnatomyBrowser/README.md @@ -22,9 +22,9 @@ The main objective of this project is to develop the open anatomy browser as a t ## Approach and Plan 1. Refine a HAWG parser -1. Study the framework and modules of oabrowser +1. Study the framework and modules of oabrowser 1. Review other examples -1. Investigate additional features to validate the prototypes +1. Investigate additional features to validate the prototypes ## Progress and Next Steps diff --git a/PW27_2018_Boston/Projects/OrganmotionCompensationInMR/README.md b/PW27_2018_Boston/Projects/OrganmotionCompensationInMR/README.md index f0602a670..aa8e77a97 100644 --- a/PW27_2018_Boston/Projects/OrganmotionCompensationInMR/README.md +++ b/PW27_2018_Boston/Projects/OrganmotionCompensationInMR/README.md @@ -9,7 +9,7 @@ Back to [Projects List](../../README.md#ProjectsList) - Joost van Griethuysen (BWH) # Project Description -Creating a program to generate 4D MRI sequences applying the retrospectiv stacking method on 2D MR slices. +Creating a program to generate 4D MRI sequences applying the retrospective stacking method on 2D MR slices. The available data is comprised of an time resolved alternating sequence of navigator and data slices and a pure sequence of time resolved navigator slices. All navigator slices being acquired at the exact same location and the data slices "scanning" the complete liver in a cyclic manner. To generate a 4D MR sequence from that the program has to collect all data frames that were acquired during the same breating phase, i.e. not at the same time but at different times during the same breathing phase. To find these, the navigator slices are utilized.
Finding correspondences between the navigator slices of the pure navigator sequence and the once of the alternating sequence means to find similar or same breathing phases. Thus we find all corresponding data slices giving the 3D liver at the specific breathing phase using the correspondence of its encompassing navigator slices. ## Objective @@ -18,13 +18,13 @@ The available data is comprised of an time resolved alternating sequence of navi ## Approach and Plan -1. Using reference implementation +1. Using reference implementation ## Progress and Next Steps -- got insight in available and relevant DICOM tags (big thanks to Joost for the DICOM Explorer) +- got insight in available and relevant DICOM tags (big thanks to Joost for the DICOM Explorer) - hit a roadblock when data appeared to be faulty - wrote a python script sorting the data by acquisition time, turns out data is faulty after all (thanks to Joost again) diff --git a/PW27_2018_Boston/Projects/PlacentaFlattening/README.md b/PW27_2018_Boston/Projects/PlacentaFlattening/README.md index 856470635..6b324a055 100644 --- a/PW27_2018_Boston/Projects/PlacentaFlattening/README.md +++ b/PW27_2018_Boston/Projects/PlacentaFlattening/README.md @@ -11,7 +11,7 @@ Back to [Projects List](../../README.md#ProjectsList) - Polina Golland (MIT) # Project Description -The project seeks to flattened images of the placenta for visualization. Segemented placenta are mapped to a canonical template such as an ellipsoid for visualization of anatomy and function. +The project seeks to flatten images of the placenta for visualization. Segmented placenta are mapped to a canonical template such as an ellipsoid for visualization of anatomy and function. We aim to build a module in Slicer to establish correspondence between the original volume and the flattened one.
## Objective @@ -27,7 +27,7 @@ We aim to build a module in Slicer to establish correspondence between the origi - Identified bugs in the transformation, it seems there is an inherent rotation that Slicer is unaware of so the field does not apply correctly. Next Steps: -- Rotation on simple test cases +- Rotation on simple test cases - Rotation on transformed volumes - Develop visualization module diff --git a/PW27_2018_Boston/Projects/ProstateMpMRIWebViewer/README.md b/PW27_2018_Boston/Projects/ProstateMpMRIWebViewer/README.md index 910be993d..4fa8305d2 100644 --- a/PW27_2018_Boston/Projects/ProstateMpMRIWebViewer/README.md +++ b/PW27_2018_Boston/Projects/ProstateMpMRIWebViewer/README.md @@ -14,7 +14,7 @@ Back to [Projects List](../../README.md#ProjectsList) 1. Building a web application for reviewing and scoring multi-parametric MRI of prostate 1. Add support for reading and writing DICOM objects -1. Creating user interaction capabilities for the viewer (placing fiducial, measurements) +1. Creating user interaction capabilities for the viewer (placing fiducial, measurements) 1. Add support for saving the PIRADS reporting ## Approach and Plan @@ -28,7 +28,7 @@ Back to [Projects List](../../README.md#ProjectsList) ### Progress 1. Built PIRADS trainer using OHIF Standalone Viewer packages 1. Added the tools (zoom, levels, pan, fiducials) and hanging protocol for the prostate study -1. Added reporting UI for PIRADS reporting +1. Added reporting UI for PIRADS reporting 1. Tested on a sample of ProstateX dataset ### Next Steps @@ -39,7 +39,7 @@ Back to [Projects List](../../README.md#ProjectsList) 1. Enhancing the reporting UI, SVG for clicking prostate zones 1. Enhancing the feedback to give more detailed message(e.g., how close the distance is) 1. Adding support for 4D DCE images (prefetching across time and scrolling between volumes) -1. Improve Ktrans colormap +1. 
Improve Ktrans colormap # Illustrations @@ -59,7 +59,3 @@ Click on the image below for the video demo: 1. https://github.com/pieper/dcmjs 1. https://github.com/OHIF 1. https://github.com/cornerstonejs/cornerstoneTools - - - - diff --git a/PW27_2018_Boston/Projects/ProstateZoneSegmentation/README.md b/PW27_2018_Boston/Projects/ProstateZoneSegmentation/README.md index f36067b45..3cd34500d 100644 --- a/PW27_2018_Boston/Projects/ProstateZoneSegmentation/README.md +++ b/PW27_2018_Boston/Projects/ProstateZoneSegmentation/README.md @@ -12,7 +12,7 @@ Back to [Projects List](../../README.md#ProjectsList) # Project Description -The goal of this project is to create and evaluate variants of a CNN for multi-label segmentation of prostate zones in MR images. The prostate zones are essential for lesion classification and therapy planning. +The goal of this project is to create and evaluate variants of a CNN for multi-label segmentation of prostate zones in MR images. The prostate zones are essential for lesion classification and therapy planning. After successful segmentation, a sector map could be extracted that is used for PI-RADS reporting. This has the potential to automate and better standardize prostate lesion location reporting. 
## Objective @@ -48,4 +48,3 @@ After successful segmentation, a sector map could be extracted that is used for - Source code: https://github.com/YourUser/YourRepository - Documentation: https://link.to.docs - Test data: https://link.to.test.data - diff --git a/PW27_2018_Boston/Projects/PyRadiomics/README.md b/PW27_2018_Boston/Projects/PyRadiomics/README.md index f4cc772bb..fbecae7aa 100644 --- a/PW27_2018_Boston/Projects/PyRadiomics/README.md +++ b/PW27_2018_Boston/Projects/PyRadiomics/README.md @@ -10,7 +10,7 @@ Back to [Projects List](../../README.md#ProjectsList) - [Ahmed Hosny](https://github.com/ahmedhosny) 1 - [Steve Pieper](https://github.com/pieper) 6 - [Hugo Aerts (PI)](https://github.com/hugoaerts) 1, 2 - + 1 Department of Radiation Oncology, Dana-Farber Cancer Institute, Brigham and Women's Hospital, Harvard Medical School, Boston, MA, USA.
2 Department of Radiology, Brigham and Women's Hospital, Harvard Medical School, Boston, MA, USA.
3 Department of Radiology, Netherlands Cancer Institute, Amsterdam, The Netherlands.
@@ -56,7 +56,7 @@ Back to [Projects List](../../README.md#ProjectsList) [Related PR](https://github.com/Radiomics/pyradiomics/pull/338) - Designed a model definition configuration file and validation schemas -TODO: +TODO: - Implement functionality to apply models - Add several model types (e.g. linear regression, logistic regression) @@ -87,7 +87,7 @@ TODO: ### Objective #3 -![Slicer Radiomics new layout 1 (manual customization)](slicerRadiomics_layout1.png) +![Slicer Radiomics new layout 1 (manual customization)](slicerRadiomics_layout1.png) ![Slicer Radiomics new layout 2 (parameter file customization)](slicerRadiomics_layout2.png) diff --git a/PW27_2018_Boston/Projects/PythonWrapOpenIGTLink/README.md b/PW27_2018_Boston/Projects/PythonWrapOpenIGTLink/README.md index 41ca72a57..343d306e2 100644 --- a/PW27_2018_Boston/Projects/PythonWrapOpenIGTLink/README.md +++ b/PW27_2018_Boston/Projects/PythonWrapOpenIGTLink/README.md @@ -29,7 +29,7 @@ Back to [Projects List](../../README.md#ProjectsList) 4. (Also had discussions regarding protocol design for the motivating project) ## Next Step -1. Merge branch with OpenIGTLink +1. 
Merge branch with OpenIGTLink # Illustrations diff --git a/PW27_2018_Boston/Projects/QuantitativeSmallAnimalImaging/README.md b/PW27_2018_Boston/Projects/QuantitativeSmallAnimalImaging/README.md index 990c7a0fc..5d7c7b062 100644 --- a/PW27_2018_Boston/Projects/QuantitativeSmallAnimalImaging/README.md +++ b/PW27_2018_Boston/Projects/QuantitativeSmallAnimalImaging/README.md @@ -10,15 +10,15 @@ Back to [Projects List](../../README.md#ProjectsList) # Project Description -For this project, we aim to bring small animal MR datasets in DICOM format and repeat the process developed for the +For this project, we aim to bring small animal MR datasets in DICOM format and repeat the process developed for the QIICR program to segment a lesion (a Neuroendocrine Tumor in this case), convert the segmentation to a DICOM segmentation using the DCMQI slicer extension, and finally measure the segmentation using the Quantitative Reporting module. Our aim -is to develop a set of repeatable analysis steps we can put into place to analyze additional datasets in our lab. +is to develop a set of repeatable analysis steps we can put into place to analyze additional datasets in our lab. ## Objective 1. Develop a set of processing steps for lesion analysis that are repeatable for other small animal datasets. Access if -clinical tools from the QIICR program will apply to small animal MR datasets as well. +clinical tools from the QIICR program will apply to small animal MR datasets as well. ## Approach and Plan @@ -35,9 +35,9 @@ to clinical scanners. - Follow excellent QIICR tutorial instructions - DCMQI conversion to DICOM segmentation object failed during the first attempt. Consulted Andrey. - One of slices from the Phillips small animal scanner was identified with inconsistent header contents compared to other slices. -- Change made to DCMQI to accommodate this dataset. +- Change made to DCMQI to accommodate this dataset. 
- Reprocessed successfully and measured DICOM segmentation object using Quantitative Reporting module -- Build and trained a CNN using Keras. Consulted with Alireza about how to connect with DeepInfer. +- Build and trained a CNN using Keras. Consulted with Alireza about how to connect with DeepInfer. - Planning to complete DeepInfer integration of our new model over the coming weeks. # Illustrations @@ -53,4 +53,3 @@ to clinical scanners. - Documentation: http://qiicr.org/dcmqi-guide/tutorials/intro.html - Documentation: https://qiicr.gitbooks.io/dicom4qi/ - diff --git a/PW27_2018_Boston/Projects/README.md b/PW27_2018_Boston/Projects/README.md index 5e3a4aaf1..bf97cb49d 100644 --- a/PW27_2018_Boston/Projects/README.md +++ b/PW27_2018_Boston/Projects/README.md @@ -17,4 +17,3 @@ Note: some steps above may require creating a [pull request](https://help.github [forum]: https://discourse.slicer.org/c/community/project-week [project-description-template]: https://raw.githubusercontent.com/NA-MIC/ProjectWeek/master/PW27_2018_Boston/Projects/Template/README.md - diff --git a/PW27_2018_Boston/Projects/RadiomicsRepeatability/README.md b/PW27_2018_Boston/Projects/RadiomicsRepeatability/README.md index 786ce7692..9613bbb1c 100644 --- a/PW27_2018_Boston/Projects/RadiomicsRepeatability/README.md +++ b/PW27_2018_Boston/Projects/RadiomicsRepeatability/README.md @@ -21,7 +21,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## Approach and Plan -1. Review/discuss approaches from current literature +1. Review/discuss approaches from current literature 1. Investigate the results on the Prostate MRI test-retest data 1. 
Draft a paper on pyradiomics repeatability evaluated on the Prostate MRI test-retest data diff --git a/PW27_2018_Boston/Projects/SegmentEditorUsecaseNeedles/README.md b/PW27_2018_Boston/Projects/SegmentEditorUsecaseNeedles/README.md index 1b8333c29..17b9b0fb5 100644 --- a/PW27_2018_Boston/Projects/SegmentEditorUsecaseNeedles/README.md +++ b/PW27_2018_Boston/Projects/SegmentEditorUsecaseNeedles/README.md @@ -7,11 +7,11 @@ Back to [Projects List](../../README.md#ProjectsList) # Project Description -The superior ability of MR to visualize soft tissue has led to an increase in its use in guiding percutaneous needle-based interventions such as brachytherapy and biopsy, especially in the human pelvis. A technical challenge associated with the use of MRI imaging in such cases, in contrast to say, CT imaging, is the clear visualization of needles that are inserted into cancerous tissue to either deliver radiation or extract a sample. We have developed algorithms for catheter segmentation and visualization through numerous project weeks [1-3]. In each case, we relied on a custom (editing) tool for creating gold standard segmentations of the needles. We would like to explore the use of SegmentEditor for this task. +The superior ability of MR to visualize soft tissue has led to an increase in its use in guiding percutaneous needle-based interventions such as brachytherapy and biopsy, especially in the human pelvis. A technical challenge associated with the use of MRI imaging in such cases, in contrast to say, CT imaging, is the clear visualization of needles that are inserted into cancerous tissue to either deliver radiation or extract a sample. We have developed algorithms for catheter segmentation and visualization through numerous project weeks [1-3]. In each case, we relied on a custom (editing) tool for creating gold standard segmentations of the needles. We would like to explore the use of SegmentEditor for this task. ## Objective -1. Objective. 
Explore SegmentEditor as an alternative to current manual segmentation of needles in MRI. +1. Objective. Explore SegmentEditor as an alternative to current manual segmentation of needles in MRI. ## Approach and Plan @@ -22,7 +22,7 @@ The superior ability of MR to visualize soft tissue has led to an increase in it ## Progress and Next Steps 1. We learned how to apply SegmentEditor and explored the extra effects of the SegmentEditorExtraEffects module -1. We considered adapting the SurfaceCut extra effect in order to draw multiple Bezier splines, but it would require substantial work to modify the UI of SegmentEditor to allow for segmentation and modification of several needles for a single case. +1. We considered adapting the SurfaceCut extra effect in order to draw multiple Bezier splines, but it would require substantial work to modify the UI of SegmentEditor to allow for segmentation and modification of several needles for a single case. 1. We decided instead to improve NeedleFinder, that has been developed for the specific case of multi needle segmentation. 1. The next steps are the make the NeedleFinder UI more user friendly and ask users for their feedback how to make the workflow easier/faster. 
diff --git a/PW27_2018_Boston/Projects/ShapeVariationAnalyzer/README.md b/PW27_2018_Boston/Projects/ShapeVariationAnalyzer/README.md index 8195e3d57..4c9b1746f 100644 --- a/PW27_2018_Boston/Projects/ShapeVariationAnalyzer/README.md +++ b/PW27_2018_Boston/Projects/ShapeVariationAnalyzer/README.md @@ -5,7 +5,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## Key Investigators - [Juan-Carlos Prieto](https://github.com/juanprietob) (University of North Carolina) -- [Nina Tubau](https://github.com/ninatubau) (University of Michigan) +- [Nina Tubau](https://github.com/ninatubau) (University of Michigan) # Project Description diff --git a/PW27_2018_Boston/Projects/SlicerCustomApplication/README.md b/PW27_2018_Boston/Projects/SlicerCustomApplication/README.md index 923883558..dd0d5cc77 100644 --- a/PW27_2018_Boston/Projects/SlicerCustomApplication/README.md +++ b/PW27_2018_Boston/Projects/SlicerCustomApplication/README.md @@ -16,7 +16,7 @@ Back to [Projects List](../../README.md#ProjectsList) 1. Setup readthedocs documentation 1. 
Setup continuous integration - diff --git a/PW27_2018_Boston/Projects/SlicerDMRICore/README.md b/PW27_2018_Boston/Projects/SlicerDMRICore/README.md index 06403496d..0ea7aa3dd 100644 --- a/PW27_2018_Boston/Projects/SlicerDMRICore/README.md +++ b/PW27_2018_Boston/Projects/SlicerDMRICore/README.md @@ -22,7 +22,7 @@ Back to [Projects List](../../README.md#ProjectsList) - Remove remaining DMRI i/o code from Slicer core - Objective 2: - Test pip installation of dependency list in Qt5/VS2015 Slicer on Windows and Mac - - Discuss with Jc and other contributors + - Discuss with Jc and other contributors ## Progress and Next Steps diff --git a/PW27_2018_Boston/Projects/SlicerDeepLearningIntegration/README.md b/PW27_2018_Boston/Projects/SlicerDeepLearningIntegration/README.md index 7b1294f62..e0c6b946d 100644 --- a/PW27_2018_Boston/Projects/SlicerDeepLearningIntegration/README.md +++ b/PW27_2018_Boston/Projects/SlicerDeepLearningIntegration/README.md @@ -31,4 +31,3 @@ Integrate a lung segmentation algorithm based on Deep Learning in Slicer. 1. This integration was done through the CustomSlicerGenerator in MacOS and Linux. 1. Luckily, it would be obsolete in Slicer 5!! A template with a Python distribution based on Anaconda or others may be used 1. Also, we found out other extensions like DeepInfer and TOOMCAT that may be useful in the meantime - diff --git a/PW27_2018_Boston/Projects/SlicerGuidedUltraSoundCalibration/README.md b/PW27_2018_Boston/Projects/SlicerGuidedUltraSoundCalibration/README.md index 128dcbf7b..929289ab1 100644 --- a/PW27_2018_Boston/Projects/SlicerGuidedUltraSoundCalibration/README.md +++ b/PW27_2018_Boston/Projects/SlicerGuidedUltraSoundCalibration/README.md @@ -24,15 +24,15 @@ The main purpose of this project is to create a module that integrates an alread ## Progress and Next Steps 1. Added a model node to show a sphere within the image on slicer -2. Added a cross hair fiducial to collect the image coordinate from the center of the straw -3. 
Created a loable extension to connect the python module to the C++ code -4. Built slicer on my computer -5. Built openCV 3.3 +2. Added a cross hair fiducial to collect the image coordinate from the center of the straw +3. Created a loadable extension to connect the python module to the C++ code +4. Built slicer on my computer +5. Built openCV 3.3 6. Made the view Red view only -7. Added inputs for ultrasound system IP address and port -8. Added ultrasound streaming and freeze buttons -9. Added ultrasound streaming and freezing capabilities to these buttons -10. Next steps: to intgrate the guided portion of the algorithm +7. Added inputs for ultrasound system IP address and port +8. Added ultrasound streaming and freeze buttons +9. Added ultrasound streaming and freezing capabilities to these buttons +10. Next steps: to integrate the guided portion of the algorithm diff --git a/PW27_2018_Boston/Projects/SlicerReadTheDocs/README.md b/PW27_2018_Boston/Projects/SlicerReadTheDocs/README.md index ea9f72bde..d2e8c290c 100644 --- a/PW27_2018_Boston/Projects/SlicerReadTheDocs/README.md +++ b/PW27_2018_Boston/Projects/SlicerReadTheDocs/README.md @@ -35,7 +35,7 @@ Back to [Projects List](../../README.md#ProjectsList) - Slicer is now part of the [awesome-healthcare](https://github.com/kakoni/awesome-healthcare#imaging) list. - See also [What is an awesome list ?](https://github.com/sindresorhus/awesome/blob/master/awesome.md) -- Should we create an `awesome-medical-data` list ? +- Should we create an `awesome-medical-data` list ?
# Illustrations diff --git a/PW27_2018_Boston/Projects/SlicerSALT/README.md b/PW27_2018_Boston/Projects/SlicerSALT/README.md index a2b58fe7c..9101047d1 100644 --- a/PW27_2018_Boston/Projects/SlicerSALT/README.md +++ b/PW27_2018_Boston/Projects/SlicerSALT/README.md @@ -5,7 +5,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## Key Investigators - Beatriz Paniagua (Kitware) -- Jean-Christophe Fillion-Robin (Kitware) +- Jean-Christophe Fillion-Robin (Kitware) - Jared Vicory (Kitware) - Laura Pascal (Kitware) - Junpyo Hong (University of North Carolina) @@ -18,13 +18,13 @@ Back to [Projects List](../../README.md#ProjectsList) # Project Description -Slicer Shape AnaLysis Toolbox (SlicerSALT) is the dissemination vehicle of powerful shape analysis methodology based on 3D Slicer open-source software. SlicerSALT will enhance the intuitiveness and ease of use as well as allow researchers to find shape changes with higher statistical power. Altogether this constitutes a crucial resource for the imaging field that will enable many and important new findings in biomedical imaging studies. +Slicer Shape AnaLysis Toolbox (SlicerSALT) is the dissemination vehicle of powerful shape analysis methodology based on 3D Slicer open-source software. SlicerSALT will enhance the intuitiveness and ease of use as well as allow researchers to find shape changes with higher statistical power. Altogether this constitutes a crucial resource for the imaging field that will enable many and important new findings in biomedical imaging studies. 
-SlicerSALT will be used to: +SlicerSALT will be used to: + Compute Point Distributed Models (PDM) using Spherical Harmonic Representation on spherical topology objects (Extension alrealdy available in SlicerSALT: [SPHARM-PDM Extension](https://www.slicer.org/wiki/Documentation/4.8/Extensions/SpharmPdm)) + Compute [estimating shape correspondence for population of objects with complex topology](https://github.com/laurapascal/slicerprojectweek2018/blob/master/Estimation_of_shape_correspondence_for_population_of_objects_with_complex_topology.pdf) + Run 4D regression in a collection of 3D PDMs associated to a linear variable (i.e. age) (Extension soon available in SlicerSALT: Shape Regression Extension) -+ Perform correspondence optimization using study-wise shape analysis ++ Perform correspondence optimization using study-wise shape analysis + Fit skeletal representations (s-reps) to a collection of binary volumes + Compute image-based correspondence in binary volumes of different topologies @@ -37,27 +37,27 @@ SlicerSALT will be used to: ## Approach and Plan -1. SlicerSALT's Project: +1. SlicerSALT's Project: - Update of the Slicer version used by SlicerSALT to the new release - Incorporation of Shape Regression Extension in the new SlicerSALT release - Test of the new extensions 2. Project for the estimation of shape correspondence for population of objects with complex topology: - Comparison of the three methods already existing -- Investigation in order to universalize the Deformetrica method (issue with the use of Cuda/GPU) +- Investigation in order to universalize the Deformetrica method (issue with the use of Cuda/GPU) - Investigation in order to find a more efficient method (new tools, new algorithms, etc..) 
## Progress and Next Steps - Update of the Slicer version used by SlicerSALT to the new release -- Shape Regression Extension: +- Shape Regression Extension: - Fixing of some bugs on the shape regression computation - Adding of some tests - Test of the slicer extension package on Windows and Mac (Issue on Linux) -> Almost ready to be integrated in SlicerSALT -- Estimation of shape correspondence for population of objects with complex topology: +- Estimation of shape correspondence for population of objects with complex topology: - Comparison of the three methods already existing - Abandon of the ThinShellDemon method due to the generated results not enough accurate - - Next Steps: + - Next Steps: - Improvement of the Deformetrica method in order to accelerate the computation - Tests on new data @@ -65,11 +65,11 @@ SlicerSALT will be used to: # Illustrations -Shape Regression Extension: +Shape Regression Extension: -Estimation of shape correspondence for population of objects with complex topology: +Estimation of shape correspondence for population of objects with complex topology: diff --git a/PW27_2018_Boston/Projects/SlicerTraining/README.md b/PW27_2018_Boston/Projects/SlicerTraining/README.md index 0cfd6cb3b..c51d2a43b 100644 --- a/PW27_2018_Boston/Projects/SlicerTraining/README.md +++ b/PW27_2018_Boston/Projects/SlicerTraining/README.md @@ -12,8 +12,8 @@ Back to [Projects List](../../README.md#ProjectsList) - [Presentation](https://medtec4susdev.github.io/3DSlicerTrainingNetwork/projectdemo) -The main objective is the creation of a training plan that could be adapted for 3DSlicer new users with both: medical and engineer profiles. This plan should consider the needs and particular characteristics in the developing countries of Cape Verde, Senegal and Mauritania. -In any case the training material sould be oriented to the creation of digital learning objects. 
+The main objective is the creation of a training plan that could be adapted for 3DSlicer new users with both: medical and engineer profiles. This plan should consider the needs and particular characteristics in the developing countries of Cape Verde, Senegal and Mauritania. +In any case the training material sould be oriented to the creation of digital learning objects. ## Objective @@ -33,7 +33,7 @@ In any case the training material sould be oriented to the creation of digital l + [3D Slicer Clinicians](https://github.com/NA-MIC/ProjectWeek/blob/master/PW27_2018_Boston/Projects/SlicerTraining/TrainingPlan_3DSlicer%2BPlus_en_v1_U.pdf) + [3D Slicer Engineers](https://github.com/NA-MIC/ProjectWeek/blob/master/PW27_2018_Boston/Projects/SlicerTraining/TrainingPlan_ImagingTechnologies_en_v1_T.pdf) 1. There will be a first training workshop in March for professionals from Cape Verde, Senegal and Mauritania -1. It is planned as the start point of future collaborations with the team of the MACbioIDi project +1. It is planned as the start point of future collaborations with the team of the MACbioIDi project # Illustrations diff --git a/PW27_2018_Boston/Projects/Template/README.md b/PW27_2018_Boston/Projects/Template/README.md index 3dcb8e916..69642d07c 100644 --- a/PW27_2018_Boston/Projects/Template/README.md +++ b/PW27_2018_Boston/Projects/Template/README.md @@ -43,4 +43,3 @@ Back to [Projects List](../../README.md#ProjectsList) - Source code: https://github.com/YourUser/YourRepository - Documentation: https://link.to.docs - Test data: https://link.to.test.data - diff --git a/PW27_2018_Boston/Projects/TrajectoryPlanning/README.md b/PW27_2018_Boston/Projects/TrajectoryPlanning/README.md index 0b8373616..343f4ec79 100644 --- a/PW27_2018_Boston/Projects/TrajectoryPlanning/README.md +++ b/PW27_2018_Boston/Projects/TrajectoryPlanning/README.md @@ -34,7 +34,7 @@ Progress: 5. 
Started playing with the "manual" modification of nodes and vtkpolydatas 6. Started the process of building the core files of PILOT plugin along with Slicer (ongoing...) -Next steps: +Next steps: 1. Finish the build 2. Link UI widgets to functions in the PILOT code diff --git a/PW27_2018_Boston/Projects/dynamic-needle-tracking/README.md b/PW27_2018_Boston/Projects/dynamic-needle-tracking/README.md index 8bf1a6f86..12879ed2f 100644 --- a/PW27_2018_Boston/Projects/dynamic-needle-tracking/README.md +++ b/PW27_2018_Boston/Projects/dynamic-needle-tracking/README.md @@ -58,4 +58,3 @@ See [Closed-loop Autonomous Needle Steering during Cooperatively Controlled Need [3] F. Zijlstra, J. G. Bouwman, I. Braškutė, M. A. Viergever, and P. R. Seevinck, “Fast Fourier-based simulation of off-resonance artifacts in steady-state gradient echo MRI applied to metal object localization,” Magn Reson Med. 2017 Nov;78(5):2035-41. [4] A. Mastmeyer, G. Pernelle, R. Ma, L. Barber, and T. Kapur, “Accurate Model-based Segmentation of Gynecologic Brachytherapy Catheter Collections in MRI-images,” Med Image Anal. 2017 Dec;42:173-88 - diff --git a/PW27_2018_Boston/README.md b/PW27_2018_Boston/README.md index da29dc6fb..6268d306c 100644 --- a/PW27_2018_Boston/README.md +++ b/PW27_2018_Boston/README.md @@ -1,7 +1,7 @@ ![ProjectWeekLogo](PW27-logo-240.png) Stata Center ## Welcome to the web page for the 27th Project Week! -The 27th NA-MIC Project Week was held during the week of January 8-12, 2018 at the Stata Center at MIT. +The 27th NA-MIC Project Week was held during the week of January 8-12, 2018 at the Stata Center at MIT. It recorded 72 registered attendees, who worked on 53 projects. 
These attendees represented 25 academic sites: Boston Children's Hospital, Harvard Medical School (USA), Brigham and Women's Hospital, Harvard Medical School (USA), Children's Hospital of Philadelphia (USA), Concordia University (Canada), Fraunhofer Institute for Medical Image Computing MEVIS (Germany), Geneva University Hospital (Switzerland), Imperial College (UK), Instituto de Astrofísica de Canarias (Spain), Massachusetts General Hospital, Harvard Medical School (USA), Massachusetts Institute of Technology (USA), Montreal Neurological Institute (Canada), Netherlands Cancer Institute (The Netherlands), NYU School of Medicine (USA), Oslo University Hospital (Norway), Otto von Guericke University (Germany), Queen's University (Canada), Robarts Research Institute (Canada), University of Bremen (Germany), University of British Columbia (Canada), University of Las Palmas de Gran Canaria (Spain), University of Magdeburg (Germany), University of Michigan (USA), University of Strasbourg (France), Western University (Canada), Worcester Polytechnic Institute (USA). 8 companies attended the event: Augmented Intelligence Inc. (USA), Fraunhofer MEVIS (Germany), GE Research (USA), Harmonus Inc. (USA), Isomics, Inc. (USA), Kitware Inc. (USA), KnowledgeVis, LLC (USA) Radical Imaging, LLC (USA). 
diff --git a/PW28_2018_GranCanaria/Breakouts/TutorialsReview/README.md b/PW28_2018_GranCanaria/Breakouts/TutorialsReview/README.md index 8157e7050..8fa084d57 100644 --- a/PW28_2018_GranCanaria/Breakouts/TutorialsReview/README.md +++ b/PW28_2018_GranCanaria/Breakouts/TutorialsReview/README.md @@ -19,7 +19,7 @@ * Downloads * Jupyter Notebooks * Some experiments on making Slicer work as a kernel for Jupyter - + ## Desired Features * Standardized formats using well designed templates * Globally accessible @@ -55,9 +55,8 @@ * References to publications * Funding sources * Sample data - + ## Ideas - + * Collect feedback from outsiders * Tutorial Generator extension - diff --git a/PW28_2018_GranCanaria/Breakouts/WebTechnologies/README.md b/PW28_2018_GranCanaria/Breakouts/WebTechnologies/README.md index 2db2ebd73..eb2bc7bd1 100644 --- a/PW28_2018_GranCanaria/Breakouts/WebTechnologies/README.md +++ b/PW28_2018_GranCanaria/Breakouts/WebTechnologies/README.md @@ -1,6 +1,6 @@ # Web Technologies Breakout * Overall goal is to leverage latest generations of hardware and software technologies for our medical research applications -* +* # OHIF Demo * [OHIFViewer demo (temporary development server)](http://ohifviewer-staging.herokuapp.com/studylist) @@ -23,4 +23,3 @@ # See Also [Bio Image Suite Web](https://bioimagesuiteweb.github.io/webapp/) - diff --git a/PW28_2018_GranCanaria/PW28InTheMedia.md b/PW28_2018_GranCanaria/PW28InTheMedia.md index f279a6286..6ddf09a06 100644 --- a/PW28_2018_GranCanaria/PW28InTheMedia.md +++ b/PW28_2018_GranCanaria/PW28InTheMedia.md @@ -14,7 +14,7 @@ - [Azarplus](http://www.azarplus.com/2018-06-26/la-genetica-es-fundamental-en-el-desarrollo-de-la-ludopatia/16615/noticia/) -- [20180625 RTVC - 21'10''](http://www.rtvc.es/television/multimedia/Telenoticias%201-46/25-06-18-1876.aspx#.WzYOONJKjIV) +- [20180625 RTVC - 21'10''](http://www.rtvc.es/television/multimedia/Telenoticias%201-46/25-06-18-1876.aspx#.WzYOONJKjIV) - [Casa 
África](http://www.casafrica.es/agenda_europa_africa.jsp?DS318.PROID=903669) diff --git a/PW28_2018_GranCanaria/PreparatoryMeetingsNotes.md b/PW28_2018_GranCanaria/PreparatoryMeetingsNotes.md index 1abf7979a..52f4215ed 100644 --- a/PW28_2018_GranCanaria/PreparatoryMeetingsNotes.md +++ b/PW28_2018_GranCanaria/PreparatoryMeetingsNotes.md @@ -27,19 +27,19 @@ These are notes from the Project Week Preparation Meetings. ## Meeting #3: May 08 -- Brief review of the assistant’s projects: Francisco Marcano (Spectroscopy with MRI and infrared), Simon D. (3D printing), Till, Luke and James (segmentation), Paolo (Containers). It will be interesting to propose a “segment editor” session. -- Pages to check about MRI and spectroscopy projects: +- Brief review of the assistant’s projects: Francisco Marcano (Spectroscopy with MRI and infrared), Simon D. (3D printing), Till, Luke and James (segmentation), Paolo (Containers). It will be interesting to propose a “segment editor” session. +- Pages to check about MRI and spectroscopy projects: - [https://sourceforge.net/p/sivic/sivicwiki/Home/](https://sourceforge.net/p/sivic/sivicwiki/Home/) - [https://na-mic.org/wiki/2011_Winter_Project_Week:MRSI_module_and_SIVIC_interface](https://na-mic.org/wiki/2011_Winter_Project_Week:MRSI_module_and_SIVIC_interface) - [https://github.com/SIVICLab/sivic/tree/master/applications/slicer_plugin/MRSpectroscopy](https://github.com/SIVICLab/sivic/tree/master/applications/slicer_plugin/MRSpectroscopy) -- To include the MACbioIDi project parallel agenda in google calendar. -- To write a few lines about the projects proposed. They should be ready next week. -- The project git pages deadline is may, 28th. +- To include the MACbioIDi project parallel agenda in google calendar. +- To write a few lines about the projects proposed. They should be ready next week. +- The project git pages deadline is may, 28th. 
## Meeting #2: May 01 Topic: Segmentation -- Segmentation tasks using slicer. +- Segmentation tasks using slicer. - Introduction to some projects: - Efficient approach to the automatization of segmentation tasks. It could have some interest the use of CIP module _Body composition_. - The use of CIP with tuberculosis cases, some of them provided by the African countries. @@ -51,7 +51,7 @@ Topic: Segmentation ## Meeting #1: April 24 - Tina’s brief introduction of zoom and the host of a meeting. -- Juan’s presentation of the venue, hotels, some logistics and the African countries involved in the MACbioIDi project. +- Juan’s presentation of the venue, hotels, some logistics and the African countries involved in the MACbioIDi project. - Presentation of each participant interests. - Mike’s proposal of a breakout session - It could be interesting to discuss about the creation a segmentation-only slicelet for Slicer, kind of ITK-Snap style, which novices can start to use very easily. diff --git a/PW28_2018_GranCanaria/Projects/3DSlicerModelsForSeriousGames/README.md b/PW28_2018_GranCanaria/Projects/3DSlicerModelsForSeriousGames/README.md index 7fde220cc..0b96650a6 100644 --- a/PW28_2018_GranCanaria/Projects/3DSlicerModelsForSeriousGames/README.md +++ b/PW28_2018_GranCanaria/Projects/3DSlicerModelsForSeriousGames/README.md @@ -60,4 +60,3 @@ Unity Scenario video examples. + [Virtual Reality with Customized Positive Stimuli in a Cognitive-Motor Rehabilitation Task: A feasibility study with subacute stroke patients with mild cognitive impairment Mónica S. 
Cameirão, Fábio Pereira, Sergi Bermúdez i Badia](https://neurorehabilitation.m-iti.org/lab/wp-content/plugins/zotpress/lib/request/request.dl.php?api_user_id=161215&key=4ZQMQFB3&content_type=application/pdf) - diff --git a/PW28_2018_GranCanaria/Projects/3DSlicerTrainingPrograms/README.md b/PW28_2018_GranCanaria/Projects/3DSlicerTrainingPrograms/README.md index 7e4aa1dae..6bb6cd616 100644 --- a/PW28_2018_GranCanaria/Projects/3DSlicerTrainingPrograms/README.md +++ b/PW28_2018_GranCanaria/Projects/3DSlicerTrainingPrograms/README.md @@ -31,8 +31,8 @@ After the first edition of the training program (Slicer Ecosystems Education for 27thPW NA-MIC we will be studying results and prepare a new proposals according to the necessities of the involved countries: Cape Verde, Senegal, Mauritania, plus Mozambique and Azores. This plan will consider previous results and feedback from the professionals of these countries. We must not forget that in their -countries some of them have already started with their own training programs. All of this will be considered in new plans, as well -as collaboation in other research projects. +countries some of them have already started with their own training programs. All of this will be considered in new plans, as well +as collaboation in other research projects. ## Objectives @@ -44,15 +44,15 @@ as collaboation in other research projects. ## Approach and Plan -1. Study the results of the training sessions already completed. +1. Study the results of the training sessions already completed. ## Progress and Next Steps -1. Plan a new training program for Mauritania. +1. Plan a new training program for Mauritania. - Train the trainers 1. Plan a new training program for Mozambique - - Train the trainers + - Train the trainers - Students at the university - Residents - profiles: General / Images / Surgeons 1. 
Continue working in the development of training contents - learning objects - in: [English, French, Portuguese, Arabic, Spanish](https://mt4sd.ulpgc.es/slicer-int/index.php/P%C3%A1gina_principal) @@ -81,4 +81,3 @@ Senegal training sessions. ## Background and References + [A Medical Imaging Computing Learning Program: Transitioning Towards The Medical Technology In Africa](https://library.iated.org/view/AFONSOSUAREZ2018AME) - diff --git a/PW28_2018_GranCanaria/Projects/3DViewsLinking/README.md b/PW28_2018_GranCanaria/Projects/3DViewsLinking/README.md index 0d1c9a210..860c6ce05 100644 --- a/PW28_2018_GranCanaria/Projects/3DViewsLinking/README.md +++ b/PW28_2018_GranCanaria/Projects/3DViewsLinking/README.md @@ -16,7 +16,7 @@ Back to [Projects List](../../README.md#ProjectsList) Adding 3D views linking functionalities such as the 2D one. ## Objective -The 3D view controller widget should have GUI for synchronizing the following proprieties: +The 3D view controller widget should have GUI for synchronizing the following proprieties: * displayed content (what models, volumes, segmentations, etc. are visible in each view) * view properties (show/hide ruler, orientation marker, background color, etc) @@ -33,11 +33,11 @@ The 3D view controller widget should have GUI for synchronizing the following pr * Design the GUI and feedback (Done!): * do we what a popup menu for some buttons (either old or new ones)? or show all of them? or put part of them in the adavnced control in the View Controller module? - + let's leave as it is * shall we add GUI for recent volume rendering variables moved from the MRMLVolumeRendering to the MRMLView node (Csaba mod to volume rendering)? Probably adding also this will be confusing (i.e. duplication of GUI and sync with volume rendering GUI). - + not necessary! @@ -45,10 +45,10 @@ The 3D view controller widget should have GUI for synchronizing the following pr no double mode, leave it very simple. 
i.e., click linking and the cameras are all updated, GUI, etc. Implementation to do as the MRMLSliceLogic one. - + ![](https://raw.githubusercontent.com/Punzo/SlicerAstroWikiImages/master/3Dviewlinking1.png) - * (b) different angle of view for second (third, etc.) linked camera? different camera motion, etc... + * (b) different angle of view for second (third, etc.) linked camera? different camera motion, etc... specialized interface in the cameras module. It will be designed and implemented later on @@ -56,15 +56,15 @@ The 3D view controller widget should have GUI for synchronizing the following pr add pick from 3d view when right click. It will show a menu with the edit action. -* Implementation: +* Implementation: * (a) : Done! pull request at https://github.com/Slicer/Slicer/pull/980 - + ![](https://github.com/NA-MIC/ProjectWeek/blob/master/PW28_2018_GranCanaria/Projects/3DViewsLinking/myimage.gif?raw=true) - + next step: for Slicer 4.10 (no backward compatibility): merge vtkMRMLCameraNode into vtkMRMLViewNode - + * (b) : Done! 
added commit https://github.com/Slicer/Slicer/pull/980/commits/295d4cd7f5ff2a3c25a33c4017b15c78d89ccced - in pull request https://github.com/Slicer/Slicer/pull/980 + in pull request https://github.com/Slicer/Slicer/pull/980 videos: https://www.dropbox.com/s/dgjg8raozt851m6/3dviewlink.mp4?dl=0 https://www.dropbox.com/s/dgjg8raozt851m6/3dviewlink.mp4?dl=0 diff --git a/PW28_2018_GranCanaria/Projects/4D_MRI_via_retrospectiv_stacking/README.md b/PW28_2018_GranCanaria/Projects/4D_MRI_via_retrospectiv_stacking/README.md index 5fba7f5af..4b8edc235 100644 --- a/PW28_2018_GranCanaria/Projects/4D_MRI_via_retrospectiv_stacking/README.md +++ b/PW28_2018_GranCanaria/Projects/4D_MRI_via_retrospectiv_stacking/README.md @@ -15,11 +15,11 @@ - Christian Hansen (University of Magdeburg) # Project Description -Creating a program to generate 4D MRI sequences applying the retrospectiv stacking method on 2D MR slices. -The available data is comprised of an time resolved alternating sequence of navigator and data slices and a pure sequence of time resolved navigator slices. -All navigator slices were acquired at the exact same location and the data slices were "scanning" the complete liver in a cyclic manner. -To generate a 4D MR sequence from that the program has to collect all data frames that were acquired during the same breating phase, i.e. not at the same time but at different times during the same breathing phase. -To find these, the navigator slices are utilized. Finding correspondences between the navigator slices of the pure navigator sequence and the once of the alternating sequence means to find similar or same breathing phases. +Creating a program to generate 4D MRI sequences applying the retrospectiv stacking method on 2D MR slices. +The available data is comprised of an time resolved alternating sequence of navigator and data slices and a pure sequence of time resolved navigator slices. 
+All navigator slices were acquired at the exact same location and the data slices were "scanning" the complete liver in a cyclic manner. +To generate a 4D MR sequence from that the program has to collect all data frames that were acquired during the same breating phase, i.e. not at the same time but at different times during the same breathing phase. +To find these, the navigator slices are utilized. Finding correspondences between the navigator slices of the pure navigator sequence and the once of the alternating sequence means to find similar or same breathing phases. Thus we find all corresponding data slices giving the 3D liver at the specific breathing phase. ## Objective @@ -33,12 +33,12 @@ Thus we find all corresponding data slices giving the 3D liver at the specific b ## Progress and Next Steps **previous steps** -- got insight in available and relevant DICOM tags for the data at hand (big thanks to Joost for the DICOM Explorer) +- got insight in available and relevant DICOM tags for the data at hand (big thanks to Joost for the DICOM Explorer) - hit a roadblock when data appeared to be faulty - wrote a python script sorting the data by acquisition time, turns out data is faulty after all (thanks to Joost again) - figured out how to change the MRI sequence back in Germany - acquired lots of data -- started program to stack the volumes +- started program to stack the volumes **progress during project week** - made huge progress on the code front @@ -51,7 +51,7 @@ Thus we find all corresponding data slices giving the 3D liver at the specific b **next steps** - finish the program - annotate 3D samples of the 4D Data for ground truth using 3D Slicer -- actually use the data +- actually use the data # Illustrations @@ -59,7 +59,7 @@ Data Acquisition ![Data acquisition](dataAquisition.png) Data Sorting scheme -![Data Sorting](dataSorting.png) +![Data Sorting](dataSorting.png) vessle tracking ![vessle tracking](vessleTracking.png) diff --git 
a/PW28_2018_GranCanaria/Projects/AdaptiveMIS/README.md b/PW28_2018_GranCanaria/Projects/AdaptiveMIS/README.md index 0988e3b0d..99f636566 100644 --- a/PW28_2018_GranCanaria/Projects/AdaptiveMIS/README.md +++ b/PW28_2018_GranCanaria/Projects/AdaptiveMIS/README.md @@ -1,6 +1,6 @@ Back to [Projects List](../../README.md#ProjectsList) -# Intelligent and Adapative Minimally Invasive Surgery Simulation +# Intelligent and Adapative Minimally Invasive Surgery Simulation ## Key Investigators @@ -31,8 +31,8 @@ The objective is to create a MIS training simulation system, allowing an individ ## Approach and Plan -1. Designing user-interface and Unity based games, to simulate MIS conditions -1. Implementation and use of the metrices from the PerkTutor and SlicerIGT +1. Designing user-interface and Unity based games, to simulate MIS conditions +1. Implementation and use of the metrices from the PerkTutor and SlicerIGT 1. Synchronisation of the MIS instruments (optical tracker) with the Unity based game 1. Exploration and implementation of AI methods (Student modelling, intelligent tutoring system) for the game application @@ -41,7 +41,7 @@ The objective is to create a MIS training simulation system, allowing an individ 1. Brainstorming the methods for the optimal instrument calibration of the optical tracker 1. Liasing with the Simulation Lab at the University Complejo Hospital, for optimising the proposed simulation for clinical use 1. Next step: Work closely with the PerkLab using PerkTutor and SlicerIGT for laparoscopy CBME training for the Unity-based application for simulation “games” using 3D-printed laparoscopy tools. Incorporating information about respiratory organ movement into the simulation. -1. Find a way to accurately track the MIS instruments, using the metrics collected and calculated by the PerkTutor +1. 
Find a way to accurately track the MIS instruments, using the metrics collected and calculated by the PerkTutor # Illustrations @@ -50,5 +50,3 @@ The objective is to create a MIS training simulation system, allowing an individ ![The 3D Unity Based Game, using the Polhemus Tracker](IMG_2713.JPG) ![The 3D Unity Based Game, using the Polhemus Tracker](IMG_2716.JPG) ![The 3D Unity Based Game, using the Polhemus Tracker](IMG_2760.JPG) - - diff --git a/PW28_2018_GranCanaria/Projects/AtlasDevelopmentForEducation/README.md b/PW28_2018_GranCanaria/Projects/AtlasDevelopmentForEducation/README.md index 4c5de8f77..6ea2b1b76 100644 --- a/PW28_2018_GranCanaria/Projects/AtlasDevelopmentForEducation/README.md +++ b/PW28_2018_GranCanaria/Projects/AtlasDevelopmentForEducation/README.md @@ -20,7 +20,7 @@ Back to [Projects List](../../README.md#ProjectsList) This project aims to create different anatomical atlas. During this week it is planned the creation of an anatomic atlas of the pelvis of the human body, and a lung atlas. It will be used the 3DSlicer segment editor to create the models that later on will be displayed with the Open Anatomy Atlas for -research and training purposes. It will also be evaluated the Chest Imaging Platform to segment lungs with +research and training purposes. It will also be evaluated the Chest Imaging Platform to segment lungs with tuberculosis disease, also for training purposes. (Data: pelvis -CT-, prostate -MRI- and tuberculosis -CT-) @@ -28,7 +28,7 @@ tuberculosis disease, also for training purposes. (Data: pelvis -CT-, prostate - 1. To segment the pelvis of the human body of a male subject (Images provided by the African countries). 1. To segment lungs with tuberculosis disease (Images provided by the African countries). -1. To create an anatomical atlas to be exported and use the Open Anatomy Atlas project to visualize the elements. +1. To create an anatomical atlas to be exported and use the Open Anatomy Atlas project to visualize the elements. 
1. To create a tool that helps putting all the files needed for the web atlas to work, toguether @@ -55,7 +55,7 @@ tuberculosis disease, also for training purposes. (Data: pelvis -CT-, prostate - * Atlas assembly web tool - + ## Illustrations diff --git a/PW28_2018_GranCanaria/Projects/CIP_DeepLearning/README.md b/PW28_2018_GranCanaria/Projects/CIP_DeepLearning/README.md index be5a0f4df..7bda81f27 100644 --- a/PW28_2018_GranCanaria/Projects/CIP_DeepLearning/README.md +++ b/PW28_2018_GranCanaria/Projects/CIP_DeepLearning/README.md @@ -1,6 +1,6 @@ Back to [Projects List](../../README.md#ProjectsList) -# Chest Imaging Platform: segmentation of lungs and pectoralis based on deep learning +# Chest Imaging Platform: segmentation of lungs and pectoralis based on deep learning ## Key Investigators diff --git a/PW28_2018_GranCanaria/Projects/CIP_Python3/README.md b/PW28_2018_GranCanaria/Projects/CIP_Python3/README.md index c112b3294..a64ef58e2 100644 --- a/PW28_2018_GranCanaria/Projects/CIP_Python3/README.md +++ b/PW28_2018_GranCanaria/Projects/CIP_Python3/README.md @@ -12,7 +12,7 @@ Back to [Projects List](../../README.md#ProjectsList) New release of the Chest Imaging Platform (last one supporting Python 2!). -Chest Imaging Platform migration to Python 3. +Chest Imaging Platform migration to Python 3. 
## Objective diff --git a/PW28_2018_GranCanaria/Projects/CustomGUIForUSSimulator/Readme.md b/PW28_2018_GranCanaria/Projects/CustomGUIForUSSimulator/README.md similarity index 96% rename from PW28_2018_GranCanaria/Projects/CustomGUIForUSSimulator/Readme.md rename to PW28_2018_GranCanaria/Projects/CustomGUIForUSSimulator/README.md index 80368b95f..529b554ba 100644 --- a/PW28_2018_GranCanaria/Projects/CustomGUIForUSSimulator/Readme.md +++ b/PW28_2018_GranCanaria/Projects/CustomGUIForUSSimulator/README.md @@ -32,11 +32,11 @@ The main goal of this project is to develop a custom graphic user interface (GUI We defined several sketches and chose the better sketch which satisfied the user requirements: - US images in 2D viewer - 3D scene for the training system (tools and phantom) - - IGT connection management (buttons and text) + - IGT connection management (buttons and text) -In first proposal, we implemented the chosen user interface in slicelet module. The slicelet provides a simple way to customize the user interface but our real-time requirement is not fulfilled. +In first proposal, we implemented the chosen user interface in slicelet module. The slicelet provides a simple way to customize the user interface but our real-time requirement is not fulfilled. -In second proposal, we developed the user interface in Guidelet module after the Perklab team's suggestion. This guidelet is better suited to real-time applications and, hence, we decided to implement this alternative solution. The user interface is based on the template provided by the Guidelet module. +In second proposal, we developed the user interface in Guidelet module after the Perklab team's suggestion. This guidelet is better suited to real-time applications and, hence, we decided to implement this alternative solution. The user interface is based on the template provided by the Guidelet module. 
In this week, we developed a fully operational interface that can be used as GUI for our US simulator training system instead of the standard 3D Slicer one. We consider that the new, simpler interface is more suitable to include the simulator in training course for students. Moreover, we learnt the way to modify sensitive aspects such as viewer layout, buttons, check boxes and other widgets, in anticipation of the changes that clinicians and users may request or suggest in the future. @@ -66,7 +66,7 @@ GUI in Slicelet version GUI in Guidelet version - + diff --git a/PW28_2018_GranCanaria/Projects/DocumentingSlicerUsingReadTheDocs/README.md b/PW28_2018_GranCanaria/Projects/DocumentingSlicerUsingReadTheDocs/README.md index 0e5348a63..7424cac47 100644 --- a/PW28_2018_GranCanaria/Projects/DocumentingSlicerUsingReadTheDocs/README.md +++ b/PW28_2018_GranCanaria/Projects/DocumentingSlicerUsingReadTheDocs/README.md @@ -28,7 +28,7 @@ Ultimately, all modules would have their documentation managed along side the Sl 2. Document process to install Qt5 and build Slicer using it 2. Discuss and work on migration of developer documentation. 3. Setup documentation of [KitwareMedical/SlicerCustomAppTemplate](https://github.com/KitwareMedical/SlicerCustomAppTemplate) on readthedocs -4. Create `sphinx-cmake` python package. A sphinx documentation framework plugin enabling project to easily document their build system. +4. Create `sphinx-cmake` python package. A sphinx documentation framework plugin enabling project to easily document their build system. 
## Progress and Next Steps diff --git a/PW28_2018_GranCanaria/Projects/EvaluationOfProjects/README.md b/PW28_2018_GranCanaria/Projects/EvaluationOfProjects/README.md index 120dde47c..15d75dfb9 100644 --- a/PW28_2018_GranCanaria/Projects/EvaluationOfProjects/README.md +++ b/PW28_2018_GranCanaria/Projects/EvaluationOfProjects/README.md @@ -13,12 +13,12 @@ Back to [Projects List](../../README.md#ProjectsList) ### Evaluation of projects: augmented reality system to be used in surgeries, software for orthognatic planning, simulator of bone sliding. -The Canary Islands Institute of Technology (ITC) (http://www.itccanarias.org/web/) is an applied research Institute belonging to the Regional Government of Canary Islands (Spain). The ITC is active in different research fields. In particular, the Biomedical Engineering Department focuses on 3D-printed, custom-made implants for bone loss reconstruction and regeneration. +The Canary Islands Institute of Technology (ITC) (http://www.itccanarias.org/web/) is an applied research Institute belonging to the Regional Government of Canary Islands (Spain). The ITC is active in different research fields. In particular, the Biomedical Engineering Department focuses on 3D-printed, custom-made implants for bone loss reconstruction and regeneration. Biomedical Engineering Department Workflow - + During this workshop we plan to explore, together with the GTMA (Group of Medical Technology and Audiovisuals), the possibilities of 3DSlicer and its environment in three possible projects. Proposals: @@ -53,17 +53,17 @@ SlicerCMF (CranioMaxiloFacial app) was preliminary evaluated, capabilities and e + Project #2: 'Augmented reality for patient-specific implant surgery' -SlicerVR was considered. Our roadmap: +SlicerVR was considered. Our roadmap: - [x] Focus on spine surgery (worst-case scenario). - [ ] Assess intraoperative ultrasound for spine surgery, and perform 3D reconstruction using trackers. 
- [ ] Build an ultrasound phantom to validate the setup. It should include the spine and surrounding main vessels (aorta!) - [ ] Use VR to navigate the model and US-3D volume to locate blood vessels, muscle or cartilage during tumor resection - + + Project #3: 'A simulator for the humerus and ulna articular surfaces' Postponed for reevaluation. + Project #4 (Bonus!): -Slicer has proven to be very useful for "voxel printing" anatomical models, which is a 3D-printing technique that can provide very realistic results - 3D in full color, transparency, various degrees of elasticity - and that does not require segmentation, nor the creation of an STL file. Other multi-color 3D-printing techniques require one STL per color, and the colors are therefore "solid". Slicer can *effortlessly* perform the required 3D interpolation and slicing that is required for this technique. +Slicer has proven to be very useful for "voxel printing" anatomical models, which is a 3D-printing technique that can provide very realistic results - 3D in full color, transparency, various degrees of elasticity - and that does not require segmentation, nor the creation of an STL file. Other multi-color 3D-printing techniques require one STL per color, and the colors are therefore "solid". Slicer can *effortlessly* perform the required 3D interpolation and slicing that is required for this technique. ## Illustrations The following are real 3d-printed anatomical models - not renders - using "voxel printing" @@ -79,4 +79,3 @@ Credit: C. Bader, D. Kolb, J. C. Weaver, S. Sharma, A. Hosny, J. Costa, N. 
Oxman ## Background and References + [Osteobionix](https://github.com/NA-MIC/ProjectWeek/blob/master/PW28_2018_GranCanaria/Projects/EvaluationOfProjects/presentation%20letter%20ITC.pdf) - diff --git a/PW28_2018_GranCanaria/Projects/FWFintegration/README.md b/PW28_2018_GranCanaria/Projects/FWFintegration/README.md index 58e3906f9..67154641d 100644 --- a/PW28_2018_GranCanaria/Projects/FWFintegration/README.md +++ b/PW28_2018_GranCanaria/Projects/FWFintegration/README.md @@ -10,7 +10,7 @@ Back to [Projects List](../../README.md#ProjectsList) # Project Description Tensor-valued diffusion encoding is an emerging technique within diffusion MRI. The novelty lies in the multidimensional encoding of the diffussion process. -To support such encoding, we must replace the conventional diffusion encoding sequence with arbitrary gradient waveforms that facilitate a large range of encoding strategies. +To support such encoding, we must replace the conventional diffusion encoding sequence with arbitrary gradient waveforms that facilitate a large range of encoding strategies. In doing so, we also need to store new kinds of experimental information to support traceability. In this project, we will extend the MRI pulse sequence to calculate and embed several necessary parameters in the DICOM header. # Constraints and Challenges @@ -37,8 +37,8 @@ In doing so, we also need to store new kinds of experimental information to supp Objective 1 was prepared during Hackathon in Sweden by Isaiah Norton, and is almost complete. -Next step: -Introduce C-code into pulse sequence, and test compilation conflicts. +Next step: +Introduce C-code into pulse sequence, and test compilation conflicts. 
Translate "Load" type waveform, without rotations, into the WIP parameter space Complement waveform parameters (motion, acceleration, k0 nulling with maxwell terms, etc) and store in header diff --git a/PW28_2018_GranCanaria/Projects/Implementing1HTimeResolvedFunctionalMagneticResonanceSpectroscopy/README.md b/PW28_2018_GranCanaria/Projects/Implementing1HTimeResolvedFunctionalMagneticResonanceSpectroscopy/README.md index 2eba4468f..07114b99c 100644 --- a/PW28_2018_GranCanaria/Projects/Implementing1HTimeResolvedFunctionalMagneticResonanceSpectroscopy/README.md +++ b/PW28_2018_GranCanaria/Projects/Implementing1HTimeResolvedFunctionalMagneticResonanceSpectroscopy/README.md @@ -10,9 +10,9 @@ Back to [Projects List](../../README.md#ProjectsList) ## Project Description -The aim of this project is to implement signal separation algorithms to process single voxel or chemical shift imaging time sequences. A sequence is compose of frames with a period of NEX+TR sec each. The output of the processing is an estimated sequence of spectra with increased signal-to-noise ratio (SNR). SNR-improved sequence could be used to extract the chemical kinetic information of metabolites might be found in the original sequence in ideal absence of noise, and improve quantification of low-concentration metabolites in each frame. +The aim of this project is to implement signal separation algorithms to process single voxel or chemical shift imaging time sequences. A sequence is compose of frames with a period of NEX+TR sec each. The output of the processing is an estimated sequence of spectra with increased signal-to-noise ratio (SNR). SNR-improved sequence could be used to extract the chemical kinetic information of metabolites might be found in the original sequence in ideal absence of noise, and improve quantification of low-concentration metabolites in each frame. Quantification of processed sequence is performed by third-party software. 
-Algorithms have previous been coded in Matlab, and it is proposed their translation to 3D Slicer numeric and scientific library framework. +Algorithms have previous been coded in Matlab, and it is proposed their translation to 3D Slicer numeric and scientific library framework. It is also encouraged to implement secure communication routines to interact with quantification software from 3D Slicer application. @@ -45,14 +45,11 @@ Voxel location and average spectrum. -Simulation of the voxel value (molecule concentration) changing in time (sequence). +Simulation of the voxel value (molecule concentration) changing in time (sequence). -Denoised spectrum sequence (Matlab) +Denoised spectrum sequence (Matlab) - - - diff --git a/PW28_2018_GranCanaria/Projects/Insula_segmentation_with_3DSlicer/README.md b/PW28_2018_GranCanaria/Projects/Insula_segmentation_with_3DSlicer/README.md index e913a99f3..f428c2103 100644 --- a/PW28_2018_GranCanaria/Projects/Insula_segmentation_with_3DSlicer/README.md +++ b/PW28_2018_GranCanaria/Projects/Insula_segmentation_with_3DSlicer/README.md @@ -15,7 +15,7 @@ To parcellate accurately the Insula into each principal anatomic units. Specific ## Objective Using a T1 MRI dataset of the Human Connectome Project (HCP) we volumetrically measured the aINS and pINS. -This was achieved using the T1 MRI and the Slicer 3D software in two steps. First, by defining the precise anatomy of the insula by identifying the morphology of the insular sulci which are visible and second, by parcellating the two subunits of the insula, labeling and measuring their volumes them. +This was achieved using the T1 MRI and the Slicer 3D software in two steps. First, by defining the precise anatomy of the insula by identifying the morphology of the insular sulci which are visible and second, by parcellating the two subunits of the insula, labeling and measuring their volumes them. 1. 
To segment in each coronal slice the aINS and pINS ROIs in order to measure the number of voxels in each ROI per coronal slice. @@ -44,10 +44,10 @@ Implement a method to accurately parcellate the anatomical structure Insula of R Procedure: -To optimize accuracy, reliability and overall time needed, we studied several combinations of the tools available in the Segment Editor, interacting with the developers. +To optimize accuracy, reliability and overall time needed, we studied several combinations of the tools available in the Segment Editor, interacting with the developers. After several attempts using Segment editor tools 'threshold painting', 'grow from seeds', 'watershed', 'fill between the slices', 'tracing level' the actual results were obtained as follows: -1) On a T1 MRI image, using the Segment Editor module, we used 3 segments (via "add" icon). The first segment was for the background, the second for the whole Insula and the third for the separation between the two subunits of the insula, i.e., aINS and pINS. +1) On a T1 MRI image, using the Segment Editor module, we used 3 segments (via "add" icon). The first segment was for the background, the second for the whole Insula and the third for the separation between the two subunits of the insula, i.e., aINS and pINS. To establish accurately the borders of aINS and pINS we traced the Circular sulcus and the Central sulcus of the Insula as follows. First, we used three sagittal images where these sulci were visually well-identifiable. Consequently, these sulci were detected on coronal sections as a series of dots, which served as our key anatomical landmarks for the segmentation of aINS and pINS. More specifically, the circular sulcus of the insula determined the outer boarder of the insula in its entirety and the the Central sulcus (cesi) determined the for border between anterior and posterior Insula (coronal view). 
diff --git a/PW28_2018_GranCanaria/Projects/MatlabToPython/README.md b/PW28_2018_GranCanaria/Projects/MatlabToPython/README.md index 6a14b3406..f6f7b07cf 100644 --- a/PW28_2018_GranCanaria/Projects/MatlabToPython/README.md +++ b/PW28_2018_GranCanaria/Projects/MatlabToPython/README.md @@ -18,7 +18,7 @@ Matlab Bridge is a very quick and convenient way to combine the display features ## Approach and Plan -1. Investigate freeware to convert Matlab code to Python. +1. Investigate freeware to convert Matlab code to Python. 1. Document a complete pipeline for Matlab-proficient non-developers on the process of creating a Python module. ## Progress and Next Steps @@ -27,11 +27,11 @@ Progress - A couple of examples of Matlab Bridge modules with all their dependent functions were collected here: https://www.dropbox.com/sh/36vvhsi90z90arq/AACTInsRPBkQdhaH8jf_qml9a?dl=0 -- It was concluded that conversion needs to be done manually - Matlab to Python converters would not work well for this purpose. +- It was concluded that conversion needs to be done manually - Matlab to Python converters would not work well for this purpose. - The tutorial “Programming in Slicer4” by Sonia Pujol and Steve Pieper (https://www.dropbox.com/s/wrhrvvmplosiis1/Slicer4_ProgrammingTutorial_SPujol-SPieper_Nightly.pdf?dl=0#) was worked through and summarized in the following document for quick reference: https://www.dropbox.com/s/0wukoaesndf3ug4/SlicerPython.pdf?dl=0 -Next steps: +Next steps: - Add a summary of the tutorial “Developing and contributing extensions for 3D Slicer” by Andrey Fedorov, Jean-Christophe Fillion-Robin, and Steve Pieper (https://docs.google.com/presentation/d/1JXIfs0rAM7DwZAho57Jqz14MRn2BIMrjB17Uj_7Yztc/edit#slide=id.g420896289_0216) to the document. - Summarize essential Python for Matlab users and add to the document. 
diff --git a/PW28_2018_GranCanaria/Projects/MedicalInfraredImagingWithSlicer/README.md b/PW28_2018_GranCanaria/Projects/MedicalInfraredImagingWithSlicer/README.md index 44486a9ee..d041d4152 100644 --- a/PW28_2018_GranCanaria/Projects/MedicalInfraredImagingWithSlicer/README.md +++ b/PW28_2018_GranCanaria/Projects/MedicalInfraredImagingWithSlicer/README.md @@ -36,12 +36,12 @@ It is intended mainly for monitoring of foot ulcers in diabetic patients. The in ## Progress and Next Steps - 1. Progress + 1. Progress - Acquisition module has been finished - Initial version of the segmentation & registration modules have been developed - The new Slicer module for semi-automatic foot ulcer assessment is in progress - 2. Next steps + 2. Next steps - To finish the module for semi-automatic foot ulcer assessment - To improve the camera calibration procedure - To test machine learning approaches for feet segmentation @@ -62,7 +62,7 @@ Temperature Comparisons. ![](https://gph.is/2KwjY1v) -## Acquisition and automatic registration & segmentation modules +## Acquisition and automatic registration & segmentation modules diff --git a/PW28_2018_GranCanaria/Projects/MultiVolumeRendering/README.md b/PW28_2018_GranCanaria/Projects/MultiVolumeRendering/README.md index 381cc9125..3d873dd86 100644 --- a/PW28_2018_GranCanaria/Projects/MultiVolumeRendering/README.md +++ b/PW28_2018_GranCanaria/Projects/MultiVolumeRendering/README.md @@ -14,7 +14,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## Objective -* Review the newly exposed GLSL hooks in VTK as a mechanism to add features to Slicer's Volume Rendering +* Review the newly exposed GLSL hooks in VTK as a mechanism to add features to Slicer's Volume Rendering * Possible features to explore * Optimized performance/quality for multiple overlapping volumes * Custom clipping or other rendering features @@ -66,7 +66,7 @@ Back to [Projects List](../../README.md#ProjectsList) | ![](edge-and-shading.png) | ![](decluttered-crop.png) | 
![](volume-carving-crop.png) | -| Blood flow animation in PRISM | | | +| Blood flow animation in PRISM | | | | --- | --- | --- | | ![](flow-illustration-crop.png) | | | diff --git a/PW28_2018_GranCanaria/Projects/NeedleSegmentationDeployment/README.md b/PW28_2018_GranCanaria/Projects/NeedleSegmentationDeployment/README.md index caafcf969..3a2a19186 100644 --- a/PW28_2018_GranCanaria/Projects/NeedleSegmentationDeployment/README.md +++ b/PW28_2018_GranCanaria/Projects/NeedleSegmentationDeployment/README.md @@ -9,7 +9,7 @@ Back to [Projects List](../../README.md#ProjectsList) - Maria Francesca Spadea (Magna Graecia University, Catanzaro, Italy) ## Participating remotely -- Guillaume Pernelle +- Guillaume Pernelle - Alireza Mehrtash # Project Description diff --git a/PW28_2018_GranCanaria/Projects/OpenIGTLinkIODevelopment/README.md b/PW28_2018_GranCanaria/Projects/OpenIGTLinkIODevelopment/README.md index 458584dfc..472037e37 100644 --- a/PW28_2018_GranCanaria/Projects/OpenIGTLinkIODevelopment/README.md +++ b/PW28_2018_GranCanaria/Projects/OpenIGTLinkIODevelopment/README.md @@ -33,7 +33,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## Approach and Plan * Complete and improve refactoring of command messages. -* Embed information about images in the image message itself (currently sent as string messages to CustusX, see [PlusDeviceSet_Server_BkProFocusOem.xml](https://github.com/PlusToolkit/PlusLibData/blob/d2dcc2d2b8ad84eea14bd6147dcf289da1e4f405/ConfigFiles/PlusDeviceSet_Server_BkProFocusOem.xml) ) +* Embed information about images in the image message itself (currently sent as string messages to CustusX, see [PlusDeviceSet_Server_BkProFocusOem.xml](https://github.com/PlusToolkit/PlusLibData/blob/d2dcc2d2b8ad84eea14bd6147dcf289da1e4f405/ConfigFiles/PlusDeviceSet_Server_BkProFocusOem.xml) ) * Fix limited length device names. Troncate long names and put complete name in meta-data. 
* Create a command-line example in OpenIGTLinkIO that implements a simple but complete tracked US session: * launch a PlusServer with a config that simulates US and includes tracking data @@ -44,7 +44,7 @@ Back to [Projects List](../../README.md#ProjectsList) * Export acquired images before shutting down * Improve Plus server launcher * Possible extension: Add functionality for combining streams in the OpenIGTLinkIO client: - * Let's say you got one or more servers streaming several streams. Some of these streams may have to be combined in the client. It may be possible to add this functionality to OpenIGTLinkIO, so that users of the library don't have to create their own solutions. These combined tools may be similar to the combinations set up in the PLUS config file, but that may not be necssary, as the client may have other preferences, and OpenIGTLinkIO should not rely on the internal structures in PLUS. Examples: + * Let's say you got one or more servers streaming several streams. Some of these streams may have to be combined in the client. It may be possible to add this functionality to OpenIGTLinkIO, so that users of the library don't have to create their own solutions. These combined tools may be similar to the combinations set up in the PLUS config file, but that may not be necssary, as the client may have other preferences, and OpenIGTLinkIO should not rely on the internal structures in PLUS. Examples: * PLUS streams ultrasound video, and some tracking hardware streams the positions of all the tools. * The setup uses a tool that supplies several positions at the same time. * Different hardware may all stream OpenIGTLink/OpenIGTLinkIO messages directly, and PLUS may not be used at all. @@ -69,7 +69,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## Future work * Define a standard for Plus to timestamp every data item send it in the metadata. -* vtkPlusDevice should include a container for metadata to be send. 
This way, each device type can register data that it sends with every message. Currently, this is done with FrameFields in vtkPlusUSDevice, but it should be generalized to all device types. The list of available properties is now defined in the new OpenIGTLinkIO igtlioUsSectorDefinitions. This should be moved to a more generic file that contains properties for all device types. +* vtkPlusDevice should include a container for metadata to be send. This way, each device type can register data that it sends with every message. Currently, this is done with FrameFields in vtkPlusUSDevice, but it should be generalized to all device types. The list of available properties is now defined in the new OpenIGTLinkIO igtlioUsSectorDefinitions. This should be moved to a more generic file that contains properties for all device types. * Create a handshaking protocol in OpenIGTLink that gives the users of the library information about the capabilities of both the client and the server. Like: * Max OpenIGTLink version * Max OpenIGTLink header version @@ -77,7 +77,7 @@ Back to [Projects List](../../README.md#ProjectsList) * Initial suggestion: Create a deviceNameLong field in the OpenIGTLink meta information field that provides the device name in a format that is not truncated. This is really a hack to fix a bug introduced by Plus. At the moment Plus don't follow the OpenIGTLink standard by not making sure that the deviceName is unique? * A better solution would probably be that Plus makes sure that deviceName is unique. Additional information should be added as meta data instead (transform pipeline etc.). * Implement support in OpenIGTLinkIO for combining streams into tools. The information needed for this can be set as meta data information. This recreation of tools combining multiple streams don't necessarily have to mirror the structures in the PLUS config file, and it have to be possible to combine streams from several sources at the same time (also from sources outside PLUS). 
- + # Illustrations diff --git a/PW28_2018_GranCanaria/Projects/PreclinicalDataImport/README.md b/PW28_2018_GranCanaria/Projects/PreclinicalDataImport/README.md index f0db26b46..3e90909f3 100644 --- a/PW28_2018_GranCanaria/Projects/PreclinicalDataImport/README.md +++ b/PW28_2018_GranCanaria/Projects/PreclinicalDataImport/README.md @@ -1,6 +1,6 @@ Back to [Projects List](../../README.md#ProjectsList) -# Improve/Test multivolume preclinical MRI data import (DCE, DTI). +# Improve/Test multivolume preclinical MRI data import (DCE, DTI). ## Key Investigators @@ -21,7 +21,7 @@ Multivolume DICOM data from Bruker preclinical MRI scanners (Paravision version ## Approach and Plan 1. Collect examples of preclinical data. -1. The first correction for DCE is to make sure the frame time in DCE MRI is not merely copied from the 'RepetitionTime' field in the DICOM files. Instead, this should be multiplied by the number of phase encoding steps. +1. The first correction for DCE is to make sure the frame time in DCE MRI is not merely copied from the 'RepetitionTime' field in the DICOM files. Instead, this should be multiplied by the number of phase encoding steps. 1. As a prototype, implement a Matlab Bridge module "LoadBruker" that correctly loads all the various Bruker data sets. 1. Implement the fixes in Python, either as one module for Bruker MRI data, or within the existing data loading modules, multivolumeimporter and DWIconvert. diff --git a/PW28_2018_GranCanaria/Projects/ProstateZoneSegmentation/README.md b/PW28_2018_GranCanaria/Projects/ProstateZoneSegmentation/README.md index 1392316fb..83872be36 100644 --- a/PW28_2018_GranCanaria/Projects/ProstateZoneSegmentation/README.md +++ b/PW28_2018_GranCanaria/Projects/ProstateZoneSegmentation/README.md @@ -15,14 +15,14 @@ The goal of this project is to evaluate and extend variants of a volumetric CNN ## Objective -1. Results have to get better and become more stable to be reliable. +1. 
Results have to get better and become more stable to be reliable. ## Approach and Plan -1. Extend data augmentation (translation and scaling). +1. Extend data augmentation (translation and scaling). 1. Explore network extensions (deeper network, more filters). 1. Add more regulatrizers to prevent overfitting and evaluate their effect on the outcome. -1. Initialize a new approach: Generative Adverserial Network? +1. Initialize a new approach: Generative Adverserial Network? ## Progress and Next Steps 1. split up data into training and validation sets so that they have more equally distributed anatomy characteristics -> did not improve output much diff --git a/PW28_2018_GranCanaria/Projects/RegistrationUncertainty/README.md b/PW28_2018_GranCanaria/Projects/RegistrationUncertainty/README.md index 75a754176..9b5c7fcb9 100644 --- a/PW28_2018_GranCanaria/Projects/RegistrationUncertainty/README.md +++ b/PW28_2018_GranCanaria/Projects/RegistrationUncertainty/README.md @@ -10,7 +10,7 @@ Back to [Projects List](../../README.md#ProjectsList) The overall goal is to use Gaussian process regression to estimate both deformations, as well as posterior distributions on deformations, from pairs of corresponding image -features. +features. @@ -24,7 +24,7 @@ This week we will implement a radial basis function spline using convolution and -1. +1. ## Progress and Next Steps diff --git a/PW28_2018_GranCanaria/Projects/STIM_ICM_Project/README.md b/PW28_2018_GranCanaria/Projects/STIM_ICM_Project/README.md index e8472063d..63117a20b 100644 --- a/PW28_2018_GranCanaria/Projects/STIM_ICM_Project/README.md +++ b/PW28_2018_GranCanaria/Projects/STIM_ICM_Project/README.md @@ -14,9 +14,9 @@ To improve and update the Slicer plug-ins we have developed for pyDBS and EpiLOC ## Objective -1. PyDBS PostOperative Report Plug-in. pyDBS is use mostly to localize the electrodes implanted in some regions of the Basal Ganglia, to tune the stimulation with regard to the surrounded anatomy. 
We process a big amount of data coming from multicentric research projects and for clinicians. We provide to our users a quantitative and qualitative report but also a visual report for each subject processed, with some almost-standar views. +1. PyDBS PostOperative Report Plug-in. pyDBS is use mostly to localize the electrodes implanted in some regions of the Basal Ganglia, to tune the stimulation with regard to the surrounded anatomy. We process a big amount of data coming from multicentric research projects and for clinicians. We provide to our users a quantitative and qualitative report but also a visual report for each subject processed, with some almost-standar views. 2. EpiPlan slicer Plug-in. We have developed a prototype to help the neurologists and anatomists of our center to plan the surgical procedure to perform intracranial EEG exploration of certain epileptic patients. We want to enhance the plug-in by adding new features and making the graphical interface and user interface interactions more robust -3. MrTrix Tracking on PyDBS. +3. MrTrix Tracking on PyDBS. ## Approach and Plan 1. Get feedback from slicer team diff --git a/PW28_2018_GranCanaria/Projects/SegmentEditor/README.md b/PW28_2018_GranCanaria/Projects/SegmentEditor/README.md index 4ac5e1e11..89826ac2a 100644 --- a/PW28_2018_GranCanaria/Projects/SegmentEditor/README.md +++ b/PW28_2018_GranCanaria/Projects/SegmentEditor/README.md @@ -38,7 +38,7 @@ Back to [Projects List](../../README.md#ProjectsList) * Consider speeding up segmentation by using a stylus instead of a mouse, and explore faster ways of slice navigation (3D mouse, webcam tracking, etc?) 
* Brachytherapy needle segmentation in MRI for training deep learning models * Possible solution: Add "tube" mode from Markups to Models extension to Segment Editor, as an option to surface cut or a new effect in a new extension - * Deep learning training support: need cohort segmentation slicelet that allows configuring segment editor before segmenting first patient for simpler, less distracting user interface for easier usage and smoother and more reliable segmentation workflow + * Deep learning training support: need cohort segmentation slicelet that allows configuring segment editor before segmenting first patient for simpler, less distracting user interface for easier usage and smoother and more reliable segmentation workflow * Minor help to multiple projects involving segmentation # Illustrations diff --git a/PW28_2018_GranCanaria/Projects/SegmentationGeometryWidget/README.md b/PW28_2018_GranCanaria/Projects/SegmentationGeometryWidget/README.md index c77187b71..157d82ead 100644 --- a/PW28_2018_GranCanaria/Projects/SegmentationGeometryWidget/README.md +++ b/PW28_2018_GranCanaria/Projects/SegmentationGeometryWidget/README.md @@ -12,7 +12,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## Objective * Control over the geometry of the labelmap edited within the segmentation in Segment Editor is important, but so far the only possibility to specify it was by selecting the master volume before adding the first segment. This caused a lot of confusion. -* If we had a widget to specify labelmap geometry from existing nodes and some advanced settings, then it would facilitate using finer segmentations without the Crop Volumes workaround, editing on oblique planes, and other cases of using special geometries. 
+* If we had a widget to specify labelmap geometry from existing nodes and some advanced settings, then it would facilitate using finer segmentations without the Crop Volumes workaround, editing on oblique planes, and other cases of using special geometries. ## Approach and Plan @@ -25,7 +25,7 @@ Back to [Projects List](../../README.md#ProjectsList) * Model node * ROI node * Add button to Segment Editor module in the row of the master volume. When pressed, a dialog containing the widget is opened, and when OK'd, then the labelmaps are resampled -* Replace reference image geometry button in segmentation conversion parameters dialog. It only saves the referfene image geometry conversion parameter, and doesn't do actual resampling. This means that it's only applied when explicitly converting to labelmap from other representation - as expected from how the advanced conversion parameters are set. +* Replace reference image geometry button in segmentation conversion parameters dialog. It only saves the referfene image geometry conversion parameter, and doesn't do actual resampling. This means that it's only applied when explicitly converting to labelmap from other representation - as expected from how the advanced conversion parameters are set. ## Progress and Next Steps diff --git a/PW28_2018_GranCanaria/Projects/SlicerCIPQuantitativeReportsTool/README.md b/PW28_2018_GranCanaria/Projects/SlicerCIPQuantitativeReportsTool/README.md index 0f9374c1d..89064bade 100644 --- a/PW28_2018_GranCanaria/Projects/SlicerCIPQuantitativeReportsTool/README.md +++ b/PW28_2018_GranCanaria/Projects/SlicerCIPQuantitativeReportsTool/README.md @@ -23,7 +23,7 @@ Slicer CIP has been conceived as a workstation for radiologists, but is also sui ## Objective -1. Extend and generalize last year report tool to be used in all SlicerCIP modules. +1. Extend and generalize last year report tool to be used in all SlicerCIP modules. 
## Approach and Plan diff --git a/PW28_2018_GranCanaria/Projects/TBI/README.md b/PW28_2018_GranCanaria/Projects/TBI/README.md index 8741017ec..ff8c24f01 100644 --- a/PW28_2018_GranCanaria/Projects/TBI/README.md +++ b/PW28_2018_GranCanaria/Projects/TBI/README.md @@ -1,6 +1,6 @@ Back to [Projects List](../../README.md#ProjectsList) -### Longitudinal analysis of white matter connectivity changes prompted by concussions +### Longitudinal analysis of white matter connectivity changes prompted by concussions ## Key Investigators @@ -19,7 +19,7 @@ Concussions (also known as mild traumatic brain injuries, or mTBIs) have relativ ## Approach and Plan -The two PIs held preliminary discussions to identify Slicer modules and other software which can be used to automatically trace white matter fasciculi in dMRI volumes acquired from patients with concussions. It was agreed that the methodologies developed by the O'Donnell research group could have substantial potential for the longitudinal analysis being undertaken by the Irimia Laboratory at USC. One direction of particular interest involves the application of streamline analysis and atlas-based fiber labeling to identify perilesional fibers whose integrity could be affected by the breakdown of the blood-brain barrier (BBB). +The two PIs held preliminary discussions to identify Slicer modules and other software which can be used to automatically trace white matter fasciculi in dMRI volumes acquired from patients with concussions. It was agreed that the methodologies developed by the O'Donnell research group could have substantial potential for the longitudinal analysis being undertaken by the Irimia Laboratory at USC. One direction of particular interest involves the application of streamline analysis and atlas-based fiber labeling to identify perilesional fibers whose integrity could be affected by the breakdown of the blood-brain barrier (BBB). 
## Progress and Next Steps @@ -35,7 +35,7 @@ The PIs agreed to perform a feasibility assessment in order to determine the bes # Background and References -More information on research being conducted in the Irimia Laboratory at USC can be found at [www.andrei-irimia.com]. +More information on research being conducted in the Irimia Laboratory at USC can be found at [www.andrei-irimia.com]. More information about the white matter atlas and clustering from Dr. O'Donnell's group can be found at [http://dmri.slicer.org/atlases/] diff --git a/PW28_2018_GranCanaria/README.md b/PW28_2018_GranCanaria/README.md index dd8fb01a4..334db4a57 100644 --- a/PW28_2018_GranCanaria/README.md +++ b/PW28_2018_GranCanaria/README.md @@ -18,7 +18,7 @@ To receive information about this and future events please join the [Project Wee - **Dates:** June 25-29, 2018. - **Location:** - - Hosted by: [Universidad de Las Palmas de Gran Canaria](https://www.google.com/maps/place/University+of+Las+Palmas+de+Gran+Canaria/@28.0990225,-16.5409312,8z/data=!4m5!3m4!1s0xc409514173e77eb:0xbda0edfa5e221aaa!8m2!3d28.0990178!4d-15.4203257) + - Hosted by: [Universidad de Las Palmas de Gran Canaria](https://www.google.com/maps/place/University+of+Las+Palmas+de+Gran+Canaria/@28.0990225,-16.5409312,8z/data=!4m5!3m4!1s0xc409514173e77eb:0xbda0edfa5e221aaa!8m2!3d28.0990178!4d-15.4203257) - [3D View/Map of venue location](https://bit.ly/2Hkm6Mi) - **REGISTRATION:** [Register here](https://www.fulp.es/na-mic-summer-event-2018). Registration Fee: €300. - **Hotel:** [Marriott Iberia](http://achotels.marriott.com/hotels/ac-hotel-iberia-las-palmas), [Parque](http://hotelparqueenlaspalmas.com/en/). Deadline to book at preferential rates is March 30, 2018. 
@@ -28,11 +28,11 @@ To receive information about this and future events please join the [Project Wee - Taxi (line at the airport) - [Bus -line 60-](https://www.guaguasglobal.com/en/lineas-horarios/linea/) - [Airport-San Telmo bus station](https://www.google.es/maps/dir/Aeropuerto+de+Gran+Canaria,+Autopista+GC-1,+s%2Fn,+35230,+Las+Palmas/Estacion+De+Guaguas+SAN+TELMO,+Av.+Rafael+Cabrera,+30,+35002+Las+Palmas+de+Gran+Canaria,+Las+Palmas/@28.0183155,-15.5470931,11z/data=!4m17!4m16!1m5!1m1!1s0xc40a266c3662d1d:0x824bcf7e159f85d4!2m2!1d-15.3877066!2d27.9331848!1m5!1m1!1s0xc4095850670520b:0x5eef5b2e4c79e9e3!2m2!1d-15.415777!2d28.1092527!2m2!7e2!8j1529431200!3e3) - - [Paths-distances: San Telmo bus station - Hotels](https://raw.githubusercontent.com/NA-MIC/ProjectWeek/master/PW28_2018_GranCanaria/Station-Hotels.png) - + - [Paths-distances: San Telmo bus station - Hotels](https://raw.githubusercontent.com/NA-MIC/ProjectWeek/master/PW28_2018_GranCanaria/Station-Hotels.png) + ## Local Organizing Committee - + - Host: Juan Ruiz-Alzola, PhD, Professor of Imaging Technologies at [Universidad de Las Palmas de Gran Canaria](http://www.ulpgc.es), Director of Medical and Audiovisual Technology Group (GTMA for Grupo de Tecnología Médica y Audiovisual), Research Institute in Biomedical and Health Sciences (IUIBS for Instituto Universitario de Investigación Biomédica y Sanitaria), Research Affiliate at the Canary Islands Instiute of Astrophysics. 
- Email Local Organzing Committee: [Juan Ruiz Alzola](mailto:juan.ruiz@ulpgc.es?cc=tkapur@bwh.harvard.edu&subject=ProjectWeek28), [Maria Dolores Afonso Suarez](mailto:marilola.afonso@ulpgc.es?cc=tkapur@bwh.harvard.edu&subject=ProjectWeek28), [Asmaa Skareb](mailto:asmaa.skareb@ulpgc.es?cc=tkapur@bwh.harvard.edu&subject=ProjectWeek28) @@ -40,7 +40,7 @@ To receive information about this and future events please join the [Project Wee - [Information about the Venue](https://medtec4susdev.github.io/ProjectDemoProgressing/) -## **VENUE LOCATION** +## **VENUE LOCATION** ![#f03c15](https://placehold.it/15/f03c15/000000?text=+) [3D View/Map of venue location 25th June](https://www.google.com/maps/place/AC+Hotel+Iberia+Las+Palmas/@28.1112024,-15.4139154,240a,35y,279.49h,55.22t/data=!3m1!1e3!4m7!3m6!1s0x0:0xd874c662b7a1c59a!5m1!1s2018-06-30!8m2!3d28.1120498!4d-15.4172581) @@ -48,7 +48,7 @@ To receive information about this and future events please join the [Project Wee ## INFOPACK -- [Infopack](https://github.com/NA-MIC/ProjectWeek/blob/master/PW28_2018_GranCanaria/Infopack%20-%20NAMIC%20SUMMER%20EVENT%202018.pdf) +- [Infopack](https://github.com/NA-MIC/ProjectWeek/blob/master/PW28_2018_GranCanaria/Infopack%20-%20NAMIC%20SUMMER%20EVENT%202018.pdf) ## Frequently Asked Questions @@ -63,7 +63,7 @@ To receive information about this and future events please join the [Project Wee ## Preparatory Videoconferences -- Zoom video conference: To join the videoconference, click [here](https://zoom.us/j/920891732) on Tuesdays, 10am Boston time, starting April 24, 2018 +- Zoom video conference: To join the videoconference, click [here](https://zoom.us/j/920891732) on Tuesdays, 10am Boston time, starting April 24, 2018 - Conference call notes: To access these, click [here](PreparatoryMeetingsNotes.md). ## Program @@ -86,7 +86,7 @@ Visualization and Interoperability 1. 
[OpenIGTLinkIO Development](Projects/OpenIGTLinkIODevelopment/README.md) (Simon Drouin, Csaba Pinter, Andras Lasso, Ole Vegard Solberg, Geir Arne Tangen) 1. [Add 3D views linking capabilities](Projects/3DViewsLinking/README.md) (Davide Punzo, Andras Lasso, Steve Pieper, Jean-Christophe Fillion-Robin, Simon Drouin) 1. [DICOMweb related projects: OHIFViewer, Siemens teamplay, ctkDICOMweb, using secure DICOMweb](Projects/DICOMweb/README.md) (Michael Kelm, Steve Pieper, Erik Ziegler, Marco Nolden, Jonas Scherer, Tina Kapur) -1. [Raw Image Read and Display](Projects/RawImageGuess/README.md) (Attila Nagy, Csaba Pinter) +1. [Raw Image Read and Display](Projects/RawImageGuess/README.md) (Attila Nagy, Csaba Pinter) 1. [Improve/Test multivolume preclinical MRI data import (DCE, DTI, ASL, T1 mapping](Projects/PreclinicalDataImport/README.md) (Sharon Peled, Andras Lasso, Lauren O'Donnell) 1. [Conversion of Matlab Bridge modules to integrated 3DSlicer modules](Projects/MatlabToPython/README.md) (Sharon Peled, Andras Lasso) @@ -102,7 +102,7 @@ New Applications and Customizations of 3D Slicer 1. [Evaluation of projects: simulator of bone sliding, augmented reality system to be used in surgeries, software for orthognatic planning](Projects/EvaluationOfProjects/README.md) (Donato Monopoli, Javier González, Juan Ruiz-Alzola) 1. [Slicer Ecosystems Education for Newcomers & Developing Countries](Projects/3DSlicerTrainingPrograms/README.md) (GTMA Group, Juan Ruiz-Alzola) 1. [Atlas development for education](Projects/AtlasDevelopmentForEducation/README.md) (Babacar Diao, Ahmedou Moulaye Idriss, Mohamed Septy, Alexandra Fernandes Rodrigues, Cheick Tidiane, Nayra Pumar, Xerach Suárez, Juan Ruiz-Alzola) -1. [Custom GUI for an US simulator training system](Projects/CustomGUIForUSSimulator/Readme.md) (José Carlos Ruiz-Luque, Guillermo Valentín Socorro-Marrero, Juan Ruiz-Alzola) +1. 
[Custom GUI for an US simulator training system](Projects/CustomGUIForUSSimulator/README.md) (José Carlos Ruiz-Luque, Guillermo Valentín Socorro-Marrero, Juan Ruiz-Alzola) 1. [Medical Infrared Imaging with Slicer](Projects/MedicalInfraredImagingWithSlicer/README.md) (Yolanda Martin-Hernando, Abián Hernández, Jorge Quintero Nehrkorn, Enrique Villa, José-Carlos Ruiz-Luque, Natalia Arteaga-Marrero, Juan Ruiz-Alzola) 1. [3DSlicer models for serious games](Projects/3DSlicerModelsForSeriousGames/README.md) (Sergi Bermudez i Badia, Yuri Almedia, Artemisa Moreno, Abián Hernández, María Dolores Afonso-Suárez, Juan Ruiz-Alzola) 1. [Implementing 1H Time resolved Functional Magnetic Resonance Spectroscopy with quantification of broad metabolite spectrum in 3D Slicer](Projects/Implementing1HTimeResolvedFunctionalMagneticResonanceSpectroscopy/README.md) (Francisco-José Marcano-Serrano, José Luis González Mora, Juan Ruiz-Alzola) @@ -129,7 +129,7 @@ Do not add your name to this list - it is maintained by the organizers based on 1. Sharon Peled, (@speled)(speled@bwh.harvard.edu) - Brigham and Women's Hospital and Harvard Medical School - USA 1. Davide Punzo, (@punzo)(punzodavide@hotmail.it) - Kapteyn Astronomical Institute, University of Groningen - Netherlands -1. Csaba Pinter, (@cpinter)(csaba.pinter@queensu.ca) - Queen’s University - Canada +1. Csaba Pinter, (@cpinter)(csaba.pinter@queensu.ca) - Queen’s University - Canada 1. Filip Szczepankiewicz, (filip.szczepankiewicz@gmail.com) - Brigham and Women's Hospital and Harvard Medical School - USA 1. Tina Kapur, (@tkapur)(tkapur@bwh.harvard.edu) - Brigham and Women's Hospital and Harvard Medical School - USA 1. Mohamed El Moctar Septy (@msepty)(moksepty@yahoo.fr) - Faculté de Médecine, Université de Nouakchott Al Aasriya - Mauritania @@ -138,7 +138,7 @@ Do not add your name to this list - it is maintained by the organizers based on 1. 
Joseane Alexandre Da Rosa de Pina Ferreira (joseane.ferreira@han.gov.cv) - Hospital Agostinho Neto de Praia - Cabo Verde 1. Artemisa Mendes Moreno (artemisa.moreno@docente.unicv.edu.cv) - Faculdade de Ciências e Tecnologia, Universidade de Cabo Verde - Cabo Verde 1. Cheikh Tidiane Diop (chtdiop81@gmail.com) - Centre Hospitalier National Fann Dakar - Senegal -1. Oumar Kane (droumarkane@gmail.com) - Centre Hospitalier National Fann Dakar - Senegal +1. Oumar Kane (droumarkane@gmail.com) - Centre Hospitalier National Fann Dakar - Senegal 1. Babacar Diao (babacardiao104uro@yahoo.fr) - Ecole Militaire de Santé Dakar - Senegal 1. Juan Ruiz Alzola (@jruizalz)(juan.ruiz@ulpgc.es) - University of Las Palmas de Gran Canaria / Instituto de Astrofísica de Canarias - Spain 1. Jose Carlos Ruiz Luque (carlos.luque@ulpgc.es) - University of Las Palmas de Gran Canaria - Spain @@ -167,15 +167,15 @@ Do not add your name to this list - it is maintained by the organizers based on 1. Yolanda Martín Hernando (yolanda.martin@iac.es) - Instituto de Astrofísica de Canarias - Spain 1. Enrique Villa Benito (evilla@iac.es) - Instituto de Astrofísica de Canarias - Spain 1. Natalia Arteaga Marrero (narteaga@iac.es) - Instituto de Astrofísica de Canarias - Spain -1. Michael Halle (@mhalle)(mhalle@bwh.harvard.edu) - Brigham and Women's Hospital and Harvard Medical School - USA +1. Michael Halle (@mhalle)(mhalle@bwh.harvard.edu) - Brigham and Women's Hospital and Harvard Medical School - USA 1. Yuri Ameida (yuri.almeida@m-iti.org) Madeira Madeira Interactive Technologies Institute - Portugal 1. Marco Nolden (M.Nolden@Dkfz-Heidelberg.de) - German Cancer Research Center (DKFZ) - Germany -1. Carl-Fredrik Westin (westin@bwh.harvard.edu) - Brigham and Women's Hospital and Harvard Medical School - USA +1. Carl-Fredrik Westin (westin@bwh.harvard.edu) - Brigham and Women's Hospital and Harvard Medical School - USA 1. Marko Rak (rak@isg.cs.ovgu.de) - University of Magdeburg - Germany 1. 
Andrei Irimia (irimia@usc.edu) - University of Southern California - USA 1. Anneke Meyer (anneke@isg.cs.uni-magdeburg.de) - University of Magdeburg - Germany 1. Tina Vajsbaher (tina.vajsbaher@gmail.com) - University of Bremen - Germany -1. Gino Gulamhussene (gino@isg.cs.ovgu.de) - Institute for Simulation and Graphics OvGU University Magdeburg - Germany +1. Gino Gulamhussene (gino@isg.cs.ovgu.de) - Institute for Simulation and Graphics OvGU University Magdeburg - Germany 1. Sara Fernández Vidal (sara.fdezvidal@gmail.com) - ICM Institute - France 1. Eric Bardinet (eric.bardinet@upmc.fr) - ICM Institute - France 1. José Luis González-Mora (jlgonzal@ull.edu.es) - University of La Laguna - Spain @@ -183,7 +183,7 @@ Do not add your name to this list - it is maintained by the organizers based on 1. Donato Monopoli (dmonopoli@itccanarias.org) - Intituto Tecnológico de Canarias - Spain 1. Javier González Fernández (jgonzalez@itccanarias.org) - Intituto Tecnológico de Canarias - Spain 1. Michael Kelm (michael.kelm@siemens-healthineers.com) - Siemens Healthcare GmbH - Germany -1. William Wells (sw@bwh.harvard.edu) - Brigham and Women's Hospital and Harvard Medical School - USA +1. William Wells (sw@bwh.harvard.edu) - Brigham and Women's Hospital and Harvard Medical School - USA 1. Jonas Scherer (jonas.scherer@dkfz.de) - German Cancer Research Center (DKFZ) - Germany ## Statistics diff --git a/PW29_2018_London_Canada/Projects/BrainVentricleSegment/README.md b/PW29_2018_London_Canada/Projects/BrainVentricleSegment/README.md index 7cbd188ec..9b00c2ea3 100644 --- a/PW29_2018_London_Canada/Projects/BrainVentricleSegment/README.md +++ b/PW29_2018_London_Canada/Projects/BrainVentricleSegment/README.md @@ -8,7 +8,7 @@ Back to [Projects List](../../README.md#ProjectsList) * To divide the brain ventricles into a number of sub-reions and segment them by drawing a number of planes based on the known anatomical structures in the brain ## Objective -1. 
Segmentation of the brain lateral ventricles. +1. Segmentation of the brain lateral ventricles. 2. Segemenation of the sub-regions within laterla ventricles. ## Approach and Plan diff --git a/PW29_2018_London_Canada/Projects/Brain_Tumour_Segment/README.md b/PW29_2018_London_Canada/Projects/Brain_Tumour_Segment/README.md index 796d27613..f53476be5 100644 --- a/PW29_2018_London_Canada/Projects/Brain_Tumour_Segment/README.md +++ b/PW29_2018_London_Canada/Projects/Brain_Tumour_Segment/README.md @@ -1,7 +1,7 @@ -## Brain Tumour Segmentation +## Brain Tumour Segmentation # Key Investigators -- Daiana Pur (Biomedical Engineering, Western University) +- Daiana Pur (Biomedical Engineering, Western University) # Project Description 1. Learn and improve existing code for extension @@ -9,24 +9,24 @@ 2. Adding a Brain Tumour Segmentation feature to an existing Neurosurgical Planning tool ## Objective -1. Visualize Tumour +1. Visualize Tumour 2. Learn the features of SegmentEditor -3. Learn features of module_segment_statistics +3. Learn features of module_segment_statistics 4. Introduce SegmentEditor features into existing scripted extension 5. Troubleshoot - + ## Approach and Plan 1. Work through different versions of NeuroPath extension (Adam Rankin) 2. Visualize different types of Tumours (high grade glioma vs low grade glioma etc) by obtaining sample data -3. Different types of tumours require different approaches depending on size, visiblility of contrast +3. Different types of tumours require different approaches depending on size, visiblility of contrast ## Progress and Next Steps -1. Troubleshooting existing extension +1. Troubleshooting existing extension -2. Obtained Sample Data, tried different settings for creating mask over tumors +2. Obtained Sample Data, tried different settings for creating mask over tumors 3. module_segment_statistics module computes volume, surface, mean intensity, and various other metrics for each segment. 
@@ -41,6 +41,3 @@ https://sites.duke.edu/pcqiba/2018/05/13/new-protocol-for-tumor-segmentation-usi http://www2.imm.dtu.dk/projects/BRATS2012/Jakab_TumorSegmentation_Manual.pdf https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3991434/ http://slicer.readthedocs.io/en/latest/user_guide/module_segmenteditor.html - - - diff --git a/PW29_2018_London_Canada/Projects/CenterlinesVMTK/README.md b/PW29_2018_London_Canada/Projects/CenterlinesVMTK/README.md index baf435c82..320ae0ad4 100644 --- a/PW29_2018_London_Canada/Projects/CenterlinesVMTK/README.md +++ b/PW29_2018_London_Canada/Projects/CenterlinesVMTK/README.md @@ -7,7 +7,7 @@ Back to [Projects List](../../README.md#ProjectsList) - Michael Schumaker (Sunnybrook Research Institute) - Eman Arnout (LHSC) - Olga Trichtchenko (Western) -- Jean-Christophe Fillion-Robin (Kitware) +- Jean-Christophe Fillion-Robin (Kitware) # Project Description @@ -33,9 +33,9 @@ Our interest is in using and enhancing Slicer's Vascular Modelling Toolkit (VMTK 2. Made a testing module to try different centerline calculation options, though there's still a lot of things to try. 3. Found that VMTK centreline extraction does not automatically force starting and ending at the defined source and endpoints. See tutorial at [http://www.vmtk.org/tutorials/Centerlines.html](http://www.vmtk.org/tutorials/Centerlines.html) 4. Found Slicer VMTK code from 2007-2010 project weeks, started looking at differences with current module. -5. Can use [TubeTK](https://github.com/KitwareMedical/ITKTubeTK) instead for extracting center lines [http://www.tubetk.org/](http://www.tubetk.org/). -6. One option for computing fluid flow, is to export an STL file in OpenFOAM. This can be done simply by using segmentation and creating the appropriate surface. -7. 
Second option for fluid flow is to use the meshing algorithm in VMTK [http://www.vmtk.org/tutorials/MeshGeneration.html](http://www.vmtk.org/tutorials/MeshGeneration.html) which has more specific features for blood vessels. This has not yet been implemented in Slicer. For obtaining a finer mesh, this algorithm relies on accurate computation of centerlines that include endpoints. +5. Can use [TubeTK](https://github.com/KitwareMedical/ITKTubeTK) instead for extracting center lines [http://www.tubetk.org/](http://www.tubetk.org/). +6. One option for computing fluid flow, is to export an STL file in OpenFOAM. This can be done simply by using segmentation and creating the appropriate surface. +7. Second option for fluid flow is to use the meshing algorithm in VMTK [http://www.vmtk.org/tutorials/MeshGeneration.html](http://www.vmtk.org/tutorials/MeshGeneration.html) which has more specific features for blood vessels. This has not yet been implemented in Slicer. For obtaining a finer mesh, this algorithm relies on accurate computation of centerlines that include endpoints. # Illustrations @@ -54,4 +54,3 @@ Our interest is in using and enhancing Slicer's Vascular Modelling Toolkit (VMTK - Source code: [https://github.com/SunnybrookAngio/ProjectWeek2018.git](https://github.com/SunnybrookAngio/ProjectWeek2018.git) - diff --git a/PW29_2018_London_Canada/Projects/CurveTool/README.md b/PW29_2018_London_Canada/Projects/CurveTool/README.md index bcfe387a7..972aa81eb 100644 --- a/PW29_2018_London_Canada/Projects/CurveTool/README.md +++ b/PW29_2018_London_Canada/Projects/CurveTool/README.md @@ -36,7 +36,7 @@ Common markup requirements: 1. Collect requirements 1. Prepare infrastructure in Markups module for a new markup type. -1. Implement markup curve +1. 
Implement markup curve ## Progress and Next Steps @@ -53,4 +53,4 @@ Common markup requirements: - Source code: https://github.com/lassoan/Slicer (branch TBD) -- Markups to model extension: https://github.com/SlicerIGT/SlicerMarkupsToModel \ No newline at end of file +- Markups to model extension: https://github.com/SlicerIGT/SlicerMarkupsToModel diff --git a/PW29_2018_London_Canada/Projects/DeformableTransformTest/README.md b/PW29_2018_London_Canada/Projects/DeformableTransformTest/README.md index 380651a5f..acc4c4323 100644 --- a/PW29_2018_London_Canada/Projects/DeformableTransformTest/README.md +++ b/PW29_2018_London_Canada/Projects/DeformableTransformTest/README.md @@ -45,4 +45,3 @@ We have noticed in one application that registration using deformable transforms - Source code: https://github.com/ungi/DeformablePerformanceTest - [Test data (skull model)](https://1drv.ms/u/s!AhiABcbe1DBygplqTSr_rYWPhdOQeQ) - diff --git a/PW29_2018_London_Canada/Projects/FetalBrainSegmentationAndVolumization/README.md b/PW29_2018_London_Canada/Projects/FetalBrainSegmentationAndVolumization/README.md index 67769988d..3a785430c 100644 --- a/PW29_2018_London_Canada/Projects/FetalBrainSegmentationAndVolumization/README.md +++ b/PW29_2018_London_Canada/Projects/FetalBrainSegmentationAndVolumization/README.md @@ -1,15 +1,15 @@ Back to [Projects List](../../README.md#ProjectsList) -## Fetal Brain Segmentation and Volumization +## Fetal Brain Segmentation and Volumization ## Key Investigators - Estee Goldberg (Biomedical Engineering, Western University) -- Denis Kikinov (Software Engineering, Western University) -- Wenyao Xia (Medical Biophysics, Robarts Research Institute) +- Denis Kikinov (Software Engineering, Western University) +- Wenyao Xia (Medical Biophysics, Robarts Research Institute) # Project Description - + This will be a tool to segment the fetal brain from the a fetal MRI. 
Afterwards the fetal brain will be compiled into a brain volume for later comparisons. @@ -17,7 +17,7 @@ This will be a tool to segment the fetal brain from the a fetal MRI. Afterwards 1. Take any fetal MRI image 1. Semiautomatically segment the fetal brain -1. Semiautomatically produce a brain volume for the segmented brain +1. Semiautomatically produce a brain volume for the segmented brain ## Approach and Plan @@ -31,12 +31,12 @@ This will be a tool to segment the fetal brain from the a fetal MRI. Afterwards ### Progress -After some search we found no automatic modules or extensions that work for fetal MRIs. As such we have experimented with ways to manually segment and get a volume. We had some success with segment editor's semiautomatic segmentation, just a lot of corrections were required. +After some search we found no automatic modules or extensions that work for fetal MRIs. As such we have experimented with ways to manually segment and get a volume. We had some success with segment editor's semiautomatic segmentation, just a lot of corrections were required. ### Next Steps -Implement program as a module in 3D Slicer. +Implement program as a module in 3D Slicer. # Illustrations diff --git a/PW29_2018_London_Canada/Projects/InteractiveSegmentation/README.md b/PW29_2018_London_Canada/Projects/InteractiveSegmentation/README.md index 695842871..923880a69 100644 --- a/PW29_2018_London_Canada/Projects/InteractiveSegmentation/README.md +++ b/PW29_2018_London_Canada/Projects/InteractiveSegmentation/README.md @@ -3,11 +3,11 @@ Back to [Projects List](../../README.md#ProjectsList) ## Interactive Segmentation Using the SegmentEditor ## Key Investigators -- Houssem Gueziri (MNI) +- Houssem Gueziri (MNI) # Project Description - -Fast Delineation by Random Walker (FastDRaW) is a graph-based interactive segmentation approach implemented in Python. + +Fast Delineation by Random Walker (FastDRaW) is a graph-based interactive segmentation approach implemented in Python. 
This project aims at implementing a plugin in the SegmentEditor to perform FastDRaW in 3D Slicer. ## Objective @@ -48,4 +48,4 @@ This project aims at implementing a plugin in the SegmentEditor to perform FastD - Source code: https://github.com/hgueziri/FastDRaW-Segmentation - Documentation: [FastDRaW paper](http://www.hifiv.ca/wp-houssem/wp-content/uploads/2016/09/FastDRaW_camera-ready.pdf) -- Test data: +- Test data: diff --git a/PW29_2018_London_Canada/Projects/Learn_Slicer_to_make_QC_reports/README.md b/PW29_2018_London_Canada/Projects/Learn_Slicer_to_make_QC_reports/README.md index 797308ad8..3692f0a66 100644 --- a/PW29_2018_London_Canada/Projects/Learn_Slicer_to_make_QC_reports/README.md +++ b/PW29_2018_London_Canada/Projects/Learn_Slicer_to_make_QC_reports/README.md @@ -3,14 +3,14 @@ Back to [Projects List](../../README.md#ProjectsList) Learn 3d Slicer to make QC reports ## Key Investigators -- Investigator 1 Dimuthu Hemachandra +- Investigator 1 Dimuthu Hemachandra # Project Description My goal of the workshop to learn 3D slicer and use it to make a Quality Control (QC) reports for image registration used in BidsApps. ## Objective -1. Objective A. To learn 3D Slicer. -1. Objective B. Learn how to make a QC report using Slicer. +1. Objective A. To learn 3D Slicer. +1. Objective B. Learn how to make a QC report using Slicer. ## Approach and Plan @@ -29,6 +29,6 @@ My goal of the workshop to learn 3D slicer and use it to make a Quality Control -- Source code: -- Documentation: -- Test data: +- Source code: +- Documentation: +- Test data: diff --git a/PW29_2018_London_Canada/Projects/Learning3DSlicer/README.md b/PW29_2018_London_Canada/Projects/Learning3DSlicer/README.md index e30c020ac..2ad740390 100644 --- a/PW29_2018_London_Canada/Projects/Learning3DSlicer/README.md +++ b/PW29_2018_London_Canada/Projects/Learning3DSlicer/README.md @@ -20,7 +20,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## Approach and Plan 1. 
Attend the segmentation and IGT sessions. -1. Try making a module for data acquisition through TEE +1. Try making a module for data acquisition through TEE 1. Implement my Jupyter notebook code directly into Slicer @@ -31,5 +31,3 @@ Back to [Projects List](../../README.md#ProjectsList) # Illustrations # Background and References - - diff --git a/PW29_2018_London_Canada/Projects/Scanner Remote Control/README.md b/PW29_2018_London_Canada/Projects/Scanner Remote Control/README.md index 77246363a..ffa627a5b 100644 --- a/PW29_2018_London_Canada/Projects/Scanner Remote Control/README.md +++ b/PW29_2018_London_Canada/Projects/Scanner Remote Control/README.md @@ -2,7 +2,7 @@ # Key Investigators - Ahmed Mahran (Brigham and Women's Hospital) -- Franklin King (Brigham and Women's Hospital) +- Franklin King (Brigham and Women's Hospital) - Nobuhiko Hata (Brigham and Women's Hospital) - Jean-Christophe Fillion-Robin (Kitware Inc.) @@ -11,7 +11,7 @@ Establish network communication between SRC and 3D Slicer to control MR's scan p Python script running a JSON-based proprietary is used to communicate between the MR scanner and 3Dslicer. ## Objective -1. Establish Communication between MRI simulator and 3Dslicer +1. Establish Communication between MRI simulator and 3Dslicer 2. Use volume reslice driver to Set a new scan position and orientation 3. Reterieve DICOM images from simulator to 3Dslicer @@ -28,7 +28,7 @@ Python script running a JSON-based proprietary is used to communicate between th * update infrastructure to * compute coverage * execute test using pytest, start/stop scanner simulator automatically - + * Discussed approach to integrate the package with Slicer. 
Few options: * Switch the implementation to use [websocket-client](https://github.com/websocket-client/websocket-client) instead of [websockets](https://pypi.org/project/websockets/) so that it works with Python 2 * Leverage OpenIGTLink to send data to Slicer diff --git a/PW29_2018_London_Canada/Projects/Scanner Remote Control/README.me b/PW29_2018_London_Canada/Projects/Scanner Remote Control/README.me index de935815d..3802bc8fb 100644 --- a/PW29_2018_London_Canada/Projects/Scanner Remote Control/README.me +++ b/PW29_2018_London_Canada/Projects/Scanner Remote Control/README.me @@ -3,8 +3,8 @@ +#Key Investigators ++- Ahmed Mahran (Brigham and Women's Hospital) ++- Junichi Tokuda (Brigham and Women's Hospital) -++- Nobuhiko Hata (Brigham and Women's Hospital) -++- Franklin King (Brigham and Women's Hospital) +++- Nobuhiko Hata (Brigham and Women's Hospital) +++- Franklin King (Brigham and Women's Hospital) ++ ++ ++# Project Description @@ -12,9 +12,9 @@ ++Python script running a JSON-based proprietary is used to communicate between the MR scanner and 3Dslicer. ++ ++## Objective -++1. Objective A. Describe it in 1-2 sentences. -++1. Objective B. Describe it in 1-2 sentences. -++1. Objective C. Describe it in 1-2 sentences. +++1. Objective A. Describe it in 1-2 sentences. +++1. Objective B. Describe it in 1-2 sentences. +++1. Objective C. Describe it in 1-2 sentences. ++ ++## Approach and Plan ++ diff --git a/PW29_2018_London_Canada/Projects/SlicerAR/README.md b/PW29_2018_London_Canada/Projects/SlicerAR/README.md index 7a88efd23..0a04d52ad 100644 --- a/PW29_2018_London_Canada/Projects/SlicerAR/README.md +++ b/PW29_2018_London_Canada/Projects/SlicerAR/README.md @@ -17,7 +17,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## Approach and Plan -1. +1. 
## Progress and Next Steps @@ -38,4 +38,3 @@ Back to [Projects List](../../README.md#ProjectsList) - Source code: https://github.com/YourUser/YourRepository - Documentation: https://link.to.docs - Test data: https://link.to.test.data - diff --git a/PW29_2018_London_Canada/Projects/SlicerCamera/README.md b/PW29_2018_London_Canada/Projects/SlicerCamera/README.md index 0b4ee6f71..0c4533c01 100644 --- a/PW29_2018_London_Canada/Projects/SlicerCamera/README.md +++ b/PW29_2018_London_Canada/Projects/SlicerCamera/README.md @@ -10,7 +10,7 @@ Back to [Projects List](../../README.md#ProjectsList) Further developments in the SlicerCamera extension. Adding camera ray intersection capabilities and polishing up existing modules. ## Objective -1. Finalize the camera ray intersection module (for calculating minimum distance between rays). +1. Finalize the camera ray intersection module (for calculating minimum distance between rays). 1. Add documentation, icons, etc... to existing calibration module ## Approach and Plan @@ -26,4 +26,3 @@ Further developments in the SlicerCamera extension. Adding camera ray intersecti # Background and References - Source code: https://github.com/VASST/SlicerCamera - Documentation: https://github.com/VASST/SlicerCamera - diff --git a/PW29_2018_London_Canada/Projects/SlicerCustomApp/README.md b/PW29_2018_London_Canada/Projects/SlicerCustomApp/README.md index f6a5b6f66..97ebd31bc 100644 --- a/PW29_2018_London_Canada/Projects/SlicerCustomApp/README.md +++ b/PW29_2018_London_Canada/Projects/SlicerCustomApp/README.md @@ -36,7 +36,7 @@ SlicerCustomAppTemplate is a starting point for creating a custom 3D Slicer-base * Improved `SlicerCustomAppTemplate` to support packaging of Superbuild based extension -* Fixed [openigtlink/SlicerOpenIGTLink](https://github.com/openigtlink/SlicerOpenIGTLink) to support integration in Custom application. 
+* Fixed [openigtlink/SlicerOpenIGTLink](https://github.com/openigtlink/SlicerOpenIGTLink) to support integration in Custom application. * Next steps: * Move `qSlicerMainWindow`, `qSlicerAboutDialog` into `Slicer/Base/QtApp` for easier re-use diff --git a/PW29_2018_London_Canada/Projects/SlicerMitralValve/README.md b/PW29_2018_London_Canada/Projects/SlicerMitralValve/README.md index 54d0d67a1..52d5e710e 100644 --- a/PW29_2018_London_Canada/Projects/SlicerMitralValve/README.md +++ b/PW29_2018_London_Canada/Projects/SlicerMitralValve/README.md @@ -3,7 +3,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## SlicerMitralValve ## Key Investigators -- Patrick Carnahan (Robarts) +- Patrick Carnahan (Robarts) # Project Description This extension contains a collection of tools for aiding in patient-specific mitral valve modelling. Currently consists of components for registering bi-plane colour ultrasound into 3D, and interactive-automatically segmenting the mitral valve from 3D TEE data. @@ -50,5 +50,3 @@ This extension contains a collection of tools for aiding in patient-specific mit - Source code: https://github.com/pcarnah/SlicerMitralValve - - diff --git a/PW29_2018_London_Canada/Projects/SlicerPython/README.md b/PW29_2018_London_Canada/Projects/SlicerPython/README.md index 8cd2ce763..afeaeb06f 100644 --- a/PW29_2018_London_Canada/Projects/SlicerPython/README.md +++ b/PW29_2018_London_Canada/Projects/SlicerPython/README.md @@ -11,7 +11,7 @@ Back to [Projects List](../../README.md#ProjectsList) Python is getting the most widely used platform for medical image computing. It is important to make it easy to use and extend Slicer using Python. -[Jupyter notebook](https://en.wikipedia.org/wiki/IPython) is an interactive shell for executing scripts and viewing execution results. 
During last project week in Gran Canaria, we have implemented [SlicerJupyter](https://github.com/Slicer/SlicerJupyter) extension, which allows creating Jupyter notebooks that use Slicer. +[Jupyter notebook](https://en.wikipedia.org/wiki/IPython) is an interactive shell for executing scripts and viewing execution results. During last project week in Gran Canaria, we have implemented [SlicerJupyter](https://github.com/Slicer/SlicerJupyter) extension, which allows creating Jupyter notebooks that use Slicer. ## Objective diff --git a/PW29_2018_London_Canada/Projects/TEECalibration/README.md b/PW29_2018_London_Canada/Projects/TEECalibration/README.md index c12e1d45c..9226d28c3 100644 --- a/PW29_2018_London_Canada/Projects/TEECalibration/README.md +++ b/PW29_2018_London_Canada/Projects/TEECalibration/README.md @@ -1,26 +1,25 @@ Back to [Projects List](../../README.md#ProjectsList) -IGT module tutorial blog development +IGT module tutorial blog development ## Key Investigators - Leah Groves (Robarts) - Goli (Robarts) # Project Description -1. Finalize IGT module development blog +1. Finalize IGT module development blog 2. Present and get feedback on a IGT module development blog. ## Objective 1. Finalize the blog. -2. Get and apply feedback +2. Get and apply feedback ## Approach and Plan -1. Describe the general architecture of Slicer and bring together all the required resources for developing a Slicer IGT module. This includes providing video tutorial examples, skeleton code, and clear written descriptions. -2. Develop an easy to navigate blog. +1. Describe the general architecture of Slicer and bring together all the required resources for developing a Slicer IGT module. This includes providing video tutorial examples, skeleton code, and clear written descriptions. +2. Develop an easy to navigate blog. ## Progress and Next Steps -1. This blog is now completed, achieving all afforementioned objectives. -2. 
This blog is currently available online at http://computerassistedsurgery.robarts.ca. -3. Moving forward, this website will be hosted as part of the Slicer documentation. -4. The blog will become more general, including tutorials on other topics. - +1. This blog is now completed, achieving all afforementioned objectives. +2. This blog is currently available online at http://computerassistedsurgery.robarts.ca. +3. Moving forward, this website will be hosted as part of the Slicer documentation. +4. The blog will become more general, including tutorials on other topics. diff --git a/PW29_2018_London_Canada/Projects/Template/README.md b/PW29_2018_London_Canada/Projects/Template/README.md index f966f7994..762c86964 100644 --- a/PW29_2018_London_Canada/Projects/Template/README.md +++ b/PW29_2018_London_Canada/Projects/Template/README.md @@ -3,17 +3,17 @@ Back to [Projects List](../../README.md#ProjectsList) ## Write full project title here ## Key Investigators -- Investigator 1 (Affiliation) -- Investigator 2 (Affiliation) +- Investigator 1 (Affiliation) +- Investigator 2 (Affiliation) - Investigator 3 (Affiliation) # Project Description - + ## Objective -1. Objective A. Describe it in 1-2 sentences. -1. Objective B. Describe it in 1-2 sentences. -1. Objective C. Describe it in 1-2 sentences. +1. Objective A. Describe it in 1-2 sentences. +1. Objective B. Describe it in 1-2 sentences. +1. Objective C. Describe it in 1-2 sentences. 
## Approach and Plan @@ -40,4 +40,3 @@ Back to [Projects List](../../README.md#ProjectsList) - Source code: https://github.com/YourUser/YourRepository - Documentation: https://link.to.docs - Test data: https://link.to.test.data - diff --git a/PW29_2018_London_Canada/Projects/UltrasoundKidneySimulator/README.md b/PW29_2018_London_Canada/Projects/UltrasoundKidneySimulator/README.md index 416d7804a..82b6da51f 100644 --- a/PW29_2018_London_Canada/Projects/UltrasoundKidneySimulator/README.md +++ b/PW29_2018_London_Canada/Projects/UltrasoundKidneySimulator/README.md @@ -38,4 +38,3 @@ Create a low-cost simulator for ultraosund-guided needle insertions with simulat - Source code: https://github.com/Lyla-M/SlicerWeek - Documentation: https://uwoca-my.sharepoint.com/:p:/g/personal/ymu2_uwo_ca/EXJa9eaoqh1KicOWnQV8bWsBKeiZLQC7Th-wl4dSaBbBJw?e=4BBl4O - Test data: https://link.to.test.data - diff --git a/PW29_2018_London_Canada/Projects/UsingExtensions/README.md b/PW29_2018_London_Canada/Projects/UsingExtensions/README.md index 6ac215f7f..a0b473553 100644 --- a/PW29_2018_London_Canada/Projects/UsingExtensions/README.md +++ b/PW29_2018_London_Canada/Projects/UsingExtensions/README.md @@ -22,7 +22,7 @@ Supervised machine learning methods require an input image with a corresponding ## Progress and Next Steps - 3D images were loaded in as a Vector Volume, 2 points were used as inputs for tip and trajectory computation, then a needle model was created -- The Volume Clip to Model module was used on the needle model to change the image to be white inside and black outside +- The Volume Clip to Model module was used on the needle model to change the image to be white inside and black outside - Since 2D images were loaded into Slicer as a Vector Volume, the Vector to Scalar Volume module was used to convert the images to use with 3D logic - 2D images do not have pixel spacing encoded, so a few "ctk.ctkDoubleSpinBox()" entry fields were added to accept user 
input diff --git a/PW29_2018_London_Canada/README.md b/PW29_2018_London_Canada/README.md index e30498725..c08719b12 100644 --- a/PW29_2018_London_Canada/README.md +++ b/PW29_2018_London_Canada/README.md @@ -22,7 +22,7 @@ To receive information and announcements about the events please join the [Slack + **Parking:** Visitor parking is available at $12/day (sorry!) in the nearby [hospital visitor parking garage](https://www.google.ca/maps/@43.0142363,-81.2750746,3a,75y,186.07h,100.01t/data=!3m5!1e1!3m3!1sVo_3mg5ibFW_kPnYjVOrPQ!2e0!6s%2F%2Fgeo0.ggpht.com%2Fcbk%3Fpanoid%3DVo_3mg5ibFW_kPnYjVOrPQ%26output%3Dthumbnail%26cb_client%3Dmaps_sv.tactile.gps%26thumb%3D2%26w%3D203%26h%3D100%26yaw%3D62.77921%26pitch%3D0%26thumbfov%3D100). University of Western Ontario parking information is available [here](https://www.uwo.ca/parking/find/visitor/index.html) ## Local Organizing Committee - + - Host: [Terry Peters](http://www.robarts.ca/terry-peters), Robarts Scientist; Professor Medical Imaging; Medical Biophysics; Biomedical Engineering - Email Local Organizing Committee: @@ -40,7 +40,7 @@ To receive information and announcements about the events please join the [Slack ## Program | = |Monday July 16 | Tuesday July 17 | Wednesday July 18 | Thursday July 19 | Friday July 20 -|:---: | :---: | :---: | :---: | :---: | :---: +|:---: | :---: | :---: | :---: | :---: | :---: |9:00 | | | | | |10:00 | | Segmentation tutorial/breakout | Augmented/virtual reality demo/breakout | | |11:00 | | | | | Project reviews diff --git a/PW30_2019_GranCanaria/BreakoutSessions/MachineLearning.md b/PW30_2019_GranCanaria/BreakoutSessions/MachineLearning.md index 56691d689..2a64f2a60 100644 --- a/PW30_2019_GranCanaria/BreakoutSessions/MachineLearning.md +++ b/PW30_2019_GranCanaria/BreakoutSessions/MachineLearning.md @@ -101,8 +101,8 @@ Deepak: - ITK-based tools could be added for translation, rotation, bspline deformation etc. 
- Others provide some data augmentation: - Niftynet - - NVidia DALI: https://github.com/NVIDIA/DALI - - DKFZ BatchGenerator. See https://github.com/MIC-DKFZ/batchgenerators#readme + - NVidia DALI: https://github.com/NVIDIA/DALI + - DKFZ BatchGenerator. See https://github.com/MIC-DKFZ/batchgenerators#readme ![](./MachineLearning_BreakoutSession.jpg) @@ -132,4 +132,3 @@ Related efforts: * For questions, michael.grauer@kitware.com, jcfr@kitware.com * Poster available [here](https://data.kitware.com/api/v1/file/5c4ef2628d777f072b1a5324/download). * This is along the lines of annotation creation, display, inter annotator agreement, spatiotemporal clustering, audits, workflows, crowdsourcing, cloud hosting with scalability and availability. - diff --git a/PW30_2019_GranCanaria/BreakoutSessions/SlicerCoreUpdateAndPlans.md b/PW30_2019_GranCanaria/BreakoutSessions/SlicerCoreUpdateAndPlans.md index fc03c83c9..4827c9797 100644 --- a/PW30_2019_GranCanaria/BreakoutSessions/SlicerCoreUpdateAndPlans.md +++ b/PW30_2019_GranCanaria/BreakoutSessions/SlicerCoreUpdateAndPlans.md @@ -14,5 +14,3 @@ Lookup day and time on the [calendar](../README.md#program-calendar). ## Meeting minutes Meeting minutes will be added here. - - diff --git a/PW30_2019_GranCanaria/Logistics.md b/PW30_2019_GranCanaria/Logistics.md index ecdfd6ecb..2382cfad6 100644 --- a/PW30_2019_GranCanaria/Logistics.md +++ b/PW30_2019_GranCanaria/Logistics.md @@ -42,7 +42,7 @@ To get notifications about new posts automatically, [sign in with Google, GitHub ## ERASMUS mobility information -1. Please, contact your ERASMUS Office at your home institution to check how to prepare an application. At the same time, please, +1. Please, contact your ERASMUS Office at your home institution to check how to prepare an application. 
At the same time, please, let us know that you're willing to apply and what project you'd like to work on and your motivation to come in an email to Juan Ruiz Alzola, Maria Dolores Afonso Suarez, Asmaa Skareb. @@ -54,7 +54,7 @@ Juan Ruiz Alzola, Maria Dolores Afonso Suarez, Asmaa Skareb. * The academic authority signing on behalf the host center is *Prof. Félix Tobajas, Subdirector de Estudiantes, Movilidad y Prácticas Externas, Escuela de Ingeniería de Telecomunicación y Electrónica (Mobility Deputy Director, Telecommunication and Electrical Engineering School), Universidad de Las Palmas de Gran Canaria*. - * Once properly prepared and signed the application at the applicant's home institution, it should be sent by email to: SempeEite 3. The application will be processed by the host center and the outcome will be reported in a few days. diff --git a/PW30_2019_GranCanaria/PW30InTheMedia.md b/PW30_2019_GranCanaria/PW30InTheMedia.md index 1b58f852d..5c477c0ad 100644 --- a/PW30_2019_GranCanaria/PW30InTheMedia.md +++ b/PW30_2019_GranCanaria/PW30InTheMedia.md @@ -7,12 +7,12 @@ # Twitter: 1. [Cueva Pintada Visit](https://twitter.com/medtec4susdev/status/1091397136355393537) 1. [Cueva Pintada Museum](https://twitter.com/medtec4susdev/status/1091393571675127809) -1. [Work Sessions](https://twitter.com/medtec4susdev/status/1091349996442714112) +1. [Work Sessions](https://twitter.com/medtec4susdev/status/1091349996442714112) 1. [Interviews](https://twitter.com/medtec4susdev/status/1091314493735518208) 1. [Elder Museum](https://twitter.com/medtec4susdev/status/1091110227498463233) 1. [Elder Museum African Dance](https://twitter.com/medtec4susdev/status/1091045023502819328) 1. [Morning Session. Prof. Luis Serra](https://twitter.com/medtec4susdev/status/1090634151919788032) -1. [Morning Session. Prof. Luis López](https://twitter.com/medtec4susdev/status/1090632688095121409) +1. [Morning Session. Prof. 
Luis López](https://twitter.com/medtec4susdev/status/1090632688095121409) # Instagram: 1. [Work Session](https://www.instagram.com/p/BtV3XR3DCls/?utm_source=ig_web_copy_link) @@ -22,4 +22,3 @@ # Facebook: 1. [Projects Update](https://www.facebook.com/medtec4susdev/posts/2229206357353268) 1. [Interviews](https://www.facebook.com/medtec4susdev/posts/2229163147357589?__xts__%5B0%5D=68.ARC4klytiuAohkt3mU0KGSeWXu-5Ru3kROLxZbdY4O_aau7V7PD0avwCLuYjfb7_FNUZd467n55tGclNHHoFcATnNb9nXq3zTk4BnrfKTkg7R_vVMyXry1HbPQgusJCXC0k82CwNC3kxvcnGBeMmp8JlsvnhWKvgmTnFPL4BSL1qvPmwA8vMi62AUEjQmIk8Mp5nufWZe_koDEQ_W1mcKm3YCWPfHgm6W--U_cD5CVdyHvoCheCLcJj6HG5_m0-LUhGxBKHuf_s-ofKvJWe-i-EuA39GWa49aiRrBv3juwFr3IldupRFfYH4DL-CZuTNVcg0PSptet9eApfg2XF4rw1Vtzn_&__tn__=-R) - diff --git a/PW30_2019_GranCanaria/PreparatoryMeetingsNotes.md b/PW30_2019_GranCanaria/PreparatoryMeetingsNotes.md index 72bcef601..953ad5da0 100644 --- a/PW30_2019_GranCanaria/PreparatoryMeetingsNotes.md +++ b/PW30_2019_GranCanaria/PreparatoryMeetingsNotes.md @@ -20,5 +20,3 @@ Proposal about the areas to establish in order to organize the projects: - DICOM It could be interesting during the next preparatory meetings to have a brainstorming on breakout sessions topics - - diff --git a/PW30_2019_GranCanaria/Projects/3DSlicerModelsforBrainQuiz/README.md b/PW30_2019_GranCanaria/Projects/3DSlicerModelsforBrainQuiz/README.md index e0d91bac2..22cae929b 100644 --- a/PW30_2019_GranCanaria/Projects/3DSlicerModelsforBrainQuiz/README.md +++ b/PW30_2019_GranCanaria/Projects/3DSlicerModelsforBrainQuiz/README.md @@ -42,7 +42,7 @@ This proposal is a joint collaboration work: [M-ITI](https://www.m-iti.org/) and ## Illustrations -| Prototype from PW28th| 3D Model for 30PW| +| Prototype from PW28th| 3D Model for 30PW| | ---------------------|------------------| | | | @@ -54,11 +54,11 @@ Unity Scenario video examples. 30 PW NA-MIC Implementation. 
-| Main menu| Highscore| +| Main menu| Highscore| | ---------|----------| | | | -| Question example | End game + New High Score Entry | +| Question example | End game + New High Score Entry | | -----------------|---------------------| | | | @@ -73,4 +73,3 @@ Unity Scenario video examples. Mónica S. Cameirão, Fábio Pereira, Sergi Bermúdez i Badia](https://neurorehabilitation.m-iti.org/lab/wp-content/plugins/zotpress/lib/request/request.dl.php?api_user_id=161215&key=4ZQMQFB3&content_type=application/pdf) + Review work from [Summer Project Week 2013](https://na-mic.org/wiki/2013_Project_Week:WebbasedAnatomicalTeachingFrameworkSummer2013). [Live Demo](http://fnndsc.github.com/babybrain) - diff --git a/PW30_2019_GranCanaria/Projects/AutomSegmentFreeSurfer/README.md b/PW30_2019_GranCanaria/Projects/AutomSegmentFreeSurfer/README.md index 5cb984b63..77851af2f 100644 --- a/PW30_2019_GranCanaria/Projects/AutomSegmentFreeSurfer/README.md +++ b/PW30_2019_GranCanaria/Projects/AutomSegmentFreeSurfer/README.md @@ -33,11 +33,11 @@ This project aims to improve the automatic segmentation results generated from t ## Progress and Next Steps -1. The image used on this project is the MIR of the brain labelled 103414. +1. The image used on this project is the MIR of the brain labelled 103414. 1. The two structures that have been segmented are: - Subcallosal area (SC) - Medial border: hemispheric margin - - Lateral border: grey-white matter border + - Lateral border: grey-white matter border - Superior border: corpus callosum - Inferior border: inferior hemispheric curvature (45º line) - Anterior border: slice A 31.900mm @@ -53,7 +53,7 @@ This project aims to improve the automatic segmentation results generated from t We have taken, for the OFC, the traditional approach, following the olfatory surcus. Fiducial points were placed on the slices for guidance. ## Procedure -The Slicer module used was the Segment Editor. 
Once the fiducial points were placed, marking the boundaries, slice by slice, the draw/paint and erase tools (with an 1 to 3% diameter) were used to manually trace and fill the corresponding areas for each segment. No other segmentation tools were used, as the work was done in an entirely manual way. +The Slicer module used was the Segment Editor. Once the fiducial points were placed, marking the boundaries, slice by slice, the draw/paint and erase tools (with an 1 to 3% diameter) were used to manually trace and fill the corresponding areas for each segment. No other segmentation tools were used, as the work was done in an entirely manual way. To ensure the correct overlap of the segment boundaries, when using the paint or draw tool, the setting for masking was set to editable area: outside all segments. ## Illustrations @@ -75,7 +75,7 @@ To ensure the correct overlap of the segment boundaries, when using the paint or - + Superior view @@ -91,6 +91,3 @@ To ensure the correct overlap of the segment boundaries, when using the paint or ## Background and References - - - diff --git a/PW30_2019_GranCanaria/Projects/DICOM4QI/README.md b/PW30_2019_GranCanaria/Projects/DICOM4QI/README.md index f5885f2eb..5e37d6bb7 100644 --- a/PW30_2019_GranCanaria/Projects/DICOM4QI/README.md +++ b/PW30_2019_GranCanaria/Projects/DICOM4QI/README.md @@ -58,7 +58,7 @@ Several relevant projects were presented during the DICOM breakout session on We * Andrey: Imaging Data Standardization for AI and Big Data applications [slides](http://bit.ly/2Wt9AxX) * Steve: Demonstrations of OHIF DICOM interoperability * Srikrishna Prasad: Use of DICOM in Siemens Teamplay -* Marco Nolden, Tobias Stein: Use of DICOM in the *DKTK Joint Imaging Platform* and the [Segmentation Review System](https://drive.google.com/file/d/1NXiu18mCFXrIaEgQ1WdzBbmq9igIyZNN/view?usp=sharing) +* Marco Nolden, Tobias Stein: Use of DICOM in the *DKTK Joint Imaging Platform* and the [Segmentation Review 
System](https://drive.google.com/file/d/1NXiu18mCFXrIaEgQ1WdzBbmq9igIyZNN/view?usp=sharing) * Markus Herrmann: DICOM for Digital Pathology * Peter Oppermann and Hans Meine: DICOM on FIHR [notes](https://docs.google.com/document/d/1INqLOu4xOQN59_ifdMc7P8qhqb08SiY5LRs59kgCCRw), see [this project page](https://projectweek.na-mic.org/PW30_2019_GranCanaria/Projects/DICOMSRTID1500-FHIR/) for more info Both Markus and "DICOM on FIHR" use the [DICOM4QI](https://dicom4qi.readthedocs.io) datasets as reference for development. diff --git a/PW30_2019_GranCanaria/Projects/DICOMSRTID1500-FHIR/README.md b/PW30_2019_GranCanaria/Projects/DICOMSRTID1500-FHIR/README.md index 529abe3a5..784e17856 100644 --- a/PW30_2019_GranCanaria/Projects/DICOMSRTID1500-FHIR/README.md +++ b/PW30_2019_GranCanaria/Projects/DICOMSRTID1500-FHIR/README.md @@ -73,4 +73,3 @@ Relevant FHIR resources for the diagnostic report and reported observations: Other FHIR-related pointers: * [#FHIR and confusion about the 80/20 rule](http://www.healthintersections.com.au/?p=1924), and interesting discussion on this rule in the context of DICOM: [FHIR Extensions, the 80/20 rule, DICOM and the LONG tail](http://www.healthintersections.com.au/?cat=39), by Grahame Grieve - diff --git a/PW30_2019_GranCanaria/Projects/DICOMweb-CouchDB/README.md b/PW30_2019_GranCanaria/Projects/DICOMweb-CouchDB/README.md index 6c728c6e0..3ea2de6b1 100644 --- a/PW30_2019_GranCanaria/Projects/DICOMweb-CouchDB/README.md +++ b/PW30_2019_GranCanaria/Projects/DICOMweb-CouchDB/README.md @@ -1,6 +1,6 @@ Back to [Projects List](../../README.md#ProjectsList) -# Lightweight DICOMweb Server with CouchDB +# Lightweight DICOMweb Server with CouchDB ## Key Investigators @@ -9,11 +9,11 @@ Back to [Projects List](../../README.md#ProjectsList) - Andrey Fedorov (BWH) - interested to deploy and test the resulting platform - Markus Herrmann (CCDS) - Tobias Stein (DKFZ) -- Marco Nolden (DKFZ) - optional CTK/C++ based client side testing +- Marco Nolden (DKFZ) - 
optional CTK/C++ based client side testing # Project Description -This project aims to work on a prototype lightweight DICOM server that will support DICOMweb standard and host the DICOM files as JSON objects. It will leverage previous work on SlicerChronicle and dcmjs. +This project aims to work on a prototype lightweight DICOM server that will support DICOMweb standard and host the DICOM files as JSON objects. It will leverage previous work on SlicerChronicle and dcmjs. ## Objective @@ -29,7 +29,7 @@ This project aims to work on a prototype lightweight DICOM server that will supp 1. Review and define project ideas 1. Consider how to encapsulate DICOMweb logic so that it facilitates use in servers, but also in testing and other scenarios 1. Define a reasonable subset of DICOMweb that can be supported easily (at least basic QIDO/WADO/STOW RS) - 1. Pick a nice web server infastructure that can easily implement rest api proxies + 1. Pick a nice web server infastructure that can easily implement rest api proxies 1. maybe build on [express/pouchdb-server](https://github.com/pouchdb/pouchdb-server) 1. also look at [fastify](https://www.fastify.io/) 1. Find an easy way to host the service for development, testing, and demos @@ -49,9 +49,9 @@ This project aims to work on a prototype lightweight DICOM server that will supp 1. {s}/studies/{study}/series endpoint: Query for series 1. {s}/studies/{study}/series/{series}/instances endpoint: Query for instances 1. WADO - 1. {s}/studies/{study}/series/{series}/instances/{instance} endpoint : WADO-URI Retrieve Instance + 1. {s}/studies/{study}/series/{series}/instances/{instance} endpoint : WADO-URI Retrieve Instance 1. {s}/studies/{study}/metadata endpoint: Retrieve study metadata - + 1. JSON schemas defined for QIDO query responses for studies, series and instances level 1. 
Implemented STOW using some parts from Dicomweb-client (https://github.com/dcmjs-org/dicomweb-client) and dcmjs (https://github.com/dcmjs-org/dcmjs) diff --git a/PW30_2019_GranCanaria/Projects/DICOMweb-CouchDB/dicomweb_instances_output.schema.json b/PW30_2019_GranCanaria/Projects/DICOMweb-CouchDB/dicomweb_instances_output.schema.json index f6a1b26e3..e7afb4efa 100644 --- a/PW30_2019_GranCanaria/Projects/DICOMweb-CouchDB/dicomweb_instances_output.schema.json +++ b/PW30_2019_GranCanaria/Projects/DICOMweb-CouchDB/dicomweb_instances_output.schema.json @@ -249,4 +249,4 @@ ] } ] -} \ No newline at end of file +} diff --git a/PW30_2019_GranCanaria/Projects/DICOMweb-CouchDB/dicomweb_series_output_schema.json b/PW30_2019_GranCanaria/Projects/DICOMweb-CouchDB/dicomweb_series_output_schema.json index cbd0fb557..e4dcdee98 100644 --- a/PW30_2019_GranCanaria/Projects/DICOMweb-CouchDB/dicomweb_series_output_schema.json +++ b/PW30_2019_GranCanaria/Projects/DICOMweb-CouchDB/dicomweb_series_output_schema.json @@ -209,4 +209,4 @@ ] } ] -} \ No newline at end of file +} diff --git a/PW30_2019_GranCanaria/Projects/DICOMweb-CouchDB/dicomweb_studies_output_schema.json b/PW30_2019_GranCanaria/Projects/DICOMweb-CouchDB/dicomweb_studies_output_schema.json index 55ac14f9b..a72d35b1f 100644 --- a/PW30_2019_GranCanaria/Projects/DICOMweb-CouchDB/dicomweb_studies_output_schema.json +++ b/PW30_2019_GranCanaria/Projects/DICOMweb-CouchDB/dicomweb_studies_output_schema.json @@ -366,6 +366,6 @@ "00201206", "00201208" ] - } + } ] -} \ No newline at end of file +} diff --git a/PW30_2019_GranCanaria/Projects/Data-glove_for_virtual_operations/README.md b/PW30_2019_GranCanaria/Projects/Data-glove_for_virtual_operations/README.md index f5889b690..97132d646 100644 --- a/PW30_2019_GranCanaria/Projects/Data-glove_for_virtual_operations/README.md +++ b/PW30_2019_GranCanaria/Projects/Data-glove_for_virtual_operations/README.md @@ -46,7 +46,7 @@ We built a data-glove that tracks hand 
movements. Our goal is to connect it to S Result: -![Result](20190201_095221.gif) +![Result](https://github.com/NA-MIC/ProjectWeek/releases/download/project-week-resources/PW30__Data-glove_for_virtual_operations__20190201_095221.gif) # Background and References @@ -62,4 +62,3 @@ Result: - diff --git a/PW30_2019_GranCanaria/Projects/MRINeedleGuidance/README.md b/PW30_2019_GranCanaria/Projects/MRINeedleGuidance/README.md index 55b9cb09e..33805c37b 100644 --- a/PW30_2019_GranCanaria/Projects/MRINeedleGuidance/README.md +++ b/PW30_2019_GranCanaria/Projects/MRINeedleGuidance/README.md @@ -12,7 +12,7 @@ Back to [Projects List](../../README.md#ProjectsList) # Project Description -MRI guided devices have been designed for biopsies or other interventions. +MRI guided devices have been designed for biopsies or other interventions. The aim is to adapt Slicer for MR-guided needle placement procedures using MRI compatible robots. ## Objective @@ -78,4 +78,3 @@ Development of a shoulder-mounted robot for MRI-guided needle placement + [SlicerIGT](http://www.slicerigt.org/wp/) + [OpenIGTLink](http://openigtlink.org/) + [SlicerDevelopmentToolbox](https://www.slicer.org/wiki/Documentation/Nightly/Extensions/SlicerDevelopmentToolbox), and [Documentation](https://sdt.readthedocs.io/en/latest/index.html) - diff --git a/PW30_2019_GranCanaria/Projects/MarkupsRedesign/README.md b/PW30_2019_GranCanaria/Projects/MarkupsRedesign/README.md index b70a3c329..152886ec8 100644 --- a/PW30_2019_GranCanaria/Projects/MarkupsRedesign/README.md +++ b/PW30_2019_GranCanaria/Projects/MarkupsRedesign/README.md @@ -33,14 +33,14 @@ Plan is to integrate the reworked markups infrastructure and new markups during - Davide Punzo has reworked markup point list widget: - [Pull request](https://github.com/Slicer/Slicer/pull/1079) - + [Pull request](https://github.com/Slicer/Slicer/pull/1079) + [Points video](https://www.dropbox.com/s/p8v1m7mgopsnrp4/Widget-rework.mkv?dl=0) 
- + and added angle, line, open curve, and closed curve widgets. - + [Open Curve video](https://www.dropbox.com/s/ap67lmxo0xh77h0/OpenCurve.mkv?dl=0) - + - Andras and Sara tested and reported errors Davide fixed them - 17 of 27 known issues have been fixed. Open issues are mostly enhancements or problems in new features (not regressions in existing features). - Necessary VTK updates (double-click and selection event support) merged into Slicer/VTK diff --git a/PW30_2019_GranCanaria/Projects/Microscopy/README.md b/PW30_2019_GranCanaria/Projects/Microscopy/README.md index 3113a8fca..8f42705b2 100644 --- a/PW30_2019_GranCanaria/Projects/Microscopy/README.md +++ b/PW30_2019_GranCanaria/Projects/Microscopy/README.md @@ -46,12 +46,12 @@ Microscopy module. | Thresholding | Watershed | | -------------|-----------| -| | | +| | | -The other. +The other. | Watershed with markers for the different regions | | -----| @@ -62,4 +62,3 @@ The other. + [Previous work in 3DSlicer](https://www.slicer.org/wiki/Documentation/Nightly/Extensions/IASEM) + [Segmentation of Microscopic Images](https://ieeexplore.ieee.org/document/6745404) - diff --git a/PW30_2019_GranCanaria/Projects/MorphoSourceDataRetrieval/README.md b/PW30_2019_GranCanaria/Projects/MorphoSourceDataRetrieval/README.md index 778e817b8..5d010941f 100644 --- a/PW30_2019_GranCanaria/Projects/MorphoSourceDataRetrieval/README.md +++ b/PW30_2019_GranCanaria/Projects/MorphoSourceDataRetrieval/README.md @@ -39,7 +39,7 @@ MorphoSource (M/S) is a publicly available repository for 3D media representing ## Results 1. Tested prototype design -2. Identified an issue with the unpackaging of zip files that contains image sequence: When importing into Slicer, each image in the sequence is treated as an individual file. This can be changed using the load options, but getting to this step is very slow when the number of files is large. +2. 
Identified an issue with the unpackaging of zip files that contains image sequence: When importing into Slicer, each image in the sequence is treated as an individual file. This can be changed using the load options, but getting to this step is very slow when the number of files is large. 3. Next steps: Resolve zip file import issue and refine display of metadata. diff --git a/PW30_2019_GranCanaria/Projects/NeuroNames-HOA/README.md b/PW30_2019_GranCanaria/Projects/NeuroNames-HOA/README.md index a491df5ab..356db7e09 100644 --- a/PW30_2019_GranCanaria/Projects/NeuroNames-HOA/README.md +++ b/PW30_2019_GranCanaria/Projects/NeuroNames-HOA/README.md @@ -25,12 +25,12 @@ This project focuses on linking the anatomical definitions of the Harvard Oxford -1. +1. ## Progress and Next Steps -1. +1. # Illustrations diff --git a/PW30_2019_GranCanaria/Projects/NeuroNetworkSegmentationofNeck/Img_14/Segmentation.seg.nrrd b/PW30_2019_GranCanaria/Projects/NeuroNetworkSegmentationofNeck/Img_14/Segmentation.seg.nrrd index 507ec4a90..bdec9a2bc 100644 Binary files a/PW30_2019_GranCanaria/Projects/NeuroNetworkSegmentationofNeck/Img_14/Segmentation.seg.nrrd and b/PW30_2019_GranCanaria/Projects/NeuroNetworkSegmentationofNeck/Img_14/Segmentation.seg.nrrd differ diff --git a/PW30_2019_GranCanaria/Projects/NeuroNetworkSegmentationofNeck/README.md b/PW30_2019_GranCanaria/Projects/NeuroNetworkSegmentationofNeck/README.md index 228593d06..a7f6be00c 100644 --- a/PW30_2019_GranCanaria/Projects/NeuroNetworkSegmentationofNeck/README.md +++ b/PW30_2019_GranCanaria/Projects/NeuroNetworkSegmentationofNeck/README.md @@ -31,7 +31,7 @@ We want to implement a neural network based automatic segmentation algorithm to ## Progress and Next Steps -We made some segmentation mask in Slicer, using Segment editor's fill between slices tool. +We made some segmentation mask in Slicer, using Segment editor's fill between slices tool. 
- Few manually segmented images ![align="left"](minta.PNG) ![align="left"](image_00008.png) @@ -45,10 +45,10 @@ We made some segmentation mask in Slicer, using Segment editor's fill between sl # Illustrations Keras, a neural network package in python: -- https://keras.io/ +- https://keras.io/ The U-net: -- https://lmb.informatik.uni-freiburg.de/people/ronneber/u-net/ +- https://lmb.informatik.uni-freiburg.de/people/ronneber/u-net/ @@ -62,4 +62,3 @@ The U-net: - Source code: https://github.com/YourUser/YourRepository - Documentation: https://link.to.docs - Test data: https://link.to.test.data - diff --git a/PW30_2019_GranCanaria/Projects/OHIF.AI/README.md b/PW30_2019_GranCanaria/Projects/OHIF.AI/README.md index 0253d395b..e350a1298 100644 --- a/PW30_2019_GranCanaria/Projects/OHIF.AI/README.md +++ b/PW30_2019_GranCanaria/Projects/OHIF.AI/README.md @@ -10,10 +10,10 @@ Back to [Projects List](../../README.md#ProjectsList) # Project Description -The aim of this project is to prepare OHIF Viewer for AI use cases. +The aim of this project is to prepare OHIF Viewer for AI use cases. Specifically, 1. To visualise encapsulated pdf in OHIF Viewer -2. To provide editable UI interface for segmentation and measurement objects generated by AI Algorithms. +2. To provide editable UI interface for segmentation and measurement objects generated by AI Algorithms. 3. To create audit for any correction applied to generated results. 4. Provision an option to push corrected results back to pacs @@ -51,7 +51,7 @@ Specifically, 6. 
Visualise the pdf in OHIF Viewer - completed ## Next Steps -OHIF SR Report Plugin +OHIF SR Report Plugin - Dicom SR plugin to visualise and Edit SR report results is needed - 3D volume rendering of Dicom SEG object and editing diff --git a/PW30_2019_GranCanaria/Projects/OpenIGTLinkIODevelopment/README.md b/PW30_2019_GranCanaria/Projects/OpenIGTLinkIODevelopment/README.md index 852c8931f..b567ae8a6 100644 --- a/PW30_2019_GranCanaria/Projects/OpenIGTLinkIODevelopment/README.md +++ b/PW30_2019_GranCanaria/Projects/OpenIGTLinkIODevelopment/README.md @@ -35,7 +35,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## Future work -* Improve robustness of pyIGTLink implementation and ensure support for both Python 2 and 3 +* Improve robustness of pyIGTLink implementation and ensure support for both Python 2 and 3 * Continue to maintain and develop Plus and OpenIGTLinkIO * If you have any questions or issues, feel free to submit an issue on [GitHub](https://github.com/PlusToolkit/PlusLib/issues) diff --git a/PW30_2019_GranCanaria/Projects/PelvicAnatomyAtlases/README.md b/PW30_2019_GranCanaria/Projects/PelvicAnatomyAtlases/README.md index 77581d7b2..2ceea4b5b 100644 --- a/PW30_2019_GranCanaria/Projects/PelvicAnatomyAtlases/README.md +++ b/PW30_2019_GranCanaria/Projects/PelvicAnatomyAtlases/README.md @@ -63,5 +63,3 @@ This project focuses on the development of anatomical atlases of the pelvic area ## Background and References - - diff --git a/PW30_2019_GranCanaria/Projects/PointSetRegistration/README.md b/PW30_2019_GranCanaria/Projects/PointSetRegistration/README.md index 491a16c86..bd8701249 100644 --- a/PW30_2019_GranCanaria/Projects/PointSetRegistration/README.md +++ b/PW30_2019_GranCanaria/Projects/PointSetRegistration/README.md @@ -10,7 +10,7 @@ Back to [Projects List](../../README.md#ProjectsList) # Project Description -This project focuses on applying a point set registration in 2D multichannel images and integrating it in a new 3D Slicer +This 
project focuses on applying a point set registration in 2D multichannel images and integrating it in a new 3D Slicer module. In order to use this kind of registration, it is necessary to obtain spatial features from the image and represent them as a point cloud. @@ -21,27 +21,27 @@ as a point cloud. 1. Explore strategies to extract a set of features from a 2D multichannel image and convert it to point cloud 1. Select a registration algorithm for two point clouds and validate it with test cases 1. Adapt the whole flow (features extraction and point cloud registration) to the original image -1. Design and implement the 3D Slicer user interface for the proposed registration module +1. Design and implement the 3D Slicer user interface for the proposed registration module ## Approach and Plan -1. Extract features based on contour information +1. Extract features based on contour information 1. Select, integrate and validate a point set registration algorithm 1. Apply registration result in the whole data imag ## Progress and Next Steps - Progress: * Points clouds have been created from the contour information of the images that we wanted to register. - * As point set registration algorithm the Coherent Point Drift (CPD) one has been chosen. + * As point set registration algorithm the Coherent Point Drift (CPD) one has been chosen. * A simple interpolation has been applied in order to make the deformation of the points set affects all the pixels of the image. * The visualization of the obtained results has been performed through ParaView. - * Some trials have been carried out with both simple and real images. + * Some trials have been carried out with both simple and real images. Next steps: * Improve the points cloud sampling strategy and the interpolation applied. 
@@ -73,7 +73,7 @@ Point set registration example from a blue point set M to the red point set S: ### Input images: ![fixed](hand_1.png) ![moving](hand_0.png) - + ![Hand example](https://media.giphy.com/media/3BMs85RdITDXTQ6icq/giphy.gif) ### Result: diff --git a/PW30_2019_GranCanaria/Projects/PyRadiomics/README.md b/PW30_2019_GranCanaria/Projects/PyRadiomics/README.md index 505a54963..86587d1b3 100644 --- a/PW30_2019_GranCanaria/Projects/PyRadiomics/README.md +++ b/PW30_2019_GranCanaria/Projects/PyRadiomics/README.md @@ -40,7 +40,7 @@ This project aims to develop and maintain the open source software PyRadiomics, 1. Done, voxel-wise extraction now available from the command line by setting switch `--mode` (`-m`) to `voxel`, stores feature maps as NRRD images. Merged into PyRadiomics in [PR #457](https://github.com/Radiomics/pyradiomics/pull/457) 1. Radiomics features comparison sub-project. * developed a Jupyter Notebook for comparing USF radiomics results with pyradiomics (see [https://github.com/Radiomics/pyradiomics/tree/master/labs/pyradiomics-reproduce](https://github.com/Radiomics/pyradiomics/tree/master/labs/pyradiomics-reproduce)). Observed good agreement in one case, but very poor in another, where volume of the structure is very small (see fig. below). We think this is due to the different approaches to rasterizing RTSTRUCT contours. Comparison of texture features underway. -* learned about the radiomics module in MITK [tutorial](http://docs.mitk.org/nightly/org_mitk_views_radiomicstutorial_gui_portal.html) and [detailed info](http://mitk.org/wiki/Phenotyping), will investigate consistency and agreement with IBSI, to be continued! +* learned about the radiomics module in MITK [tutorial](http://docs.mitk.org/nightly/org_mitk_views_radiomicstutorial_gui_portal.html) and [detailed info](http://mitk.org/wiki/Phenotyping), will investigate consistency and agreement with IBSI, to be continued! 
# Illustrations diff --git a/PW30_2019_GranCanaria/Projects/RawImageGuess/README.md b/PW30_2019_GranCanaria/Projects/RawImageGuess/README.md index a4d72fc3e..cf839dfab 100644 --- a/PW30_2019_GranCanaria/Projects/RawImageGuess/README.md +++ b/PW30_2019_GranCanaria/Projects/RawImageGuess/README.md @@ -25,7 +25,7 @@ Sometimes unknown file formats are encountered that contain images. There are fr 1. Add some more functionality. 2. Add ideas so they can be implemented later. -## Progress +## Progress All basic image parameters can be set: @@ -59,4 +59,4 @@ This is how the moddule looks as of 2019. 02. 01. - Documentation: In progress - Test data: - Available in the github repository - - 3D ultrasound from [Slicer forum](https://discourse.slicer.org/t/could-not-load-ultrasound-from-mvl-medison-file-format/3928/6?u=lassoan): [download from dropbox](https://www.dropbox.com/sh/azdck7h9e7b71dq/AACiHg-m-XPOhj2vs2_CFGK5a?dl=0) + - 3D ultrasound from [Slicer forum](https://discourse.slicer.org/t/could-not-load-ultrasound-from-mvl-medison-file-format/3928/6?u=lassoan): [download from dropbox](https://www.dropbox.com/sh/azdck7h9e7b71dq/AACiHg-m-XPOhj2vs2_CFGK5a?dl=0) diff --git a/PW30_2019_GranCanaria/Projects/SegmentEditor/README.md b/PW30_2019_GranCanaria/Projects/SegmentEditor/README.md index be81a9153..afe7fe77e 100644 --- a/PW30_2019_GranCanaria/Projects/SegmentEditor/README.md +++ b/PW30_2019_GranCanaria/Projects/SegmentEditor/README.md @@ -27,7 +27,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## Progress and Next Steps * Liver Data-set (Scheherazade Kraß): Segmentation of lesion and vessels. Used for printing gel phantom for robotic intervention testing. Lesions are segmented using Grow from seeds and separated using Islands effect. Vessels segmented using global thresholding. 
-** Test data: https://www.dropbox.com/sh/c28lajfutpnsjvp/AACvIhtMNJnsU9XHLPaAEZKMa?dl=0 +** Test data: https://www.dropbox.com/sh/c28lajfutpnsjvp/AACvIhtMNJnsU9XHLPaAEZKMa?dl=0 * Finger segmentation on MRI (Ahmedou Moulaye Idriss): for anatomical atlas purposes. Bones are OK to segment using Grow from seeds, but quite noisy, so maybe other effects could be as good or better. Vessel: contrast not very high, but there is local contrast - it can be tracked with a sphere brush with thresholding. * Pelvic organ segmentation (Babacar Diao): Updates on how to use new segmentation features in Slicer-4.10.1. diff --git a/PW30_2019_GranCanaria/Projects/SlicerMorphGeometricMorphometricToolset/README.md b/PW30_2019_GranCanaria/Projects/SlicerMorphGeometricMorphometricToolset/README.md index 85c934754..7912fe191 100644 --- a/PW30_2019_GranCanaria/Projects/SlicerMorphGeometricMorphometricToolset/README.md +++ b/PW30_2019_GranCanaria/Projects/SlicerMorphGeometricMorphometricToolset/README.md @@ -20,7 +20,7 @@ This project aims to produce a flexible toolset for analysis of shape and form f -1. Provide image import tools that support a broad range of image modalities and filetypes, including non-DICOM image outputs from research microCT scanners. +1. Provide image import tools that support a broad range of image modalities and filetypes, including non-DICOM image outputs from research microCT scanners. 2. Support common preprocessing steps, that may be required before analysis, including downsampling very large files produced by research microCT scanners. 3. Develop a module to implement Generalized Procrustes Analysis (GPA) and Principal Component Analysis (PCA) and visualize output statistics. @@ -28,11 +28,11 @@ This project aims to produce a flexible toolset for analysis of shape and form f -1. Custom image import tool +1. Custom image import tool 2. Custom landmark file import tool 3. Module to downsize images on import, optimized for speed. 4. 
Module to downsize images on import, optimized for memory usage. -5. Module to load large images, crop blank space, and saving ROI. +5. Module to load large images, crop blank space, and saving ROI. 6. Module to perform GPA, PCA, and visualize statistical output ## Results diff --git a/PW30_2019_GranCanaria/Projects/TrainingPrograms/README.md b/PW30_2019_GranCanaria/Projects/TrainingPrograms/README.md index a494b44d1..510c8485d 100644 --- a/PW30_2019_GranCanaria/Projects/TrainingPrograms/README.md +++ b/PW30_2019_GranCanaria/Projects/TrainingPrograms/README.md @@ -15,7 +15,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## Project Description -The training project is, in this 30PW, in its third edition. New clinician professionals will work in order to develop new training material and study how to enhance and improve the use of 3DSlicer as a training tool. A common program and specific ones, for different countries. In the first edition (27PW NA-MIC) a common training programs was proposed and after a month it was implemented in a tricontinental workshop. In the second edition (28PW NA-MIC) the professionals that attended the first one shared their experiences and planned specific training programs for their countries working in the transalaton of contents. +The training project is, in this 30PW, in its third edition. New clinician professionals will work in order to develop new training material and study how to enhance and improve the use of 3DSlicer as a training tool. A common program and specific ones, for different countries. In the first edition (27PW NA-MIC) a common training programs was proposed and after a month it was implemented in a tricontinental workshop. In the second edition (28PW NA-MIC) the professionals that attended the first one shared their experiences and planned specific training programs for their countries working in the transalaton of contents. 
## Objectives @@ -34,8 +34,8 @@ The training project is, in this 30PW, in its third edition. New clinician profe 1. We have delivered a Slicer course for African collaborators 1. We have gathered information for each country to introduce the use of 3D Slicer as a tool for clinicians -1. Since African countries have different needs and their students different profiles, different programs will be proposed. -1. We will develop reports for these different proposals and will study the impact of the whole process. +1. Since African countries have different needs and their students different profiles, different programs will be proposed. +1. We will develop reports for these different proposals and will study the impact of the whole process. ## Illustrations diff --git a/PW30_2019_GranCanaria/Projects/UltrasoundSimulatorTraining/README.md b/PW30_2019_GranCanaria/Projects/UltrasoundSimulatorTraining/README.md index b21d095fb..dc5c168b4 100644 --- a/PW30_2019_GranCanaria/Projects/UltrasoundSimulatorTraining/README.md +++ b/PW30_2019_GranCanaria/Projects/UltrasoundSimulatorTraining/README.md @@ -18,11 +18,11 @@ This project is a next step in the one presented during 28th PW NA-MIC and it ai ## Objectives -1. Supporting the multi-language (Spanish, French, English, Portuguese, and Arabic). -2. Registering several custom layouts. +1. Supporting the multi-language (Spanish, French, English, Portuguese, and Arabic). +2. Registering several custom layouts. 3. Calculating the angle between a plane of the US image and a needle and it will be shown in a 3D scene or a 2D viewer. 4. Visualizing the image orientation marker symbol in an US image. -5. Selection of various clinical procedures for training. +5. Selection of various clinical procedures for training. ## Approach and Plan @@ -30,11 +30,11 @@ This project is a next step in the one presented during 28th PW NA-MIC and it ai 1. 
A multi-language proposal was implemented in a Guidelet-based GUI for Slicer 4.8 using to the module “gettext” in Python. We would like to implement this proposal in SlicerIGT for Slicer 4.10. 2. The design of new custom layouts in Guidelet-based GUIs was implemented in collaboration with the Perklab team. 3. Designing a proposal for the angle between a plane of the US image and a needle. At first, we will study the VTK if there is a filter for this objective or other 3D Slicer modules. -4. Studying the MicrUs SDK to visualize the image orientation marker symbol and it will be implemented in the Plus Toolkit or 3D Slicer. +4. Studying the MicrUs SDK to visualize the image orientation marker symbol and it will be implemented in the Plus Toolkit or 3D Slicer. ## Progress and Next Steps 1. The image orientation marker symbol - - We discussed the image orientation marker symbol with PerkLab team + - We discussed the image orientation marker symbol with PerkLab team - We developed a prototype (see [Figure 6](ImageOrientationMarkerSymbol_Result.png)) - The symbol is a new actor in a viewport (2D scene) - We´ll integrate a stable version in our system @@ -42,25 +42,25 @@ This project is a next step in the one presented during 28th PW NA-MIC and it ai - We worked with Jean-Christophe Fillion-Robin (@jcfr) to improve the current implementation in 3D Slicer - We fixed a bug in CMake file - Creating a Spanish version in a part of the 3D Slicer GUI (see [Figure 7](SpanishVersion3DSlicerGUI.png) ) - - We are working the multilanguage in the python support + - We are working the multilanguage in the python support 1. Angle - We evaluated several strategies: - Module: angle measurement by Andras Lasso - Defining various vectors of the needle and image axes from the MRML - A selected proposal will be developed in outgoing - - + + # Illustrations Guidelet muli-language interface: -Figure1. Language selection for the Guidelet-based GUI +Figure1. 
Language selection for the Guidelet-based GUI -Figure 2. Guidelet-based GUI in Arabic +Figure 2. Guidelet-based GUI in Arabic @@ -68,7 +68,7 @@ Figure 3. Guidelet-based GUI in Spanish Sketches: - + Figure 4. An angle between a plane of the US image and a needle or a needle model overlays the US image @@ -78,7 +78,7 @@ Figure 5. An image orientation marker symbol (in blue color) **Outcome:** - + Figure 6. A prototype for the image orientation marker symbol (M) and the depth (green rectangles) diff --git a/PW30_2019_GranCanaria/Projects/UpperAirwayAirflowSimulation/README.md b/PW30_2019_GranCanaria/Projects/UpperAirwayAirflowSimulation/README.md index fec16cecf..6f46eb376 100644 --- a/PW30_2019_GranCanaria/Projects/UpperAirwayAirflowSimulation/README.md +++ b/PW30_2019_GranCanaria/Projects/UpperAirwayAirflowSimulation/README.md @@ -10,11 +10,11 @@ Back to [Projects List](../../README.md#ProjectsList) - Endre Vecsernyés (University of Szeged) - Andras Lasso (Queen’s University, Canada) - Jean-Christophe Fillion-Robin (Kitware Inc.) - + # Project Description -We want to do some airflow simulations in a finite element software but we didn't managed to import the 3D upper airway models which we made in the Slicer. - +We want to do some airflow simulations in a finite element software but we didn't managed to import the 3D upper airway models which we made in the Slicer. 
+ ## Objective diff --git a/PW30_2019_GranCanaria/Projects/Useof3DSlicerinTrainig/README.md b/PW30_2019_GranCanaria/Projects/Useof3DSlicerinTrainig/README.md index 1a0870fef..4d3b02f1e 100644 --- a/PW30_2019_GranCanaria/Projects/Useof3DSlicerinTrainig/README.md +++ b/PW30_2019_GranCanaria/Projects/Useof3DSlicerinTrainig/README.md @@ -1,6 +1,6 @@ Back to [Projects List](../../README.md#ProjectsList) -# Using 3D Slicer in University Biomedical Engineering Degrees +# Using 3D Slicer in University Biomedical Engineering Degrees ## Key Investigators @@ -23,7 +23,7 @@ There are many possible approaches to get an understanding of such multidiscipli For example, programming and software engineering skills can be addressed by developing some Python modules within 3D Slicer. Image and data processing algorithms can also be implemented as laboratory classes within it. Image guided therapy applications could give an opportunity to better understand and leverage the convergence of image computing, electric and mechanical engineering around practical problems. The available DICOM support shows in a practical way the need for standardization and how to deal with it in the specific case of medical images. For more specific topics, such as medical imaging, medical image computing, surgical planning or computer-assisted medical intervention 3D Slicer provides an advanced platform where many different engineering aspects can be trained. Moreover, 3D Slicer also provides an excellent tool to teach specific medical topics, both in the medical and the biomedical engineering schools, such as for example, anatomy or some surgeries. -The discussion should be kept as open as possible. Some universities might benefit by using Slicer in only one specific subject, while others could do it for different subjects across a university degree at any level (bachelor, master, PhD). +The discussion should be kept as open as possible. 
Some universities might benefit by using Slicer in only one specific subject, while others could do it for different subjects across a university degree at any level (bachelor, master, PhD). As a first element for discussion, a draft proposal is provided of a reference bachelor level degree in biomedical engineering. Its structure has been adopted by reviewing programs in different World universities. As a reference model, it should be adapted to the specific needs of each university and environment. @@ -43,4 +43,3 @@ As a first element for discussion, a draft proposal is provided of a reference b 3. Identify the proper role of 3D Slicer to improve biomedical engineering education. 4. Agree on a strategy to provide high quality open access content and identify funding sources to develop it. 5. Share continuously local experiences as they happen. - diff --git a/PW30_2019_GranCanaria/Projects/VisibleHuman/README.md b/PW30_2019_GranCanaria/Projects/VisibleHuman/README.md index 72effd0a1..abbd39f3c 100644 --- a/PW30_2019_GranCanaria/Projects/VisibleHuman/README.md +++ b/PW30_2019_GranCanaria/Projects/VisibleHuman/README.md @@ -11,15 +11,15 @@ Back to [Projects List](../../README.md#ProjectsList) # Project Description -The Visible Human Project produced a large data set of physical cross sections, MR, and CT of a human cadaver. -This data has been freely available but not redistributable for more almost two decades. Karl Heinz Hoehne from University of Hamburg -segmented the data and made beautiful rendering of the data in his VoxelMan system. He has made his segmentation available for open use, and we hope it +The Visible Human Project produced a large data set of physical cross sections, MR, and CT of a human cadaver. +This data has been freely available but not redistributable for more almost two decades. Karl Heinz Hoehne from University of Hamburg +segmented the data and made beautiful rendering of the data in his VoxelMan system. 
He has made his segmentation available for open use, and we hope it will allow us to redistribute the original data. -See: +See: * [Visible Human Project home page](https://www.nlm.nih.gov/research/visible/visible_human.html) * [VHP description from Wikipedia](https://en.wikipedia.org/wiki/Visible_Human_Project) -* [VoxelMan gallery](https://www.voxel-man.com/gallery/visible-human/) +* [VoxelMan gallery](https://www.virtual-body.org/gallery/visible-human/) ## Objective diff --git a/PW30_2019_GranCanaria/Projects/ohif_dcm4chee_kubernetes/README.md b/PW30_2019_GranCanaria/Projects/ohif_dcm4chee_kubernetes/README.md index 24ed02359..1d0c421f1 100644 --- a/PW30_2019_GranCanaria/Projects/ohif_dcm4chee_kubernetes/README.md +++ b/PW30_2019_GranCanaria/Projects/ohif_dcm4chee_kubernetes/README.md @@ -6,7 +6,7 @@ Back to [Projects List](../../README.md#ProjectsList) - Jonas Scherer (DKFZ) - Andrey Fedorov (BWH) -- Erik Ziegler (Radical Imaging) +- Erik Ziegler (Radical Imaging) # Project Description @@ -22,7 +22,7 @@ The aim of this project is to provide a simple environment in which a PACS (Dcm4 ## Approach and Plan -1. Setup Minikube with infrastructure components (Calico,Traefik...) +1. Setup Minikube with infrastructure components (Calico,Traefik...) 2. Deploy OHIF and DCM4CHEe 3. Add authentication with Keycloak and Security Proxy 4. Create Helm chart for easy installation @@ -119,12 +119,12 @@ You can find the project deployment in a **[GitHub Repo](https://github.com/jona ```cd /deployment``` ```kubectl apply -f ./* ``` -3) Watch ```kubectl get pods --all-namespaces``` +3) Watch ```kubectl get pods --all-namespaces``` -> all pods should be in the "running" state. This could take some time - don't worry if something is crashing - it will be restarted automatically.. 4) Go to -You should see the the login-page. +You should see the the login-page. 
The default credentials are: - For the normal login: username: **namic** @@ -155,7 +155,7 @@ To push an example image to DCM4CHE with the **dcm4che-tools docker container**: - Right now, this will just work in Minikube (IPs, ports etc. are hard-coded) - Minikube is pretty slow We should make this compatible with any Kubernetes setup -- OHIF viewer will be updated +- OHIF viewer will be updated - Instructions how to add a tls certificate diff --git a/PW30_2019_GranCanaria/Projects/ohif_web_components/README.md b/PW30_2019_GranCanaria/Projects/ohif_web_components/README.md index 488a05c9a..717f6558e 100644 --- a/PW30_2019_GranCanaria/Projects/ohif_web_components/README.md +++ b/PW30_2019_GranCanaria/Projects/ohif_web_components/README.md @@ -60,7 +60,7 @@ create plugins for OHIF, and also to create custom projects like [prostatecancer 1. Create a HelloWorld Extension with all types of modules and documentation 1. Improve testing and continuous integration 1. DICOM SR HTML Display Extension -1. DICOM TID1500 Measurement Report Table extension +1. DICOM TID1500 Measurement Report Table extension 1. Extract cornerstone plugin and add VTK extension # Background and References diff --git a/PW30_2019_GranCanaria/README.md b/PW30_2019_GranCanaria/README.md index 39218eab2..b43e9657a 100644 --- a/PW30_2019_GranCanaria/README.md +++ b/PW30_2019_GranCanaria/README.md @@ -6,7 +6,7 @@ - + @@ -27,11 +27,11 @@ Community-Based Open-Access Hackathons: the NA-MIC and 3D Slicer Experience](htt ## Photo Album -This is our second Project Week in Las Palmas. [Photos Project Week 30 in a Google Drive folder](https://drive.google.com/drive/folders/19nuPjpcHGNKOfJZozDwPeHQaYxmU3Mjq?usp=sharing) +This is our second Project Week in Las Palmas. 
[Photos Project Week 30 in a Google Drive folder](https://drive.google.com/drive/folders/19nuPjpcHGNKOfJZozDwPeHQaYxmU3Mjq?usp=sharing) [Google Photos album](https://photos.app.goo.gl/qv1mgzwbugnhf1iw8) -30 PW NA-MIC in [the media](PW30InTheMedia.md) +30 PW NA-MIC in [the media](PW30InTheMedia.md) First project wek in Las Palmas. [Project Week 28](https://projectweek.na-mic.org/PW28_2018_GranCanaria/) was held in this location as well, and captured in [Photos Project Week 28](https://drive.google.com/drive/folders/1Gh2L26K96d3jzpT7LReaPWAbc1Ao815p?usp=sharing), and [coverage by the local media](../PW28_2018_GranCanaria/PW28InTheMedia.md). diff --git a/PW31_2019_Boston/Breakouts/DataManagement/README.md b/PW31_2019_Boston/Breakouts/DataManagement/README.md index bb67144fb..705e643f7 100644 --- a/PW31_2019_Boston/Breakouts/DataManagement/README.md +++ b/PW31_2019_Boston/Breakouts/DataManagement/README.md @@ -30,4 +30,3 @@ Review options for hosting datasets. # Results of Discussion - diff --git a/PW31_2019_Boston/Breakouts/HumanBrainAtlas/README.md b/PW31_2019_Boston/Breakouts/HumanBrainAtlas/README.md index 684ee6dd6..cf6639a08 100644 --- a/PW31_2019_Boston/Breakouts/HumanBrainAtlas/README.md +++ b/PW31_2019_Boston/Breakouts/HumanBrainAtlas/README.md @@ -11,15 +11,15 @@ Back to [Project Week](../../README.md) # Breakout Description -The goal of the project is to develop and disseminate state-of-the-art, high-resolution full brain anatomical atlases, based on the manual parcellation of 200 MRI images provided by the Human Connectome Project. -This atlas will be made compatible with anatomical nomenclature, easily portable to the majority of neuroscience tools and software platforms, and editable, so other experts can contribute their anatomical knowledge to the tool. 
+The goal of the project is to develop and disseminate state-of-the-art, high-resolution full brain anatomical atlases, based on the manual parcellation of 200 MRI images provided by the Human Connectome Project. +This atlas will be made compatible with anatomical nomenclature, easily portable to the majority of neuroscience tools and software platforms, and editable, so other experts can contribute their anatomical knowledge to the tool. ## Agenda - 9:00-9:10 – Sylvain Bouix – Welcome – Status Update -- 9:10-9:30 – Nikos Makris – Anatomical Brain Mapping +- 9:10-9:30 – Nikos Makris – Anatomical Brain Mapping - 9:30-9:50 – Jarrett Rushmore – Harvard Oxford Atlas in Human and Macaque - 9:50-10:15 - Discussion - 10:15-10:30 - Break diff --git a/PW31_2019_Boston/Breakouts/Infrastructure/README.md b/PW31_2019_Boston/Breakouts/Infrastructure/README.md index ae06cac89..d04ac1b6e 100644 --- a/PW31_2019_Boston/Breakouts/Infrastructure/README.md +++ b/PW31_2019_Boston/Breakouts/Infrastructure/README.md @@ -3,7 +3,7 @@ Back to [Project Week](../../README.md) # Slicer Infrastructure Brainstorming * 4-5 pm Wednesday Jun 26, 2019 -* Room 32D-407 +* Room 32D-407 ## Organizers @@ -59,13 +59,12 @@ to address new use cases and user communities. 
- better error / warning messages - on-the-fly resampling to fit in memory * Sweet spot seems to be looking at pyramid encoding options - - file formats like HDF 5 + - file formats like HDF 5 - servers like dicomweb for larger data - there's some precedent but nothing completely standard at the moment - + ## Keyframing * Good to build on ScreenCapture and Sequences * See if we can build something 'simple' something like powerpoint build effects in ScreenCapture * Longer term project to build a nice keyframe/timeline slider (Kitware will follow up with Murat on that) - diff --git a/PW31_2019_Boston/Breakouts/Template.md b/PW31_2019_Boston/Breakouts/Template.md index 98b2f99f5..d126463c2 100644 --- a/PW31_2019_Boston/Breakouts/Template.md +++ b/PW31_2019_Boston/Breakouts/Template.md @@ -29,4 +29,3 @@ Back to [Project Week](../../README.md) # Results of Discussion - diff --git a/PW31_2019_Boston/PreparatoryMeetingsNotes.md b/PW31_2019_Boston/PreparatoryMeetingsNotes.md index b3d69dd94..6b29e711a 100644 --- a/PW31_2019_Boston/PreparatoryMeetingsNotes.md +++ b/PW31_2019_Boston/PreparatoryMeetingsNotes.md @@ -1,4 +1 @@ These are notes from the Project Week Preparation Meetings. - - - diff --git a/PW31_2019_Boston/Projects/BronchoscopeLocalizationFromDepthMaps/README.md b/PW31_2019_Boston/Projects/BronchoscopeLocalizationFromDepthMaps/README.md index 798cfd3d5..5c88d1523 100644 --- a/PW31_2019_Boston/Projects/BronchoscopeLocalizationFromDepthMaps/README.md +++ b/PW31_2019_Boston/Projects/BronchoscopeLocalizationFromDepthMaps/README.md @@ -11,7 +11,7 @@ Back to [Projects List](../../README.md#ProjectsList) # Project Description -The goal is to localize a bronchoscope through the use of depth maps generated from bronchoscopy images using neural networks. +The goal is to localize a bronchoscope through the use of depth maps generated from bronchoscopy images using neural networks. 
## Objective @@ -34,7 +34,7 @@ The goal is to localize a bronchoscope through the use of depth maps generated f 1. The steps we have already completed is the training and testing of the neural networks used to generate depth maps. -2. We have converted a depth map into a point cloud. +2. We have converted a depth map into a point cloud. 3. We have fixed the issue regarding the size and location of the point cloud relative to its actual position in the phantom lung. 4. We also were able to register the point cloud to the CT scan in Slicer using Model/Surface Registration. 5. The next step is to improve training so the predicted depth maps are more accurate. diff --git a/PW31_2019_Boston/Projects/ClubfootCasts/README.md b/PW31_2019_Boston/Projects/ClubfootCasts/README.md index 16eb82a7c..14125540f 100644 --- a/PW31_2019_Boston/Projects/ClubfootCasts/README.md +++ b/PW31_2019_Boston/Projects/ClubfootCasts/README.md @@ -15,7 +15,7 @@ Back to [Projects List](../../README.md#ProjectsList) About 1-2 in every 1000 babies are born with what's called clubfoot, the most common skeletal deformity in children. Clubfoot is well treated using a plaster casting method developed in the 1960s, but there is a potential to use -3D scanning and printing techniques to make the process more efficient and cost-effective. Also easier activities of +3D scanning and printing techniques to make the process more efficient and cost-effective. Also easier activities of daily living for families with plastic instead of plaster casts. @@ -39,7 +39,7 @@ daily living for families with plastic instead of plaster casts. 1. 3D printing the deformed clubfoot model 1. Repeating the process until printing the normal cast -![Approach](Approach.png) +![Approach](Approach.png) ## Progress and Next Steps @@ -51,7 +51,7 @@ daily living for families with plastic instead of plaster casts. 
- Ideas: - Using off the shelf 3D scanners - Currently we are using this approach but the result is not good - - Molding the foot and CT scan the mold + - Molding the foot and CT scan the mold - Using motion capture cameras/techniques - Using a parametrized generic 3D model (No need to scan) @@ -68,7 +68,7 @@ daily living for families with plastic instead of plaster casts. |--------|------| | Before | After | -- **3D Print** +- **3D Print** - Cost Effectiveness - Easy to apply/clean/remove @@ -83,8 +83,8 @@ daily living for families with plastic instead of plaster casts. # Files 3D Models: -- [Stage 0](Models/stage0.vtk) -- [Stage 3](Models/stage3.vtk) +- [Stage 0](Models/stage0.vtk) +- [Stage 3](Models/stage3.vtk) # Background and References diff --git a/PW31_2019_Boston/Projects/ConfocalMicroscpy/README.md b/PW31_2019_Boston/Projects/ConfocalMicroscpy/README.md index 40ca5983b..d6fea90e7 100644 --- a/PW31_2019_Boston/Projects/ConfocalMicroscpy/README.md +++ b/PW31_2019_Boston/Projects/ConfocalMicroscpy/README.md @@ -37,7 +37,7 @@ See how Slicer can be used for microscopy images. Click image to see movie: -[![Confocal images in Slicer](http://img.youtube.com/vi/JJJVzvVtwtw/0.jpg)](https://youtu.be/JJJVzvVtwtw "Confocal volume rendering") +[![Confocal images in Slicer](https://img.youtube.com/vi/JJJVzvVtwtw/0.jpg)](https://youtu.be/JJJVzvVtwtw "Confocal volume rendering") # Background and References @@ -49,4 +49,3 @@ Click image to see movie: --> [steve]: https://github.com/pieper - diff --git a/PW31_2019_Boston/Projects/Connect_SPINE_and_XNAT/README.md b/PW31_2019_Boston/Projects/Connect_SPINE_and_XNAT/README.md index 1e17cf515..5d08bc073 100644 --- a/PW31_2019_Boston/Projects/Connect_SPINE_and_XNAT/README.md +++ b/PW31_2019_Boston/Projects/Connect_SPINE_and_XNAT/README.md @@ -20,14 +20,14 @@ Connect SPINE with XNAT. -1. Objective A. Understand the API and data structured exposed by XNAT +1. Objective A. 
Understand the API and data structured exposed by XNAT 1. Objective B. Define configuration parameters to be used when connecting/replicating data from XNAT to SPINE ## Approach and Plan -1. Understand the API and data structured exposed by XNAT in order to get access and extract data +1. Understand the API and data structured exposed by XNAT in order to get access and extract data 1. Test the API exposed by XNAT in a real scenario 1. Implement a replication protocol to import data from XNAT diff --git a/PW31_2019_Boston/Projects/DICOMImportSpectroscopy/README.md b/PW31_2019_Boston/Projects/DICOMImportSpectroscopy/README.md index 7421eaa8e..7ae903d1d 100644 --- a/PW31_2019_Boston/Projects/DICOMImportSpectroscopy/README.md +++ b/PW31_2019_Boston/Projects/DICOMImportSpectroscopy/README.md @@ -16,7 +16,7 @@ Algorithms have previous been coded in Matlab, and it is proposed their translat ## Objectives -1. To implement a standard uploading mechanism of DICOM images and MR spectroscopic data in FMRSI module +1. To implement a standard uploading mechanism of DICOM images and MR spectroscopic data in FMRSI module ## Approach and Plan @@ -32,7 +32,7 @@ Algorithms have previous been coded in Matlab, and it is proposed their translat ## Illustrations Fig. 1: FMRSI interface - + Fig. 2: Open Dialog - + diff --git a/PW31_2019_Boston/Projects/DICOM_object_for_3D_structures/README.md b/PW31_2019_Boston/Projects/DICOM_object_for_3D_structures/README.md index 9c7814627..ffd93eb7e 100644 --- a/PW31_2019_Boston/Projects/DICOM_object_for_3D_structures/README.md +++ b/PW31_2019_Boston/Projects/DICOM_object_for_3D_structures/README.md @@ -15,9 +15,9 @@ Back to [Projects List](../../README.md#ProjectsList) -1. Create and vizualise in DICOM module 3D distance map as a part of structure set. +1. Create and vizualise in DICOM module 3D distance map as a part of structure set. 2. Additionally have iso-distance levels as structure labelmaps. -1. GUI to select iso-distance levels. +1. 
GUI to select iso-distance levels. ## Approach and Plan diff --git a/PW31_2019_Boston/Projects/GLSLShaders/README.md b/PW31_2019_Boston/Projects/GLSLShaders/README.md index de122ba72..468d45166 100644 --- a/PW31_2019_Boston/Projects/GLSLShaders/README.md +++ b/PW31_2019_Boston/Projects/GLSLShaders/README.md @@ -24,7 +24,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## Progress and Next Steps - Implemented GLSL based bilateral filter -- Started implementing infrastructure required for rendering to multiple targets in order to support GPU growcut algorithm +- Started implementing infrastructure required for rendering to multiple targets in order to support GPU growcut algorithm ### Result | Approach | Result | diff --git a/PW31_2019_Boston/Projects/GithubAsAuthoritativeVersionControlSystem/README.md b/PW31_2019_Boston/Projects/GithubAsAuthoritativeVersionControlSystem/README.md index 120049019..277e02162 100644 --- a/PW31_2019_Boston/Projects/GithubAsAuthoritativeVersionControlSystem/README.md +++ b/PW31_2019_Boston/Projects/GithubAsAuthoritativeVersionControlSystem/README.md @@ -63,4 +63,3 @@ to * [git_list_largest_file_from_history.sh](https://gist.github.com/jcfr/4348af13d2c8931daeab4ff9ab73e14b) * [slicer_git_history_350_largest_files.txt](https://gist.github.com/jcfr/93fe51974d9db8ef55a6d3172c1de68d) * [02_Update_Slicer_CLI_buildsystem_to_download_test_data_from_midas.ipynb](https://github.com/jcfr/jupyter-notebooks/blob/master/02_Update_Slicer_CLI_buildsystem_to_download_test_data_from_midas.ipynb) - diff --git a/PW31_2019_Boston/Projects/Globalization3DSlicer_OHIF/README.md b/PW31_2019_Boston/Projects/Globalization3DSlicer_OHIF/README.md index 572e9a90f..ed1068266 100644 --- a/PW31_2019_Boston/Projects/Globalization3DSlicer_OHIF/README.md +++ b/PW31_2019_Boston/Projects/Globalization3DSlicer_OHIF/README.md @@ -11,9 +11,9 @@ Back to [Projects List](../../README.md#ProjectsList) ## Project Description -The 
software globalization is the translation process of software from a source language into a target language. The process is divided into two steps: software internationalization (i18n) and software localization (L10n). The former is the task of designing software without a specific location i.e. building it independent of any specific language or culture. The latter is the process of adapting a software for a specific location. +The software globalization is the translation process of software from a source language into a target language. The process is divided into two steps: software internationalization (i18n) and software localization (L10n). The former is the task of designing software without a specific location i.e. building it independent of any specific language or culture. The latter is the process of adapting a software for a specific location. -The aim of this project is to develop/improve the 3D Slicer internationalization (i18n) and localization (l10n) support. This project is a next step in the [one](https://github.com/NA-MIC/ProjectWeek/blob/master/PW30_2019_GranCanaria/Projects/UltrasoundSimulatorTraining/README.md) presented during 30th PW NA-MIC. +The aim of this project is to develop/improve the 3D Slicer internationalization (i18n) and localization (l10n) support. This project is a next step in the [one](https://github.com/NA-MIC/ProjectWeek/blob/master/PW30_2019_GranCanaria/Projects/UltrasoundSimulatorTraining/README.md) presented during 30th PW NA-MIC. Also, we will discuss about the globalization in OHIF. @@ -29,10 +29,10 @@ Also, we will discuss about the globalization in OHIF. 1. Enhancing the contexts for the text displayed in GUI for the translation files (ts). 1. Removing the *QObject::tr* and *q->tr* - 1. Removing the lupdate warnings: + 1. Removing the lupdate warnings: 1. *Cannot invoke tr() like this*. 1. *Class MyClass lacks Q_OBJECT macro*. -1. Designing a WEB page so as to store the language files. 
+1. Designing a WEB page so as to store the language files. 1. Developing the i18n support for scripted module. ## Progress and Next Steps @@ -47,7 +47,7 @@ Also, we will discuss about the globalization in OHIF. * [r28344](http://viewvc.slicer.org/viewvc.cgi/Slicer4?view=revision&revision=28344): ENH: i18n: Add support for node combox, 3d view controller and qSlicerApp * [r28345](http://viewvc.slicer.org/viewvc.cgi/Slicer4?view=revision&revision=28345): STYLE: Remove translation files with old-style suffix 1. Support for i18n was **improved**. - * See the [PR-1162: improve-i18n-support](https://github.com/Slicer/Slicer/pull/1162) + * See the [PR-1162: improve-i18n-support](https://github.com/Slicer/SlicerGitSVNArchive/pull/1162) * Support for translating scripted modules (`slicer.i18n.tr()`) * Generation of `_untranslated.ts` files * Add targets `GenerateSlicerTranslationTemplates` and `GenerateSlicerTranslationQMFiles` @@ -63,15 +63,15 @@ Also, we will discuss about the globalization in OHIF. ## Illustrations Fig. 1: Spanish Slicer GUI - + Fig. 
2: French Slicer GUI - + ## Background and References - [Translated Files](https://mt4sd.github.io/SlicerTranslatedFiles/) - [3D Slicer I18n support](https://www.slicer.org/wiki/Documentation/Labs/I18N) -- [Enabling I18n support ](https://discourse.slicer.org/t/slicer-internationalization/579) +- [Enabling I18n support ](https://discourse.slicer.org/t/slicer-internationalization/579) - [Globalization source code](https://github.com/mt4sd/Slicer/tree/support_i18n_l10n) - [Forgot tr](https://doc.qt.io/archives/qq/qq03-swedish-chef.html) - [Qt internationalization](https://doc.qt.io/qt-5/internationalization.html) diff --git a/PW31_2019_Boston/Projects/InferenceEnginesInCustusX/README.md b/PW31_2019_Boston/Projects/InferenceEnginesInCustusX/README.md index 3e52eaf2c..207122889 100644 --- a/PW31_2019_Boston/Projects/InferenceEnginesInCustusX/README.md +++ b/PW31_2019_Boston/Projects/InferenceEnginesInCustusX/README.md @@ -22,12 +22,12 @@ Running trained Deep Learning networks with inference engines. The focus will be -1. Use the [FAST](https://github.com/smistad/FAST) library for inference engine support. +1. Use the [FAST](https://github.com/smistad/FAST) library for inference engine support. ## Progress and Next Steps -The task of implementing support for multiple inference engines proved too large for Project Week. +The task of implementing support for multiple inference engines proved too large for Project Week. We ended up using the [OpenVINO Toolkit](https://docs.openvinotoolkit.org/) directly. The OpenVINO inference engine allows us to run the trained networks on the various Intel devices (CPU, GPU, FPFA, Movidius Stick, ...), so this choice still provides us with a decent multi-platform solution. @@ -49,4 +49,3 @@ Currently we got several research projects where deep learning networks are crea We want to be able to run these networks from inside CustusX to allow a more seamless integration in the OR. 
Some projects require the deep learning networks to run in real time, and in these cases they will need to run them on inference engines. [Video: Highlighting nerves and blood vessels on ultrasound images](https://youtu.be/06HTxmmu0mg) - diff --git a/PW31_2019_Boston/Projects/NeuroSegmentation/README.md b/PW31_2019_Boston/Projects/NeuroSegmentation/README.md index f698a60bd..8e5932722 100644 --- a/PW31_2019_Boston/Projects/NeuroSegmentation/README.md +++ b/PW31_2019_Boston/Projects/NeuroSegmentation/README.md @@ -16,7 +16,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## Objective -1. Design and create a dedicated neuroanatomy segmentation module +1. Design and create a dedicated neuroanatomy segmentation module 1. Adopt an approach for sulcal definition ## Approach and Plan @@ -40,7 +40,7 @@ Back to [Projects List](../../README.md#ProjectsList) - New functionalities for [Surface Assisted Cortical Parcellation](https://www.dropbox.com/s/go5q8p49akuj4p8/CMA-SurfaceAssistedCorticalParcellation-Manual.pdf?dl=0) 1. Identified requirements to draw lines in sulcal pits -1. Presented the underlying principles for cortical parcellation (cortical ribbon is being parcellted using a combination of fiducial landmarks representing a set of planes and sulcal lines) +1. Presented the underlying principles for cortical parcellation (cortical ribbon is being parcellted using a combination of fiducial landmarks representing a set of planes and sulcal lines) # Illustrations ![](Segmentation1.png) diff --git a/PW31_2019_Boston/Projects/OHIFGLSLPlugins/README.md b/PW31_2019_Boston/Projects/OHIFGLSLPlugins/README.md index add034a72..60630ef27 100644 --- a/PW31_2019_Boston/Projects/OHIFGLSLPlugins/README.md +++ b/PW31_2019_Boston/Projects/OHIFGLSLPlugins/README.md @@ -37,9 +37,9 @@ Make use of the OHIF plugin infrstructure. 
![Example of GLSL GrowCut](GLSLGrowCut.png) -* Prototype implementation: +* Prototype implementation: -[![GrowCut prototype](http://img.youtube.com/vi/xmvVyftgNjY/0.jpg)](http://www.youtube.com/watch?v=xmvVyftgNjY "GrowCut prototype") +[![GrowCut prototype](https://img.youtube.com/vi/xmvVyftgNjY/0.jpg)](https://www.youtube.com/watch?v=xmvVyftgNjY "GrowCut prototype") # Background and References @@ -64,4 +64,3 @@ Make use of the OHIF plugin infrstructure. [ohif-extensions]: https://docs.ohif.org/advanced/extensions.html [ohif]: http://ohif.org/ [james-magic]: https://github.com/JamesAPetts/OHIF-Viewer-XNAT/tree/xnatRoi-dev-vNext/Packages/icr-peppermint-tools - diff --git a/PW31_2019_Boston/Projects/OpenJPEG-JS/README.md b/PW31_2019_Boston/Projects/OpenJPEG-JS/README.md index cee45e8d0..f3557e8e9 100644 --- a/PW31_2019_Boston/Projects/OpenJPEG-JS/README.md +++ b/PW31_2019_Boston/Projects/OpenJPEG-JS/README.md @@ -58,4 +58,3 @@ There aren't any screenshots for cross-compiled code. [james]: https://github.com/jamesapetts [erik]: https://github.com/swederik [steve]: https://github.com/pieper - diff --git a/PW31_2019_Boston/Projects/PythonPackages/README.md b/PW31_2019_Boston/Projects/PythonPackages/README.md index a4d082ba5..a7efb93af 100644 --- a/PW31_2019_Boston/Projects/PythonPackages/README.md +++ b/PW31_2019_Boston/Projects/PythonPackages/README.md @@ -31,7 +31,7 @@ The objective is to have a consistent interface / process for installing patyhon 3. Document experience installing and using various python packages in Slicer * which ones work well with no problems * what properties of some packages lead to problems (e.g. 
conflicting dependencies) - + ## Progress and Next Steps diff --git a/PW31_2019_Boston/Projects/ROS-MED/README.md b/PW31_2019_Boston/Projects/ROS-MED/README.md index e228199fd..a4e783fa6 100644 --- a/PW31_2019_Boston/Projects/ROS-MED/README.md +++ b/PW31_2019_Boston/Projects/ROS-MED/README.md @@ -9,7 +9,7 @@ Back to [Projects List](../../README.md#ProjectsList) # Project Description We will extend the tutorial on ROS-MED, a platform for medical robotics research based on ROS and 3D Slicer. -Specifically, we will find out a way to import the robot model from CAD software. We will use a needle guide manipulator for MR-guided prostate biopsy (Smart Template) as an example project. +Specifically, we will find out a way to import the robot model from CAD software. We will use a needle guide manipulator for MR-guided prostate biopsy (Smart Template) as an example project. Details of ROS-MED can be found in [our recent workshop at International Symposium on Medical Robotics](https://rosmed.github.io/) diff --git a/PW31_2019_Boston/Projects/RealSensePatientRegistration/README.md b/PW31_2019_Boston/Projects/RealSensePatientRegistration/README.md index 56ec11013..9cdd8db24 100644 --- a/PW31_2019_Boston/Projects/RealSensePatientRegistration/README.md +++ b/PW31_2019_Boston/Projects/RealSensePatientRegistration/README.md @@ -10,9 +10,10 @@ Back to [Projects List](../../README.md#ProjectsList) # Project Description -The goal of this project is to develop a module in Slicer for registering [Intel RealSense][realsense] depth data to a CT scan of a patient's -head and maintain this registration using an optical marker. The module will use facial surface anatomy to perform initial -registration between the RealSense and a CT scan. 
An optical marker will be rigidly fixed to the patient's head within the +The goal of this project is to develop a module in Slicer for registering [Intel RealSense][realsense] depth data to a CT scan of a patient's +head and maintain this +tion using an optical marker. The module will use facial surface anatomy to perform initial +registration between the RealSense and a CT scan. An optical marker will be rigidly fixed to the patient's head within the camera's field of view and used to preserve registration when the patient's face is no longer visible. ## Objective @@ -25,7 +26,7 @@ Objective B: Maintain patient registration after face is no longer visible using -1. Acquire depth images from the RealSense in Slicer and use them to generate a point cloud +1. Acquire depth images from the RealSense in Slicer and use them to generate a point cloud 2. Determine how to stream these depth images and calculate the point cloud in real time 3. Use a captured point cloud to verify the accuracy of the registration to a model 4. Use Slicer to track an optical marker defined rigidly relative to the real time point cloud @@ -37,7 +38,7 @@ Objective B: Maintain patient registration after face is no longer visible using 1. Created Slicer module for generating a point cloud from a depth image 2. Demonstrated streaming of real-time point clouds from depth images captured by the RealSense -3. Investigated accuracy of Model Registration module for registering captured point clouds to dense models +3. Investigated accuracy of [Model Registration module](https://github.com/SlicerIGT/SlicerIGT/blob/master/ModelRegistration/ModelRegistration.py) for registering captured point clouds to dense models 4. 
Explored RealSense parameters to find settings best suited to close range facial scanning # Illustrations @@ -71,3 +72,4 @@ Objective B: Maintain patient registration after face is no longer visible using - [DepthImageToPointCloud](https://github.com/PerkLab/DepthImageToPointCloud): Slicer extension to display point cloud data streamed from IntelRealsense camera - [Intel RealSense](https://www.intelrealsense.com/stereo-depth) +- [SlicerIGT extension](https://www.slicerigt.org/) - [Model Registration module](https://github.com/SlicerIGT/SlicerIGT/blob/master/ModelRegistration/ModelRegistration.py) diff --git a/PW31_2019_Boston/Projects/RealSensePatientRegistration/SlicerPointCloudTracking/DepthImageToPointCloud/DepthImageToPointCloud.py b/PW31_2019_Boston/Projects/RealSensePatientRegistration/SlicerPointCloudTracking/DepthImageToPointCloud/DepthImageToPointCloud.py index 306ae6d0d..b0668618f 100644 --- a/PW31_2019_Boston/Projects/RealSensePatientRegistration/SlicerPointCloudTracking/DepthImageToPointCloud/DepthImageToPointCloud.py +++ b/PW31_2019_Boston/Projects/RealSensePatientRegistration/SlicerPointCloudTracking/DepthImageToPointCloud/DepthImageToPointCloud.py @@ -69,10 +69,10 @@ def onSelect(self): def onApplyButton(self): logic = DepthImageToPointCloudLogic() - cameraParams = {"focalLength" : self.ui.focalLength.value, + cameraParams = {"focalLength" : self.ui.focalLength.value, "principlePointX" : self.ui.principlePointX.value, "principlePointY" : self.ui.principlePointY.value} - pointCloudParams = {"thresholdLower" : self.ui.thresholdLower.value, + pointCloudParams = {"thresholdLower" : self.ui.thresholdLower.value, "thresholdUpper" : self.ui.thresholdUpper.value, "depthScale" : self.ui.depthScale.value} logic.run(self.ui.inputSelector.currentNode(), self.ui.outputSelector.currentNode(), cameraParams, pointCloudParams) @@ -132,13 +132,13 @@ def depthDataCallback(self,caller, eventId): polyData = generatePolyData(listOfPoints) 
pointCloudModel.SetAndObservePolyData(polyData) - {"focalLength" : self.ui.focalLength.value, + {"focalLength" : self.ui.focalLength.value, "principlePointX" : self.ui.principlePointX.value, "principlePointY" : self.ui.principlePointY.value} #Takes a dim[0] by dim[1] numpy array, and returns a list of [X Y Z] coordinates def generate_pointcloud(self,depthData, cameraParams, pointCloudParams): - points = [] + points = [] for i in range(0,depthData.shape[0],1): for j in range(0,depthData.shape[1],1): Z = depthData[i,j] / pointCloudParams['depthScale'] @@ -177,7 +177,7 @@ def run(self, inputVolume, outputVolume, cameraParams, pointCloudParams): inputImage = inputVolume.GetImageData() imageDims = inputImage.GetDimensions()[0:2] - + #Extract the vtkImageData from the vtkMRMLScalarVolumeNode extractFilterDepth = vtk.vtkImageExtractComponents() extractFilterDepth.SetInputData(inputImage) @@ -194,7 +194,7 @@ def run(self, inputVolume, outputVolume, cameraParams, pointCloudParams): polyData = self.generatePolyData(listOfPoints) outputVolume.SetAndObservePolyData(polyData) - + logging.info('Processing completed') return True diff --git a/PW31_2019_Boston/Projects/SegOverlay_ProstateCancerAI/README.md b/PW31_2019_Boston/Projects/SegOverlay_ProstateCancerAI/README.md index f47157817..9e13e41d0 100644 --- a/PW31_2019_Boston/Projects/SegOverlay_ProstateCancerAI/README.md +++ b/PW31_2019_Boston/Projects/SegOverlay_ProstateCancerAI/README.md @@ -9,7 +9,7 @@ Back to [Projects List](../../README.md#ProjectsList) - [Steve Pieper][steve] ([Isomics, Inc.][Isomics]) - [Erik Ziegler][erik] ([Radical Imaging][radical]) - [Danny Brown][danny] ([Radical Imaging][radical]) -- [Anneke Meyer][anneke] +- [Anneke Meyer][anneke] - [Alireza Sedghi][alireza] ([Medical Informatics Lab][med-i-lab]) - [Parvin Mousavi][parvin] ([Medical Informatics Lab][med-i-lab]) diff --git a/PW31_2019_Boston/Projects/SegmentationStatisticsSpectroscopy/README.md 
b/PW31_2019_Boston/Projects/SegmentationStatisticsSpectroscopy/README.md index 6c456c355..5b5d5a12e 100644 --- a/PW31_2019_Boston/Projects/SegmentationStatisticsSpectroscopy/README.md +++ b/PW31_2019_Boston/Projects/SegmentationStatisticsSpectroscopy/README.md @@ -22,7 +22,7 @@ Algorithms have previous been coded in Matlab, and it is proposed their translat 1. Implementing highlighting of brain cortex areas related to voxel data taken from magnetic resonance spectroscopy (MRS) files. 1. Implementing and verifying the mechanism for data exchanging between time-resolved module and quantification serviced located in a secure external server. -1. Display time-resolved metabolite concentrations on a plot chart +1. Display time-resolved metabolite concentrations on a plot chart ## Progress and Next Steps 1. Assessment of different options to implement graphic charts, including importing matplotlib library to generate complex graphics @@ -31,18 +31,16 @@ Algorithms have previous been coded in Matlab, and it is proposed their translat ## Illustrations Fig. 1: FMRSI interface - + Fig. 2: Open Dialog - + Fig. 3: Quantification of average spectrum - + -Fig. 4: Time-resolved quantification - +Fig. 4: Time-resolved quantification + Fig. 5: Selected metabolite time serie - - - + diff --git a/PW31_2019_Boston/Projects/SlicerCIP/README.md b/PW31_2019_Boston/Projects/SlicerCIP/README.md index 3860927a9..dfe57da60 100644 --- a/PW31_2019_Boston/Projects/SlicerCIP/README.md +++ b/PW31_2019_Boston/Projects/SlicerCIP/README.md @@ -30,7 +30,7 @@ Migrate the SlicerCIP extension to the last Slicer stable release, and "pave the 1. SlicerCIP is now available in the Slicer 4.10 Release. -2. Pre-packaged SlicerCIP is also available in chestimagingplatform.org +2. 
Pre-packaged SlicerCIP is also available in chestimagingplatform.org # Illustrations ![Chest Imaging Platform website](SlicerCIP.png) diff --git a/PW31_2019_Boston/Projects/SlicerFEniCS/README.md b/PW31_2019_Boston/Projects/SlicerFEniCS/README.md index 2d7d29405..4b3190935 100644 --- a/PW31_2019_Boston/Projects/SlicerFEniCS/README.md +++ b/PW31_2019_Boston/Projects/SlicerFEniCS/README.md @@ -31,7 +31,7 @@ is to integrate FEniCS into a Slicer extension. 1. Install FEniCS Python packages inside Slicer. 1. Run FEniCS demo inside Slicer. -1. Create Slicer FEniCS extension. +1. Create Slicer FEniCS extension. ## Progress and Next Steps diff --git a/PW31_2019_Boston/Projects/SlicerFEniCS/slicer-fenics-install.md b/PW31_2019_Boston/Projects/SlicerFEniCS/slicer-fenics-install.md index 0826cac36..096a3bc08 100644 --- a/PW31_2019_Boston/Projects/SlicerFEniCS/slicer-fenics-install.md +++ b/PW31_2019_Boston/Projects/SlicerFEniCS/slicer-fenics-install.md @@ -14,7 +14,7 @@ Set paths to Slicer and FEniCS MY_SLICER_DIR=/opt/slicer/Slicer-4.11.0-2019-06-24-linux-amd64/ MY_FENICS_SRC=~/projects/fenics/fenics-2019.1.0/ MY_FENICS_DIR=/opt/fenics/fenics-2019.1.0/ - + Set Python dependency versions MPI4PY_VERSION="==3.0.1" @@ -38,7 +38,7 @@ Download and install Python header files Set DOLFIN environment variables source ${MY_FENICS_DIR}/share/dolfin/dolfin.conf - + Install Python dependencies ${MY_SLICER_DIR}/bin/PythonSlicer -m pip install mpi4py${MPI4PY_VERSION} diff --git a/PW31_2019_Boston/Projects/TouchAndPenInteractions/README.md b/PW31_2019_Boston/Projects/TouchAndPenInteractions/README.md index d18a298ec..f58f000d5 100644 --- a/PW31_2019_Boston/Projects/TouchAndPenInteractions/README.md +++ b/PW31_2019_Boston/Projects/TouchAndPenInteractions/README.md @@ -46,5 +46,5 @@ Implement and integrate robust interactions using both touchscreen gestures and # Background and References -- [WIP Slicer branch](https://github.com/Sunderlandkyl/Slicer/tree/gesture_interaction4) 
+- [WIP Slicer branch](https://github.com/Sunderlandkyl/Slicer/tree/gesture_interaction4) - [WIP VTK branch](https://github.com/Sunderlandkyl/VTK/tree/slicer_qt_gestures2) diff --git a/PW31_2019_Boston/Projects/UltrasoundFeatureCorrespondence/README.md b/PW31_2019_Boston/Projects/UltrasoundFeatureCorrespondence/README.md index 7adeb3166..e6e4edcd6 100644 --- a/PW31_2019_Boston/Projects/UltrasoundFeatureCorrespondence/README.md +++ b/PW31_2019_Boston/Projects/UltrasoundFeatureCorrespondence/README.md @@ -12,7 +12,7 @@ Back to [Projects List](../../README.md#ProjectsList) # Project Description -The overarching goal of this project is to use pre- and intra-operative imaging to help neurosurgeons determine whether gross total resection has been achieved. +The overarching goal of this project is to use pre- and intra-operative imaging to help neurosurgeons determine whether gross total resection has been achieved. ## Objective @@ -44,9 +44,9 @@ The overarching goal of this project is to use pre- and intra-operative imaging ![Description of picture](Example2.jpg) ![Some more images](Example2.jpg) --> -1. Picture of the fiducial on the T2 pre-operative MRI image located on a sulcus +1. Picture of the fiducial on the T2 pre-operative MRI image located on a sulcus ![Picture of the fiducial on the T2 pre-operative MRI image located on a sulcus](ultrasound1.png) -2. Picture of the pre-dura Ultrasound scan on top of the MRI scan showing the same fiducial on the sulcus +2. Picture of the pre-dura Ultrasound scan on top of the MRI scan showing the same fiducial on the sulcus ![Picture of the pre-dura Ultrasound scan on top of the MRI scan showing the same fiducial on the sulcus](ultrasound2.png) 3. 
Screenshot of the pre-dura ultrasound with the fiducial on the sulcus ![Screenshot of the pre-dura ultrasound with the fiducial on the sulcus](ultrasound3.png) diff --git a/PW31_2019_Boston/Projects/Virtualrealityinterfaceimprovement/README.md b/PW31_2019_Boston/Projects/Virtualrealityinterfaceimprovement/README.md index 337bf39b8..60c764ad0 100644 --- a/PW31_2019_Boston/Projects/Virtualrealityinterfaceimprovement/README.md +++ b/PW31_2019_Boston/Projects/Virtualrealityinterfaceimprovement/README.md @@ -1,6 +1,6 @@ Back to [Projects List](../../README.md#ProjectsList) -# Virtual reality interface improvement +# Virtual reality interface improvement ## Key Investigators diff --git a/PW31_2019_Boston/Projects/VolumeRenderingImprovements/README.md b/PW31_2019_Boston/Projects/VolumeRenderingImprovements/README.md index 3728944d5..eb13ed271 100644 --- a/PW31_2019_Boston/Projects/VolumeRenderingImprovements/README.md +++ b/PW31_2019_Boston/Projects/VolumeRenderingImprovements/README.md @@ -10,7 +10,7 @@ Back to [Projects List](../../README.md#ProjectsList) - [Matthew Jolley](http://www.chop.edu/doctors/jolley-matthew-a) (Children's Hospital of Philadelphia) - [Steve Pieper](http://www.spl.harvard.edu/pages/People/pieper) (Isomics) - [Csaba Pinter](http://perk.cs.queensu.ca/users/pinter) (Queen's University, Canada) -- [Simon Drouin](http://nist.mni.mcgill.ca/?page_id=369) (Montreal Neurological Institute, Canada) +- [Simon Drouin](http://nist.mni.mcgill.ca/?page_id=369) (Montreal Neurological Institute, Canada) # Project Description diff --git a/PW31_2019_Boston/Projects/WMA/README.md b/PW31_2019_Boston/Projects/WMA/README.md index e1dd75881..37081a5a1 100644 --- a/PW31_2019_Boston/Projects/WMA/README.md +++ b/PW31_2019_Boston/Projects/WMA/README.md @@ -23,8 +23,8 @@ Software for data-driven white matter parcellation 1. Improve user documentation and results checking documentation based on user feedback. 1. Investigate alternatives for documentation. 1. 
Create batch script to run entire pipeline. -1. Update all versions of documentation; remove unused. -1. http://dmri.slicer.org/whitematteranalysis/ +1. Update all versions of documentation; remove unused. +1. http://dmri.slicer.org/whitematteranalysis/ 1. https://projects.iq.harvard.edu/whitematteranalysis/publications ## Approach and Plan @@ -40,7 +40,7 @@ Software for data-driven white matter parcellation 1. Batch script for running the fiber clustering pipeline is done. -1. Tested by mutltiple collaborators. +1. Tested by mutltiple collaborators. 1. Documentation of step-by-step instructions is done. It currently in a Google Doc and needs to be released donline soon. # Illustrations diff --git a/PW31_2019_Boston/Projects/WMAAging/README.md b/PW31_2019_Boston/Projects/WMAAging/README.md index 829ef3b2a..cb014230f 100644 --- a/PW31_2019_Boston/Projects/WMAAging/README.md +++ b/PW31_2019_Boston/Projects/WMAAging/README.md @@ -33,9 +33,9 @@ Back to [Projects List](../../README.md#ProjectsList) -1. The team has made substantial progress on integration UKF tractography and WM labeling tools (BWH team) with existing functionality to identify cerebral microbleeds (USC team). -2. An approach to identify, analyze and quantify changes in FA along major WM fasciculi of the brain was agreed upon and prototyped. -3. Testing and validation of integrated workflows was performed and the two teams made progress on identifying goals for future collaboration. +1. The team has made substantial progress on integration UKF tractography and WM labeling tools (BWH team) with existing functionality to identify cerebral microbleeds (USC team). +2. An approach to identify, analyze and quantify changes in FA along major WM fasciculi of the brain was agreed upon and prototyped. +3. Testing and validation of integrated workflows was performed and the two teams made progress on identifying goals for future collaboration. 
# Illustrations @@ -49,8 +49,6 @@ Back to [Projects List](../../README.md#ProjectsList) # Background and References -1. Di Fan, Nikhil N. Chaudhari, Kenneth A. Rostowsky, Maria Calvillo, Sean K. Lee, Nahian F. Chowdhury, Fan Zhang, Lauren J. O’Donnell and Andrei Irimia. Post-Traumatic Cerebral Microhemorrhages and Their Effects upon White Matter Connectivity in the Aging Human Brain[C]. 41th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC 2019). -2. Isaiah Norton, Ibn Essayed, Fan Zhang, Sonia Pujol, Alex Yarmarkovich, Alexandra Golby, Gordon Kindlmann, Demian Wasserman, Raul San José Estepar, Yogesh Rathi, Steve Pieper, Ron Kikinis, Hans Johnson, Carl-Fredrik Westin, and Lauren J. O'Donnell. 2017. “SlicerDMRI: Open Source Diffusion MRI Software for Brain Cancer Research.” Cancer Research, 77, 21, Pp. e101–103. +1. Di Fan, Nikhil N. Chaudhari, Kenneth A. Rostowsky, Maria Calvillo, Sean K. Lee, Nahian F. Chowdhury, Fan Zhang, Lauren J. O’Donnell and Andrei Irimia. Post-Traumatic Cerebral Microhemorrhages and Their Effects upon White Matter Connectivity in the Aging Human Brain[C]. 41th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC 2019). +2. Isaiah Norton, Ibn Essayed, Fan Zhang, Sonia Pujol, Alex Yarmarkovich, Alexandra Golby, Gordon Kindlmann, Demian Wasserman, Raul San José Estepar, Yogesh Rathi, Steve Pieper, Ron Kikinis, Hans Johnson, Carl-Fredrik Westin, and Lauren J. O'Donnell. 2017. “SlicerDMRI: Open Source Diffusion MRI Software for Brain Cancer Research.” Cancer Research, 77, 21, Pp. e101–103. 3. Fan Zhang, Ye Wu, Isaiah Norton, Laura Rigolo, Yogesh Rathi, Nikos Makris, and Lauren J. O'Donnell. 2018. “An anatomically curated fiber clustering white matter atlas for consistent white matter tract parcellation across the lifespan.” NeuroImage, 179, Pp. 429-447. 
- - diff --git a/PW31_2019_Boston/Projects/cardiacdeviceplacementsimulation/README.md b/PW31_2019_Boston/Projects/cardiacdeviceplacementsimulation/README.md index b81ff68d0..a319d5b8e 100644 --- a/PW31_2019_Boston/Projects/cardiacdeviceplacementsimulation/README.md +++ b/PW31_2019_Boston/Projects/cardiacdeviceplacementsimulation/README.md @@ -11,7 +11,7 @@ Back to [Projects List](../../README.md#ProjectsList) # Project Description -Pre-operative device placement holds a potential to characterize the anatomical relationship of cardiac device (valve/catheter) to the surrounding structures before actual surgery. The deformation and displacement of the device across the cardia cycle can also be assesed pre-operatively. +Pre-operative device placement holds a potential to characterize the anatomical relationship of cardiac device (valve/catheter) to the surrounding structures before actual surgery. The deformation and displacement of the device across the cardia cycle can also be assesed pre-operatively. 
## Objective diff --git a/PW31_2019_Boston/Projects/kidneyAtlas/README.md b/PW31_2019_Boston/Projects/kidneyAtlas/README.md index 9c2f74774..3e9e2cc56 100644 --- a/PW31_2019_Boston/Projects/kidneyAtlas/README.md +++ b/PW31_2019_Boston/Projects/kidneyAtlas/README.md @@ -6,7 +6,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## Key Investigators - Babacar Diao (Ecole Militaire de Santé Dakar - Senegal) -- Michael Halle (Brigham and Women's Hospital) +- Michael Halle (Brigham and Women's Hospital) - Nayra Pumar (ULPGC – GTMA-IUIBS - MACbioIDi) - Juan Ruiz-Alzola (ULPGC - GTMA-IUIBS - MACbioIDi) @@ -42,7 +42,7 @@ This project focuses on the development of an anatomical atlas of the abdomen an Final version of the segmentation - + Language trasnlation and TA termn ## Background and References diff --git a/PW31_2019_Boston/README.md b/PW31_2019_Boston/README.md index e923b6d86..cc59e27f3 100644 --- a/PW31_2019_Boston/README.md +++ b/PW31_2019_Boston/README.md @@ -11,11 +11,11 @@ View this page on [https://projectweek.na-mic.org/PW31_2019_Boston/](https://pro ## Logistics - **Dates:** June 24-28, 2019 -- **Location:** MIT Bldg. 34, room numbers 401A&B (Grier conference room), 50 Vassar Street, Cambridge MA +- **Location:** MIT Bldg. 34, room numbers 401A&B (Grier conference room), 50 Vassar Street, Cambridge MA - **Hotels:** No rooms are reserved. Please ask on [discourse forum](https://discourse.slicer.org/c/community/project-week) if you have questions. - **Registration**: Fee is USD 350. Register using [this link](https://www.regonline.com/registration/Checkin.aspx?EventID=2555870). 
- [Frequently Asked Questions](https://projectweek.na-mic.org/#frequently-asked-quesions) -- **Slack Channel:** [Invite Link](https://join.slack.com/t/namic-projectweek/shared_invite/enQtNjY5MDEwMDMxMDcyLTkyNjA4MmQxMjFkZWNmMTMwNjliYzk5ZGExZTdiNmJlZWFjNzE0MWRiMmYzMTIzNzk0NDVkNWM3MGIzMDlkMTU) +- **Slack Channel:** [Invite Link](https://join.slack.com/t/namic-projectweek/shared_invite/enQtNjY5MDEwMDMxMDcyLTkyNjA4MmQxMjFkZWNmMTMwNjliYzk5ZGExZTdiNmJlZWFjNzE0MWRiMmYzMTIzNzk0NDVkNWM3MGIzMDlkMTU) ## Projects [(How to add a new project?)](Projects/README.md) @@ -130,9 +130,9 @@ List of registered participants so far (names will be added here after processin 1. COLTON BARR , PERK LAB, Queens, Canada 1. OLE VEGARD SOLBERG , SINTEF, Trondheim, Norway 1. JANNE BEATE BAKENG , SINTEF, Trondheim, Norway -1. POLINA GOLLAND , MIT -1. MAZDAK ABULNAGA , MIT -1. RUIZHI LIAO , MIT +1. POLINA GOLLAND , MIT +1. MAZDAK ABULNAGA , MIT +1. RUIZHI LIAO , MIT 1. DANIEL BROWN , RADICAL IMAGING 1. JAMES PETTS , THE INSTITUTE OF CANCER RESEARCH, LONDON 1. GREGORY SHARP , MASSACHUSETTS GENERAL HOSPITAL @@ -143,7 +143,7 @@ List of registered participants so far (names will be added here after processin 1. STEVE PIEPER , ISOMICS, INC. 1. OMAR TOUTOUNJI , MED-I-LAB, Queens, Canada 1. SYLVAIN BOUIX , BRIGHAM AND WOMEN'S HOSPITAL -1. DANIELLE PACE , MIT +1. DANIELLE PACE , MIT 1. JARRETT RUSHMORE , BOSTON UNIVERSITY SCHOOL OF MEDICINE 1. BENJAMIN ZWICK , INTELLIGENT SYSTEM FOR MEDICINE LABORATORY, THE UNIVERSITY OF WESTERN AUSTRALIA 1. ADAM WITTEK , INTELLIGENT SYSTEM FOR MEDICINE LABORATORY, THE UNIVERSITY OF WESTERN AUSTRALIA @@ -205,4 +205,3 @@ List of registered participants so far (names will be added here after processin 1. SARAH FRISKEN, BRIGHAM AND WOMEN'S HOSPITAL 1. 
SRINIVAS SRIDHAR, NORTHEASTERN UNIVERSITY - diff --git a/PW32_2019_London_Canada/PreparatoryMeetingsNotes.md b/PW32_2019_London_Canada/PreparatoryMeetingsNotes.md index 1ebfd6664..e3a803b4e 100644 --- a/PW32_2019_London_Canada/PreparatoryMeetingsNotes.md +++ b/PW32_2019_London_Canada/PreparatoryMeetingsNotes.md @@ -1,7 +1,6 @@ These are notes from the 32nd Project Week Preparation Meetings. -## Meeting #1: +## Meeting #1: ### Attendees ### - diff --git a/PW32_2019_London_Canada/Projects/BronchoscopeLocalizationFromDepthMaps/README.md b/PW32_2019_London_Canada/Projects/BronchoscopeLocalizationFromDepthMaps/README.md index e4a83d17f..8f40e7edb 100644 --- a/PW32_2019_London_Canada/Projects/BronchoscopeLocalizationFromDepthMaps/README.md +++ b/PW32_2019_London_Canada/Projects/BronchoscopeLocalizationFromDepthMaps/README.md @@ -35,10 +35,10 @@ The goal is to localize a bronchoscope through the use of depth maps generated f 4. Boolean operations to limit ICP registration to target area (with boolean operations performed using an instance of Blender imported into Slicer as a Python module) 5. Module created in Slicer 6. Boolean operations turned out to not be necessary and former issues were caused by a bug -7. Video of depth map tracking: -[![ ](http://img.youtube.com/vi/kM40rWXsx_k/0.jpg)](http://www.youtube.com/watch?v=kM40rWXsx_k) -8. Video of Blender Boolean operations within Slicer: -[![ ](http://img.youtube.com/vi/HDNilepxJLI/0.jpg)](http://www.youtube.com/watch?v=HDNilepxJLI) +7. Video of depth map tracking: +[![ ](https://img.youtube.com/vi/kM40rWXsx_k/0.jpg)](https://www.youtube.com/watch?v=kM40rWXsx_k) +8. 
Video of Blender Boolean operations within Slicer: +[![ ](https://img.youtube.com/vi/HDNilepxJLI/0.jpg)](https://www.youtube.com/watch?v=HDNilepxJLI) ## Installing and Importing Blender within Slicer diff --git a/PW32_2019_London_Canada/Projects/CTDiaphragmSegmentation(COPD)/README.md b/PW32_2019_London_Canada/Projects/CTDiaphragmSegmentation(COPD)/README.md index e05cc61a6..11d342190 100644 --- a/PW32_2019_London_Canada/Projects/CTDiaphragmSegmentation(COPD)/README.md +++ b/PW32_2019_London_Canada/Projects/CTDiaphragmSegmentation(COPD)/README.md @@ -3,21 +3,21 @@ Back to [Projects List](../../README.md#ProjectsList) ## Automated CT diaphragm muscle segementation ## Key Investigators -- Adamo Donovan (McGill University) -- Dr. Benjamin McDonald Smith (McGill University) +- Adamo Donovan (McGill University) +- Dr. Benjamin McDonald Smith (McGill University) - Investigator 3 (Affiliation) # Project Description -Manual diaphragm segmentation requires 2 hours per left hemi-diaphragm for a trained rater. +Manual diaphragm segmentation requires 2 hours per left hemi-diaphragm for a trained rater. -Previous automated methods of CT diaphragm segmentation, have relied on using a priori anatomical -knowledge (i.e. the lower surface of the lungs as a diaphragmatic landmark), mathematical models, and other -assumptions in order to segment the diaphragm, which was then compared to the gold standard manual -segmentation performed by expert radiologists. This method outputed measures of diaphragm distances, +Previous automated methods of CT diaphragm segmentation, have relied on using a priori anatomical +knowledge (i.e. the lower surface of the lungs as a diaphragmatic landmark), mathematical models, and other +assumptions in order to segment the diaphragm, which was then compared to the gold standard manual +segmentation performed by expert radiologists. This method outputed measures of diaphragm distances, surface area, and curvature. 
-Our method would be the first to use the gold standard method of over 300 manual and direct diaphragmatic -segmentations to train an artificial intelligence and neural network that would create a three-dimensional model +Our method would be the first to use the gold standard method of over 300 manual and direct diaphragmatic +segmentations to train an artificial intelligence and neural network that would create a three-dimensional model of the left hemi-diaphragm with measures of density, volume, and dome height. ## Objective diff --git a/PW32_2019_London_Canada/Projects/Cerebral vascular segmentation b/PW32_2019_London_Canada/Projects/Cerebral vascular segmentation index 5df5140c8..2e02d5431 100644 --- a/PW32_2019_London_Canada/Projects/Cerebral vascular segmentation +++ b/PW32_2019_London_Canada/Projects/Cerebral vascular segmentation @@ -3,7 +3,7 @@ Cerebral vascular Segmentation Slicer Bids app ## Key Investigators -- Mohamed Yousif +- Mohamed Yousif # Project Description diff --git a/PW32_2019_London_Canada/Projects/CranialReconstructionICV/README.md b/PW32_2019_London_Canada/Projects/CranialReconstructionICV/README.md index 7dc6f38f9..b7bb94d49 100644 --- a/PW32_2019_London_Canada/Projects/CranialReconstructionICV/README.md +++ b/PW32_2019_London_Canada/Projects/CranialReconstructionICV/README.md @@ -7,7 +7,7 @@ Back to [Projects List](../../README.md#ProjectsList) # Project Description Post-cranial vault reconstruction, the skull often contains holes which makes existing automated processes of intracranial volume calculation not applicable. -Goal is to find a faster way of calculating intracranial volume without manually delineating each slice while maintaining sufficient accuracy. +Goal is to find a faster way of calculating intracranial volume without manually delineating each slice while maintaining sufficient accuracy. 
## Objective @@ -22,4 +22,3 @@ Goal is to find a faster way of calculating intracranial volume without manually # Illustrations # Background and References - diff --git a/PW32_2019_London_Canada/Projects/Image-based-Tool-Tracking(Deep Regression)/README.md b/PW32_2019_London_Canada/Projects/Image-based-Tool-Tracking(Deep Regression)/README.md index cc607ea16..6128c3831 100644 --- a/PW32_2019_London_Canada/Projects/Image-based-Tool-Tracking(Deep Regression)/README.md +++ b/PW32_2019_London_Canada/Projects/Image-based-Tool-Tracking(Deep Regression)/README.md @@ -6,8 +6,8 @@ Back to [Projects List](../../README.md#ProjectsList) - Hareem Nisar (Robarts / Western Uni.) # Project Description -An ultrasound image-based tool tracking system using passive markers to provide image guidance during intracardiac interventions. -We aim to design a passive marker design for surgical tools so that they appear uniquely on the ultrasound images. This unique marker configuration can be used to calculate the tool’s pose. +An ultrasound image-based tool tracking system using passive markers to provide image guidance during intracardiac interventions. +We aim to design a passive marker design for surgical tools so that they appear uniquely on the ultrasound images. This unique marker configuration can be used to calculate the tool’s pose. Using deep learning to estimate the pose of tool from the ultrasound images ## Objective @@ -26,7 +26,7 @@ TBC # Illustrations -Coming soon.. +Coming soon.. 
# Background and References diff --git a/PW32_2019_London_Canada/Projects/ImageQuizzerProject/ImNO 2023 Poster Image Quizzer.png b/PW32_2019_London_Canada/Projects/ImageQuizzerProject/ImNO 2023 Poster Image Quizzer.png new file mode 100644 index 000000000..2514a334d Binary files /dev/null and b/PW32_2019_London_Canada/Projects/ImageQuizzerProject/ImNO 2023 Poster Image Quizzer.png differ diff --git a/PW32_2019_London_Canada/Projects/ImageQuizzerProject/ImageQuizzer.md b/PW32_2019_London_Canada/Projects/ImageQuizzerProject/ImageQuizzer.md index 3402b7af1..3639e9905 100644 --- a/PW32_2019_London_Canada/Projects/ImageQuizzerProject/ImageQuizzer.md +++ b/PW32_2019_London_Canada/Projects/ImageQuizzerProject/ImageQuizzer.md @@ -18,4 +18,4 @@ # Fourth question + Answer 1 -+ \ No newline at end of file ++ diff --git a/PW32_2019_London_Canada/Projects/ImageQuizzerProject/ImageQuizzerStudyBrowser2.md b/PW32_2019_London_Canada/Projects/ImageQuizzerProject/ImageQuizzerStudyBrowser2.md index 40e0ba9e5..db4d8ec3b 100644 --- a/PW32_2019_London_Canada/Projects/ImageQuizzerProject/ImageQuizzerStudyBrowser2.md +++ b/PW32_2019_London_Canada/Projects/ImageQuizzerProject/ImageQuizzerStudyBrowser2.md @@ -6,4 +6,4 @@ - [Series 1](Day2_CT.nrrd) # Patient 3 -- [Series 1](Day3_CT.nrrd) \ No newline at end of file +- [Series 1](Day3_CT.nrrd) diff --git a/PW32_2019_London_Canada/Projects/ImageQuizzerProject/P10-1 Carol Johnson PitchSlide.pdf b/PW32_2019_London_Canada/Projects/ImageQuizzerProject/P10-1 Carol Johnson PitchSlide.pdf new file mode 100644 index 000000000..1d6add9e1 Binary files /dev/null and b/PW32_2019_London_Canada/Projects/ImageQuizzerProject/P10-1 Carol Johnson PitchSlide.pdf differ diff --git a/PW32_2019_London_Canada/Projects/ImageQuizzerProject/README.md b/PW32_2019_London_Canada/Projects/ImageQuizzerProject/README.md index aad34bd39..a8075001d 100644 --- a/PW32_2019_London_Canada/Projects/ImageQuizzerProject/README.md +++ 
b/PW32_2019_London_Canada/Projects/ImageQuizzerProject/README.md @@ -9,6 +9,9 @@ ImageQuizzerProject The goal of this project is to develop a versatile clinical teaching tool which will present patient images with an associated questionnaire and capture user assessment for offline analysis. The content of the questionnaire will be customizable using the designer component of the application thereby providing a means to create a variety of educational/research resources. +Online documentation can be found here: + + ## Objective @@ -61,14 +64,16 @@ To learn the concepts that are needed to develop the three components of the pro - Qt groups boxes and radio buttons were created dynamically from the coded md file - I can load images for the quiz from the study browser .md file. - Python code to coordinate moving through the assigned images still to be coded - + # Illustrations + ![ImageQuizzerInSlicer](ImageQuizzer_Slicer.png) + + + # Background and References @@ -83,4 +91,3 @@ To learn the concepts that are needed to develop the three components of the pro A similar project was designed for the ClearCanvas 2.0 workstation. Following is a link to the publication to show how it was used. https://www.sciencedirect.com/science/article/pii/S0360301615272239?via%3Dihub - diff --git a/PW32_2019_London_Canada/Projects/KidneySegmentation/README.md b/PW32_2019_London_Canada/Projects/KidneySegmentation/README.md index e1fdde609..961abb0fc 100644 --- a/PW32_2019_London_Canada/Projects/KidneySegmentation/README.md +++ b/PW32_2019_London_Canada/Projects/KidneySegmentation/README.md @@ -3,11 +3,11 @@ Back to [Projects List](../../README.md#ProjectsList) ## Plus ultrasound simulation using kidney models ## Key Investigators -- Lyla Mu (Western University) +- Lyla Mu (Western University) # Project Description - + ## Objective 1. Segment kidney, calyx, pyramid and blood vessels. 
@@ -33,5 +33,3 @@ Back to [Projects List](../../README.md#ProjectsList) - Source code: https://github.com/Lyla-M/UnityOpenIGTLink - Test data: Kidney CT https://github.com/neheller/kits19/tree/master/data/case_00000 - - diff --git a/PW32_2019_London_Canada/Projects/NiftyNet-Alive-demo/README.md b/PW32_2019_London_Canada/Projects/NiftyNet-Alive-demo/README.md index 8b1378917..e69de29bb 100644 --- a/PW32_2019_London_Canada/Projects/NiftyNet-Alive-demo/README.md +++ b/PW32_2019_London_Canada/Projects/NiftyNet-Alive-demo/README.md @@ -1 +0,0 @@ - diff --git a/PW32_2019_London_Canada/Projects/PAxisTransform/README.md b/PW32_2019_London_Canada/Projects/PAxisTransform/README.md index ec63ae2ff..4d4d499a2 100644 --- a/PW32_2019_London_Canada/Projects/PAxisTransform/README.md +++ b/PW32_2019_London_Canada/Projects/PAxisTransform/README.md @@ -5,18 +5,18 @@ Back to [Projects List](../../README.md#ProjectsList) ## Key Investigators - Tina Wu (Sunnybrook Health Science Centre) -## Acknowledgement +## Acknowledgement - Michael Hardisty (Sunnybrook Health Science Centre) - Andras Lasso (Perk Lab) - Andrew Yang # Project Description - + Current slicer modules for general registration (BRAINs or Elastix) are unable to handle large initial mismatch between the two objects of interest. The purpose of this project is to create a slicer module that would allow registering two volumes with large initial mismatches (>15-20 degs) based on their principal axes. The module would also come with the capability for allowing users to visualize the volumes (as a model) and the direction of the principal vectors. ## Objective -1. Implement algorithm for performing principal axis transformation. -2. Implement visualization tools. +1. Implement algorithm for performing principal axis transformation. +2. Implement visualization tools. 
## Approach and Plan diff --git a/PW32_2019_London_Canada/Projects/RegistrationCochlearModeling/README.md b/PW32_2019_London_Canada/Projects/RegistrationCochlearModeling/README.md index b4a1f18ff..558e0eb77 100644 --- a/PW32_2019_London_Canada/Projects/RegistrationCochlearModeling/README.md +++ b/PW32_2019_London_Canada/Projects/RegistrationCochlearModeling/README.md @@ -29,4 +29,3 @@ Had preliminary work completed coming in to Slicer week. Measuremnts were comple ![Cropped volume rendering of the human helicotrema. BM annotates the cochlear soft tissue, and the bony shell is visible. Fiducials are placed to measure this region](Figure2Noannotation.PNG) ![Smoothed 3D Model of the cochlea with fiducials placed in helicotrema region. Cross-sectional slices are visualized in (B) and (C)](HelicoFidOnSmoothedModel.PNG) - diff --git a/PW32_2019_London_Canada/Projects/RobSlicerProject b/PW32_2019_London_Canada/Projects/RobSlicerProject index 9caafc7d1..b41f3c88f 100644 --- a/PW32_2019_London_Canada/Projects/RobSlicerProject +++ b/PW32_2019_London_Canada/Projects/RobSlicerProject @@ -1,19 +1,19 @@ ## Write full project title here ## Key Investigators -- Roberta Piazza, Visiting Graduate Student at Robarts Research Institute +- Roberta Piazza, Visiting Graduate Student at Robarts Research Institute # Project Description +verify if it is possible quantify this error and correct it--> ## Objective -1. Objective A. Create a module to calibrate sensorized tool (for example conavi probe). -1. Objective B. . -1. Objective C. Describe it in 1-2 sentences. +1. Objective A. Create a module to calibrate sensorized tool (for example conavi probe). +1. Objective B. . +1. Objective C. Describe it in 1-2 sentences. 
## Approach and Plan diff --git a/PW32_2019_London_Canada/Projects/Slicer_ARM64/README.md b/PW32_2019_London_Canada/Projects/Slicer_ARM64/README.md index 79b70a17d..eb52d23d5 100644 --- a/PW32_2019_London_Canada/Projects/Slicer_ARM64/README.md +++ b/PW32_2019_London_Canada/Projects/Slicer_ARM64/README.md @@ -3,7 +3,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## 3D Slicer on ARM64 (with GPU support!) ## Key Investigators -- Adam Rankin (Robarts Research Institute) +- Adam Rankin (Robarts Research Institute) # Project Description Compile and run Slicer on a [NanoPC-T4 SBC](http://wiki.friendlyarm.com/wiki/index.php/NanoPC-T4) with OpenGL ES support compiled and working. diff --git a/PW32_2019_London_Canada/Projects/Slicer_SimpleElastix/README.md b/PW32_2019_London_Canada/Projects/Slicer_SimpleElastix/README.md index 0c2956841..79fdee67c 100644 --- a/PW32_2019_London_Canada/Projects/Slicer_SimpleElastix/README.md +++ b/PW32_2019_London_Canada/Projects/Slicer_SimpleElastix/README.md @@ -3,7 +3,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## SimpleElastix Slicer Integration ## Key Investigators -- Patrick Carnahan (Robarts Research Institute) +- Patrick Carnahan (Robarts Research Institute) # Project Description @@ -11,7 +11,7 @@ Modify the SimpleElastix project to seperate it from the SimpleITK build and ins SimpleElastix could then potentially be integrated into the SlicerElastix module to allow direct use of the Elastix registration filters from python. ## Objective -1. Complete build configuration work to enable standalone python wrapping of elastix filters +1. Complete build configuration work to enable standalone python wrapping of elastix filters 1. 
Integrate building of SimpleElastix fork into SlicerElastix to allow for easy use within Slicer @@ -29,5 +29,3 @@ SimpleElastix could then potentially be integrated into the SlicerElastix module - Source code: https://github.com/pcarnah/SimpleElastix - Documentation: TODO - - diff --git a/PW32_2019_London_Canada/Projects/Spinemodels/README.md b/PW32_2019_London_Canada/Projects/Spinemodels/README.md index 4224a830b..a4af7473a 100644 --- a/PW32_2019_London_Canada/Projects/Spinemodels/README.md +++ b/PW32_2019_London_Canada/Projects/Spinemodels/README.md @@ -45,4 +45,4 @@ Determine if accurate spine models can be created from MRI. - Documentation: https://link.to.docs - Test data: https://link.to.test.data ---> \ No newline at end of file +--> diff --git a/PW32_2019_London_Canada/Projects/Template/README.md b/PW32_2019_London_Canada/Projects/Template/README.md index f966f7994..762c86964 100644 --- a/PW32_2019_London_Canada/Projects/Template/README.md +++ b/PW32_2019_London_Canada/Projects/Template/README.md @@ -3,17 +3,17 @@ Back to [Projects List](../../README.md#ProjectsList) ## Write full project title here ## Key Investigators -- Investigator 1 (Affiliation) -- Investigator 2 (Affiliation) +- Investigator 1 (Affiliation) +- Investigator 2 (Affiliation) - Investigator 3 (Affiliation) # Project Description - + ## Objective -1. Objective A. Describe it in 1-2 sentences. -1. Objective B. Describe it in 1-2 sentences. -1. Objective C. Describe it in 1-2 sentences. +1. Objective A. Describe it in 1-2 sentences. +1. Objective B. Describe it in 1-2 sentences. +1. Objective C. Describe it in 1-2 sentences. 
## Approach and Plan @@ -40,4 +40,3 @@ Back to [Projects List](../../README.md#ProjectsList) - Source code: https://github.com/YourUser/YourRepository - Documentation: https://link.to.docs - Test data: https://link.to.test.data - diff --git a/PW32_2019_London_Canada/Projects/VertebralSegmentation3DUNet/README.md b/PW32_2019_London_Canada/Projects/VertebralSegmentation3DUNet/README.md index fe8575da1..5539a2f56 100644 --- a/PW32_2019_London_Canada/Projects/VertebralSegmentation3DUNet/README.md +++ b/PW32_2019_London_Canada/Projects/VertebralSegmentation3DUNet/README.md @@ -3,7 +3,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## Write full project title here ## Key Investigators -- Geoff Klein (U of T) +- Geoff Klein (U of T) # Project Description Use a previously trained 3D U-Net from Keras to segment vertebral bodies from CT scans. diff --git a/PW32_2019_London_Canada/Projects/VesselSegmentation/README.md b/PW32_2019_London_Canada/Projects/VesselSegmentation/README.md index 9e0885068..e3b4a17d2 100644 --- a/PW32_2019_London_Canada/Projects/VesselSegmentation/README.md +++ b/PW32_2019_London_Canada/Projects/VesselSegmentation/README.md @@ -2,17 +2,17 @@ ## Key Investigators - Hareem Nisar, Phd Candidate, Robarts research Institute -- Roberta Piazza, Visiting Graduate Student at Robarts Research Institute +- Roberta Piazza, Visiting Graduate Student at Robarts Research Institute # Project Description The idea is to use electromagnetic sensor on Conavi catheter, needle and guidewire to track them without -using fluoroscopy. +using fluoroscopy. ## Objective -1. Objective A. Segment vessel from US catheter. -1. Objective B. Build vessel model. +1. Objective A. Segment vessel from US catheter. +1. Objective B. Build vessel model. 
## Approach and Plan diff --git a/PW32_2019_London_Canada/Projects/WristKinematics/README.md b/PW32_2019_London_Canada/Projects/WristKinematics/README.md index 73443f3bc..d5cd1a02d 100644 --- a/PW32_2019_London_Canada/Projects/WristKinematics/README.md +++ b/PW32_2019_London_Canada/Projects/WristKinematics/README.md @@ -3,7 +3,7 @@ Back to [Projects List](../../README.md#ProjectsList) ## Wrist Kinematics from 4D CT ## Key Investigators -- Puneet Kaur Ranota (Western University) +- Puneet Kaur Ranota (Western University) - Adam Rankin - Andras Lasso (PerkLab, Queen's) diff --git a/PW32_2019_London_Canada/Projects/bullseye/README.md b/PW32_2019_London_Canada/Projects/bullseye/README.md index def6eb0ec..b1ee1558b 100644 --- a/PW32_2019_London_Canada/Projects/bullseye/README.md +++ b/PW32_2019_London_Canada/Projects/bullseye/README.md @@ -3,8 +3,8 @@ Back to [Projects List](../../README.md#ProjectsList) ## Bullseye - Computer Vision for the glenoid implant in total shoulder arthroplasty ## Key Investigators -- David M. Burns, MD, PhD(c) (Sunnybrook Research Institute) -- Samuel C.P. Newhook, BASc (Sunnybrook Research Institute) +- David M. Burns, MD, PhD(c) (Sunnybrook Research Institute) +- Samuel C.P. Newhook, BASc (Sunnybrook Research Institute) - Cari Whyne, PhD (Sunnybrook Research Institute) # Project Description @@ -17,14 +17,14 @@ Positioning of the glenoid component in total shoulder arthroplasty is a challen ## Approach and Plan 1. Targeting the automation of intra-operative image processing steps. These are as follows: - 1. Segmentation of tracker and glenoid from intra-operative optical image + 1. Segmentation of tracker and glenoid from intra-operative optical image 2. Pre-alignment for optical tracker registration - 3. Optical tracker registration (surface based) + 3. Optical tracker registration (surface based) 4. Validation of tracker registration (visual / numeric) 5. Pre-alignment for glenoid optical image to pre-operative mesh model - 6. 
Glenoid registration (surface based) + 6. Glenoid registration (surface based) 7. Validation of glenoid registration (visual / numeric) - 8. Visualize prediction + 8. Visualize prediction 2. Define data flow for each step (inputs and outputs) 3. Create modules for each step or grouping of 2-3 steps 4. Create a workflow module for linking steps (handle workflow logic) diff --git a/PW32_2019_London_Canada/Projects/slicerHands/README.md b/PW32_2019_London_Canada/Projects/slicerHands/README.md index e7b749caa..4cf0990fd 100644 --- a/PW32_2019_London_Canada/Projects/slicerHands/README.md +++ b/PW32_2019_London_Canada/Projects/slicerHands/README.md @@ -4,31 +4,31 @@ Back to [Projects List](../../README.md#ProjectsList) ## Key Investigators - Leah Groves -- Daniel Allen +- Daniel Allen - Adam Rankin # Project Description -Module to integrate hand models into Slicer that are paired with tracking from LeapMotion +Module to integrate hand models into Slicer that are paired with tracking from LeapMotion ## Objective -1. Stream data from leap motion into Slicer +1. Stream data from leap motion into Slicer 1. Pair tracking with models to represent hands 1. Make hands user specfic through scaling models accordingly and allow user input ## Approach and Plan -1. Use PlusServer to stream data into 3D Slicer -1. Pair cylinder and sphere models with tracking information -1. Make module adpatable to width and length of digits +1. Use PlusServer to stream data into 3D Slicer +1. Pair cylinder and sphere models with tracking information +1. Make module adpatable to width and length of digits ## Progress and Next Steps -1. Transforms representing joints are streamed into 3D Slicer -1. On connecting sphere models are generated and paired with appropriate transforms +1. Transforms representing joints are streamed into 3D Slicer +1. On connecting sphere models are generated and paired with appropriate transforms -Future work: -1. Positions cylinders according to joints -1. 
Stream in finger dimensions and have model sizes adapt to user -1. Check update rate when calibrated into the VIVE +Future work: +1. Positions cylinders according to joints +1. Stream in finger dimensions and have model sizes adapt to user +1. Check update rate when calibrated into the VIVE # Illustrations diff --git a/PW33_2020_GranCanaria/Breakouts/Slicer5/README.md b/PW33_2020_GranCanaria/Breakouts/Slicer5/README.md index 504670197..ef10dd375 100644 --- a/PW33_2020_GranCanaria/Breakouts/Slicer5/README.md +++ b/PW33_2020_GranCanaria/Breakouts/Slicer5/README.md @@ -12,7 +12,7 @@ Back to [Project Week](../../README.md) - Andras Lasso (Queen's University) - Csaba Pinter (Ebatin S.L) - Jean-Christophe Fillion-Robin (Kitware Inc.) -- Kyle Sunderland (Queen's University) +- Kyle Sunderland (Queen's University) - Steve Pieper (Isomics Inc.) - Sonia Pujol (Brigham and Women’s Hospital and Harvard Medical School) @@ -85,7 +85,7 @@ Action Items (Jc): * Extract list of all contributors and ask on discourse which emails is preferred * Mantis: * Create redirect from mantisarchive.slicer.org/view.php?id=4681 to https://issues.slicer.org/view.php?id=4681 - + Post-release Action items: * Mantis: * Create static website with archived mantis issues @@ -118,7 +118,7 @@ Notes: * after Github migration, we will transition to readthedocs and markdown Action items (Andras, Jc): -* integrate https://github.com/Slicer/Slicer/pull/686 +* integrate https://github.com/Slicer/Slicer/pull/686 ### Slicer Training and Tutorials @@ -128,7 +128,7 @@ Notes: * two use cases for tutorials * at home: website works well. 
For example, https://lassoan.github.io/SlicerSegmentationRecipes/ * teaching: slides works well - + Ideas: * every tutorial could have a discourse thread to post comment but on youtube video there is a comment section but there are no comments * generation of PDF from PPT slides could be automated @@ -138,7 +138,7 @@ Action items: * Shared Drive: While non-kitware member can be added, the team drive can not be publicly visible. See https://support.google.com/a/thread/13540273?hl=en * Shared Folder: This can be visible by anyone with the link and non-kitware member can be added. Click [here](https://drive.google.com/drive/folders/1aU77cEqkEBl8764-IL-hdX067YYjZUE1?usp=sharing) to access the folder * update power point slides into shared folder. Create one folder per Slicer version (e.g 3.6, 4.0, 4.1, ...) (Sonia) - + ### Default Theme @@ -155,10 +155,10 @@ Notes: * Sequences module is now a very useful module and should be integrated into the code Questions: -* Should the module be integrated as a [Slicer Remote module](https://www.slicer.org/wiki/Documentation/Nightly/Developers/Build_system/Remote_Module) or do we want to cherry pick the complete history ? +* Should the module be integrated as a [Slicer Remote module](https://www.slicer.org/wiki/Documentation/Nightly/Developers/Build_system/Remote_Module) or do we want to cherry pick the complete history ? Action items (Andras, 2 to 3 weeks): -* Revisit MRML Copy API +* Revisit MRML Copy API * Integrate Sequences ### Extension description file @@ -168,6 +168,6 @@ Notes: * Two steps: * (1) Update extension index changing format and extension. For now, keep backend working with s4ext * (2) de-duplication of metadata in CMakeLists.txt vs description file. 
Metadata will be extracted from the json file using CMake function - + Action items (Jc, Sam): * Implement step 1 diff --git a/PW33_2020_GranCanaria/Logistics.md b/PW33_2020_GranCanaria/Logistics.md index 2e60f10a2..2672c676c 100644 --- a/PW33_2020_GranCanaria/Logistics.md +++ b/PW33_2020_GranCanaria/Logistics.md @@ -9,12 +9,12 @@ || - **Recommended hotels (special rates) and maps** - - Booking: [NH Imperial Playa](https://www.nh-hoteles.es/event/na-mic-workshop) Link available until: December 19, 2020 + - Booking: [NH Imperial Playa](https://www.nh-hoteles.es/event/na-mic-workshop) Link available until: December 19, 2020 - [NH Imperial Playa Map](https://cutt.ly/twjO0PO) - Booking: [NH Las Palmas Playa Las Canteras](https://www.nh-hoteles.es/event/na-mic-workshop-2020) Link available until: December 19, 2020 - [NH Las Palmas Playa Las Canteras Map](https://cutt.ly/vwkkTDE) - Booking after December 19, 2020 by email to: ProjectWeek33 NHBooking - + - **Transportation** from the airport to the city (Las Palmas de Gran Canaria): - Taxi (line at the airport) - [Bus -line 60-](https://www.guaguasglobal.com/en/lineas-horarios/linea/) @@ -54,7 +54,7 @@ Juan Ruiz Alzola, Maria Dolores Afonso Suarez, Asmaa Skareb. * The academic authority signing on behalf the host center is *Prof. Félix Tobajas, Subdirector de Estudiantes, Movilidad y Prácticas Externas, Escuela de Ingeniería de Telecomunicación y Electrónica (Mobility Deputy Director, Telecommunication and Electrical Engineering School), Universidad de Las Palmas de Gran Canaria*. - * Once properly prepared and signed the application at the applicant's home institution, it should be sent by email to: SempeEite 3. The application will be processed by the host center and the outcome will be reported in a few days. 
diff --git a/PW33_2020_GranCanaria/PW33InTheMedia.md b/PW33_2020_GranCanaria/PW33InTheMedia.md index 76ca923c5..cbb51fdaa 100644 --- a/PW33_2020_GranCanaria/PW33InTheMedia.md +++ b/PW33_2020_GranCanaria/PW33InTheMedia.md @@ -1,9 +1,7 @@ ## 33rd Winter Project Week in the Media - + - [La Provincia](https://www.laprovincia.es/sociedad/2020/01/22/ciencia-femenino-toma-forma-ghana/1246788.html) - - diff --git a/PW33_2020_GranCanaria/PreparatoryMeetingsNotes.md b/PW33_2020_GranCanaria/PreparatoryMeetingsNotes.md index 72bcef601..953ad5da0 100644 --- a/PW33_2020_GranCanaria/PreparatoryMeetingsNotes.md +++ b/PW33_2020_GranCanaria/PreparatoryMeetingsNotes.md @@ -20,5 +20,3 @@ Proposal about the areas to establish in order to organize the projects: - DICOM It could be interesting during the next preparatory meetings to have a brainstorming on breakout sessions topics - - diff --git a/PW33_2020_GranCanaria/Projects/3DSlicerTrainingPrograms/README.md b/PW33_2020_GranCanaria/Projects/3DSlicerTrainingPrograms/README.md index 48c7cfb7c..890f2d445 100644 --- a/PW33_2020_GranCanaria/Projects/3DSlicerTrainingPrograms/README.md +++ b/PW33_2020_GranCanaria/Projects/3DSlicerTrainingPrograms/README.md @@ -1,6 +1,6 @@ Back to [Projects List](../../README.md#ProjectsList) -# 3D Slicer Training Programs. +# 3D Slicer Training Programs. ## Key Investigators @@ -24,7 +24,7 @@ Back to [Projects List](../../README.md#ProjectsList) # Project Description -In this edition of the training project, new professionals will work in order to develop new training material and study how to enhance and improve the use of 3DSlicer as a training tool. We intend to create new training tools as new atlases and study its use in the African countries of Senegal and Mauritania. It will also be included the proposal of a training program for Universities in Morocco and other projects that could arise. 
+In this edition of the training project, new professionals will work in order to develop new training material and study how to enhance and improve the use of 3DSlicer as a training tool. We intend to create new training tools as new atlases and study its use in the African countries of Senegal and Mauritania. It will also be included the proposal of a training program for Universities in Morocco and other projects that could arise. ## Objective @@ -38,7 +38,7 @@ In this edition of the training project, new professionals will work in order to ## Progress and Next Steps -1. Initial training have been delivered for the African collaborators, including newcomers from Morocco +1. Initial training have been delivered for the African collaborators, including newcomers from Morocco 1. In these training sessions the African collaborators experienced have been advised 1. Information to draft the introduction of the development of 3D Slicer modules for engineers 1. The creation of multidisciplinary workgroups, with medicine and engineer degree students have been planned diff --git a/PW33_2020_GranCanaria/Projects/AnatomicalAtlases/README.md b/PW33_2020_GranCanaria/Projects/AnatomicalAtlases/README.md index 421a5b7e8..6f9812338 100644 --- a/PW33_2020_GranCanaria/Projects/AnatomicalAtlases/README.md +++ b/PW33_2020_GranCanaria/Projects/AnatomicalAtlases/README.md @@ -1,6 +1,6 @@ Back to [Projects List](../../README.md#ProjectsList) -# Anatomical Atlases Senegal. +# Anatomical Atlases Senegal. 
## Key Investigators @@ -52,4 +52,3 @@ These atlases and their creation process will be used as training material in th - [Pelvis Atlas project](https://projectweek.na-mic.org/PW30_2019_GranCanaria/Projects/PelvicAnatomyAtlases/) - [Atlas Development For Education project](https://projectweek.na-mic.org/PW28_2018_GranCanaria/Projects/AtlasDevelopmentForEducation/) - diff --git a/PW33_2020_GranCanaria/Projects/AnatomicalAtlasesMauritania/README.md b/PW33_2020_GranCanaria/Projects/AnatomicalAtlasesMauritania/README.md index 751e4ff7e..336a3a706 100644 --- a/PW33_2020_GranCanaria/Projects/AnatomicalAtlasesMauritania/README.md +++ b/PW33_2020_GranCanaria/Projects/AnatomicalAtlasesMauritania/README.md @@ -1,6 +1,6 @@ Back to [Projects List](../../README.md#ProjectsList) -# Anatomical Atlases Mauritania. +# Anatomical Atlases Mauritania. ## Key Investigators @@ -14,7 +14,7 @@ Back to [Projects List](../../README.md#ProjectsList) - Mohamed Bamba (undergraduate, FM- UNA) - Yahya Abatna (undergraduate, FM- UNA) - Mariem Isselmou (undergraduate, FM- UNA) -- Mohamed Aly Dedew (undergraduate, FM- UNA) +- Mohamed Aly Dedew (undergraduate, FM- UNA) - MOROCCO - Siham Alaoui (University Abdelmalek Essaâdi) - Mouhcine Madani (University Abdelmalek Essaâdi) @@ -46,7 +46,7 @@ These atlases and their creation process will be used as training material in th ## Progress and Next Steps 1. Atlases created for testing are already finished -1. The Mauritanian team is creating new atlases +1. The Mauritanian team is creating new atlases 1. Files to be uploaded to our server with the open anatomy browser are to be created 1. 
It is a challenge to gather quality images to be segmented, so a main interest point for us is to gather these images for different parts of the body normal and with some kind of pathology to be segmented @@ -59,4 +59,3 @@ These atlases and their creation process will be used as training material in th - [Pelvis Atlas project](https://projectweek.na-mic.org/PW30_2019_GranCanaria/Projects/PelvicAnatomyAtlases/) - [Atlas Development For Education project](https://projectweek.na-mic.org/PW28_2018_GranCanaria/Projects/AtlasDevelopmentForEducation/) - diff --git a/PW33_2020_GranCanaria/Projects/ArduinoSlicer/README.md b/PW33_2020_GranCanaria/Projects/ArduinoSlicer/README.md index c2d25d3f4..d5908c84d 100644 --- a/PW33_2020_GranCanaria/Projects/ArduinoSlicer/README.md +++ b/PW33_2020_GranCanaria/Projects/ArduinoSlicer/README.md @@ -44,7 +44,7 @@ It can be useful for students and for not complex data/instruction stream. 4. The first layer of code was written (thanks to Andras!) https://github.com/pzaffino/SlicerArduinoController - + 5. The next step will be to refine the link module and to start writing more modules (one for each possible application). # Illustrations diff --git a/PW33_2020_GranCanaria/Projects/ClubFoot/README.md b/PW33_2020_GranCanaria/Projects/ClubFoot/README.md index d24a7360d..998c5a42b 100644 --- a/PW33_2020_GranCanaria/Projects/ClubFoot/README.md +++ b/PW33_2020_GranCanaria/Projects/ClubFoot/README.md @@ -15,7 +15,7 @@ Back to [Projects List](../../README.md#ProjectsList) About 1-2 in every 1000 babies are born with what's called clubfoot, the most common skeletal deformity in children. Clubfoot is well treated using a plaster casting method developed in the 1960s, but there is a potential to use -3D scanning and printing techniques to make the process more efficient and cost-effective. Also easier activities of +3D scanning and printing techniques to make the process more efficient and cost-effective. 
Also easier activities of daily living for families with plastic instead of plaster casts. ## Objective diff --git a/PW33_2020_GranCanaria/Projects/CustomizationOfUserInterfacesForDissemination/README.md b/PW33_2020_GranCanaria/Projects/CustomizationOfUserInterfacesForDissemination/README.md index d81861227..5f22e4183 100644 --- a/PW33_2020_GranCanaria/Projects/CustomizationOfUserInterfacesForDissemination/README.md +++ b/PW33_2020_GranCanaria/Projects/CustomizationOfUserInterfacesForDissemination/README.md @@ -49,24 +49,24 @@ This project aims at creating a minimal user interface oriented to museological ![Some more images](interfaceSketch.png)
Sketch of the interface.
-
+
![Some more images](arduinoBoard.jpg) Arduino board with push buttons and trackballs.
-
+
![Interface prototype with translated texts and corporate image](interf.png)
Interface prototype with translated texts and corporate image
-
+
![Museum logo](logo.png)
Museum logo
-
+
![Detail of the custom buttons](botonera.png)
Detail of the custom buttons
-
+
# Background and References diff --git a/PW33_2020_GranCanaria/Projects/CustomizedGUIDiabeticFoot/README.md b/PW33_2020_GranCanaria/Projects/CustomizedGUIDiabeticFoot/README.md index f08e377f6..2d44bcd5f 100644 --- a/PW33_2020_GranCanaria/Projects/CustomizedGUIDiabeticFoot/README.md +++ b/PW33_2020_GranCanaria/Projects/CustomizedGUIDiabeticFoot/README.md @@ -13,7 +13,7 @@ Back to [Projects List](../../README.md#ProjectsList) This project is the next step in the project [Medical Infrared Imaging with 3DSlicer](https://projectweek.na-mic.org/PW28_2018_GranCanaria/Projects/MedicalInfraredImagingWithSlicer/) presented during 28th PW NA-MIC. This research is the result of a collaboration between the IAC and the University of Las Palmas de Gran Canaria. The main objective is to develop a multichannel sensor prototype that runs in Slicer through PLUS Toolkit. This sensor is being designed to be used by the clinic staff in particular for medical diagnosis (at this time we are focused on foot ulcers in diabetic patients). -The aim is to create a friendly customized interface for monitoring the foot ulcers in diabetic patients. +The aim is to create a friendly customized interface for monitoring the foot ulcers in diabetic patients. @@ -50,10 +50,10 @@ The aim is to create a friendly customized interface for monitoring the foot ulc We addressed our challenges with 3D Slicer and Plus community. The collaboration with the community, along this week has been quite succesfull and our project has achieve several goals. 1. We have resolved our running and stopping problem with the Plus server thanks to Kyle. 1. The leak issue when the customized application is closed was disccussed with Jean. He recommended following the current slicelet web page because the current implentation is old-fashion. (Figure 3) -1. We talked with several colleages about the image orientation when the image is serializated in 3D slicer. 
Finally, we resolved this issue taking into account the image orientation generated in Plus toolkit. -1. The perfomannce was analyzed with the community but the final solution need further discution so it will be a goal for the next project week. We decided to use low frame rate to reduce the CPU utilization. +1. We talked with several colleages about the image orientation when the image is serializated in 3D slicer. Finally, we resolved this issue taking into account the image orientation generated in Plus toolkit. +1. The perfomannce was analyzed with the community but the final solution need further discution so it will be a goal for the next project week. We decided to use low frame rate to reduce the CPU utilization. 1. We need to evaluate the solution (combined images in different layers) that we found in the slicer script repository. -1. The window/level volume is not updated in real-time, so it should be analyzed. We discussed several solutions with the community. +1. The window/level volume is not updated in real-time, so it should be analyzed. We discussed several solutions with the community. 
# Illustrations diff --git a/PW33_2020_GranCanaria/Projects/DiabeticFootSegmentation/README.md b/PW33_2020_GranCanaria/Projects/DiabeticFootSegmentation/README.md index 8797ecc72..a5fea12b8 100644 --- a/PW33_2020_GranCanaria/Projects/DiabeticFootSegmentation/README.md +++ b/PW33_2020_GranCanaria/Projects/DiabeticFootSegmentation/README.md @@ -30,11 +30,11 @@ The aim is to integrate an algorithm, which is based on Deep Learning, for foot ## Progress and Next Steps * [x] **Integrate dependencies** * [x] Libtorch (Pytorch C++ API) - * [x] PCL (Point Cloud Library) + * [x] PCL (Point Cloud Library) * [x] SuperBuild option * [x] **PyTorch modules to TorchScript** * [x] Convert PyTorch modules to TorchScript - * [x] Load a TorchScript model in the extension + * [x] Load a TorchScript model in the extension * [ ] **Feet segmentation algorithm** * [x] Deep Learning segmentation * [x] *Convert VTK images to tensor* @@ -45,12 +45,12 @@ The aim is to integrate an algorithm, which is based on Deep Learning, for foot * [x] *Geometry plane segmentation* * [x] *Improve Deep Learning results* * [ ] Apply the resulting mask - - + + ### To do: * Fixes an error when returning the point cloud data to a VTK Image * Include Windows support in SuperBuild option - * Test strategies based on point cloud processing in VTK in order to remove PCL dependency + * Test strategies based on point cloud processing in VTK in order to remove PCL dependency # Illustrations ![Workflow](images/Workflow.png "Proposed workflow") @@ -92,4 +92,3 @@ specifically planes. [2] [TorchScript tutorial](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html) [3] [Diabetic Foot Extension](https://github.com/SolidusAbi/DiabeticFootExtension). Repository with the extension that includes the "SuperBuild" option and the developed module. 
- diff --git a/PW33_2020_GranCanaria/Projects/ExtensionsManagerRefresh/README.md b/PW33_2020_GranCanaria/Projects/ExtensionsManagerRefresh/README.md index e754c695f..9fc36e552 100644 --- a/PW33_2020_GranCanaria/Projects/ExtensionsManagerRefresh/README.md +++ b/PW33_2020_GranCanaria/Projects/ExtensionsManagerRefresh/README.md @@ -10,7 +10,7 @@ Back to [Projects List](../../README.md#ProjectsList) # Project Description -The current infrastructure is on ["life-support"](https://www.slicer.org/wiki/Documentation/Labs/ExtensionsServer#status), during the past two years, Kitware has been working toward phasing out the aging Midas based server used to host, serve and manage Slicer application and extension packages. +The current infrastructure is on ["life-support"](https://www.slicer.org/wiki/Documentation/Labs/ExtensionsServer#status), during the past two years, Kitware has been working toward phasing out the aging Midas based server used to host, serve and manage Slicer application and extension packages. Kitware now has a Girder plugin for the backend (meaning API endpoints and database interaction). See https://slicer-package-manager.readthedocs.io. We also experimented with few prototype regarding the extension manager but have nothing production ready. 
diff --git a/PW33_2020_GranCanaria/Projects/GitHubAsAuthoritativeVersionControlSystem/README.md b/PW33_2020_GranCanaria/Projects/GitHubAsAuthoritativeVersionControlSystem/README.md index 5d83ce2f6..284091a34 100644 --- a/PW33_2020_GranCanaria/Projects/GitHubAsAuthoritativeVersionControlSystem/README.md +++ b/PW33_2020_GranCanaria/Projects/GitHubAsAuthoritativeVersionControlSystem/README.md @@ -52,7 +52,7 @@ _Copied from [Slicer 5](../../Breakouts/Slicer5/README.md) breakout session page * Extract list of all contributors and ask on discourse which emails is preferred * Mantis: * Create redirect from mantisarchive.slicer.org/view.php?id=4681 to https://issues.slicer.org/view.php?id=4681 - + Post-release Action items: * Mantis: * Create static website with archived mantis issues diff --git a/PW33_2020_GranCanaria/Projects/HighResolutionBrainSegmentation/README.md b/PW33_2020_GranCanaria/Projects/HighResolutionBrainSegmentation/README.md index c5d465ea5..4d713525d 100644 --- a/PW33_2020_GranCanaria/Projects/HighResolutionBrainSegmentation/README.md +++ b/PW33_2020_GranCanaria/Projects/HighResolutionBrainSegmentation/README.md @@ -16,7 +16,7 @@ -1. Integrate Freesurfer file formats. +1. Integrate Freesurfer file formats. Freesurfer is a program used to generate brain surfaces from MRI data. Freesurfer file formats use their own coordinate space and thus do not seamlessly integrate with the MRI files read by Slicer. The objective is to make freesurfer file formats easily accessible within Slicer. 2. Sulcal Drawing The use of 3D tools to draw and delimit brain sulci depends on freesurfer input (number 1) and an algorithm to draw on sulci and extract the lines in coordinate space. The goal is to troubleshoot this process. @@ -65,7 +65,7 @@ The use of 3D tools to draw and delimit brain sulci depends on freesurfer input - Submit extension to extensions index - Remove FreeSurfer classes from Slicer core 1. 
Documentation - - Integrate labeling manuals with the Neurosegmentation module as dockable panels for each structure + - Integrate labeling manuals with the Neurosegmentation module as dockable panels for each structure 1. Case Management - Implement mechanisms for editing/reviewer feedback - Allow repository selection @@ -87,4 +87,3 @@ https://github.com/PerkLab/SlicerFreeSurferImporter http://www.freesurfer.net/pub/docs/wiki/mris_pmake.help.xml.html https://github.com/PerkLab/NeuroSegmentation/tree/version_control - diff --git a/PW33_2020_GranCanaria/Projects/MEVISDraw/README.md b/PW33_2020_GranCanaria/Projects/MEVISDraw/README.md index 2d61d70f6..137830f1d 100644 --- a/PW33_2020_GranCanaria/Projects/MEVISDraw/README.md +++ b/PW33_2020_GranCanaria/Projects/MEVISDraw/README.md @@ -47,4 +47,4 @@ MEVIS draw contains a version of the "CSI" tool ("contouring with snapping and i ![different SEG files listed in CTK DICOM Browser](MeVisLab_SEG_in_CTK_DICOMDatabase.png) -![MeVisLab-exported segmentation overlaid in Slicer](MeVisLab_SEG_in_Slicer.png) \ No newline at end of file +![MeVisLab-exported segmentation overlaid in Slicer](MeVisLab_SEG_in_Slicer.png) diff --git a/PW33_2020_GranCanaria/Projects/NIRSOpenIGTLink/README.md b/PW33_2020_GranCanaria/Projects/NIRSOpenIGTLink/README.md index 3003134d3..93689b193 100644 --- a/PW33_2020_GranCanaria/Projects/NIRSOpenIGTLink/README.md +++ b/PW33_2020_GranCanaria/Projects/NIRSOpenIGTLink/README.md @@ -24,7 +24,7 @@ In this project, we will explore the possibility of communicating a proprietary -1. Discuss the convenience of making our NIRS device OpenIGTLink-compliant. +1. Discuss the convenience of making our NIRS device OpenIGTLink-compliant. 2. Select minimum set of queries/messages to implement. 3. Implement a simple 3DSlicer module showing control/interaction with NIRS device. 
@@ -32,39 +32,39 @@ In this project, we will explore the possibility of communicating a proprietary ## Progress and Next Steps -- OpenIGTLink library was installed in NIRS device uProcessor (Raspberry Pi Zero) +- OpenIGTLink library was installed in NIRS device uProcessor (Raspberry Pi Zero) - TrackerServer example code was changed (myTrackerServer) to receive simulated position parameters from uController. -- myTrackerServer and ImageServer example code were implemented as services in the Raspberry Pi Zero to have a simple tracking and image servers . -- SlicerOpenIGTLink module was installed in 3DSlicer to define connectors and visualize tracking and image data. +- myTrackerServer and ImageServer example code were implemented as services in the Raspberry Pi Zero to have a simple tracking and image servers . +- SlicerOpenIGTLink module was installed in 3DSlicer to define connectors and visualize tracking and image data. - A minimum mechanism was successfully implemented in the NIRS device to communicate with 3D Slicer through OpenIGTLink protocol. -- Raspberry pi Zero W + microcontroller combined system is an powerful and cheap option for making OpenIGTLink-compatible hardware and interacting with Slicer. +- Raspberry pi Zero W + microcontroller combined system is an powerful and cheap option for making OpenIGTLink-compatible hardware and interacting with Slicer. # Illustrations - + Fig. 1: Architecture (original figure at the beginning of workshop) .

-
-
+
+
- + Fig. 2: System architecture (detail). NIR light sensors/sources modules were not installed in the hardware setup used in this workshop.

-
-
+
+
- -Fig. 3: Operation example. ImagerClient is running in the red window. TrackerClient is running in 3D window. Microcontroller is turned on ("operationmode" command) and off ("commandmode") by sending TCP/IP commands. TrackerClient receive position data when microcontroller is on. + +Fig. 3: Operation example. ImagerClient is running in the red window. TrackerClient is running in 3D window. Microcontroller is turned on ("operationmode" command) and off ("commandmode") by sending TCP/IP commands. TrackerClient receive position data when microcontroller is on.

-
-
+
+
- -Fig. 4: Hardware Setup used during the workshop. + +Fig. 4: Hardware Setup used during the workshop.

-
-
+
+
1. Creation of a 3D Bezier Markup. -2. Meetings to decide on an integration strategy. +2. Meetings to decide on an integration strategy. ## Progress and Next Steps diff --git a/PW33_2020_GranCanaria/Projects/OHIFSlicerBridge/README.md b/PW33_2020_GranCanaria/Projects/OHIFSlicerBridge/README.md index b8d096f6b..38ba2e8e6 100644 --- a/PW33_2020_GranCanaria/Projects/OHIFSlicerBridge/README.md +++ b/PW33_2020_GranCanaria/Projects/OHIFSlicerBridge/README.md @@ -23,7 +23,7 @@ Possible scenarios: * continue segmentation with Slicer Segment Editor * transfer results back to dicomweb server and continue review in OHIF * Other use cases? - + ## Objectives Define use cases and explore implementation options. @@ -35,7 +35,7 @@ Possible implementation plan: * passes in path to study, slocer downloads and loads the data ready to segment * opens new tab to slicer vnc * user can push segmentation results back to same study via dicomweb - + List other requirements: * reasonable performance launching and using cloud slicer for segmentation * secured connection to Slicer @@ -57,7 +57,7 @@ List other requirements: 2. Implemented downloading of DICOM data to local Slicer from browser using dicomweb from a url: ```slicer://viewer/?studyUID=%202.16.840.1.113669.632.20.121711.10000158860&access_token=token_here&dicomweb_endpoint=http%3A%2F%2Fdemo.kheops.online%2Fapi&dicomweb_uri_endpoint=%20http%3A%2F%2Fdemo.kheops.online%2Fapi%2Fwado```
Registry key to associate Slicer application with "slicer://" custom URL - + ``` Windows Registry Editor Version 5.00 @@ -82,4 +82,3 @@ List other requirements: ![SlicerInOHIF](Untitled.png) ## Background and References - diff --git a/PW33_2020_GranCanaria/Projects/OpenAnatomy/README.md b/PW33_2020_GranCanaria/Projects/OpenAnatomy/README.md index 5eed3cd8a..bf5f160a3 100644 --- a/PW33_2020_GranCanaria/Projects/OpenAnatomy/README.md +++ b/PW33_2020_GranCanaria/Projects/OpenAnatomy/README.md @@ -83,7 +83,7 @@ volumes: [{ slicePosition: [r, a, s] // can be null if off } }] - + annotation: { label: "name of atlas" author: // list, do we have this? diff --git a/PW33_2020_GranCanaria/Projects/OpenAnatomyBrowser/README.md b/PW33_2020_GranCanaria/Projects/OpenAnatomyBrowser/README.md index 04ab2e395..604d2c704 100644 --- a/PW33_2020_GranCanaria/Projects/OpenAnatomyBrowser/README.md +++ b/PW33_2020_GranCanaria/Projects/OpenAnatomyBrowser/README.md @@ -25,7 +25,7 @@ Make the Open Anatomy atlas browser more user friendly and more compatible with 1. Better understanding of common problems between React apps and vtkjs. 1. Understanding of coordinate system mismatch between volumes and models (RAS models), will fix this problem going forward. 1. Provided Andras with latest TA2 draft metadata for labeling atlases. -1. Discussed translation of TA2 to Spanish with Juan Andres. +1. Discussed translation of TA2 to Spanish with Juan Andres. 1. Worked on a preliminary export format for metadata from Slicer. 1. Gained user feedback about metadata uses and atlas viewer needs. diff --git a/PW33_2020_GranCanaria/Projects/SEEGPlanning/README.md b/PW33_2020_GranCanaria/Projects/SEEGPlanning/README.md index 0bf68ca66..b202c7050 100644 --- a/PW33_2020_GranCanaria/Projects/SEEGPlanning/README.md +++ b/PW33_2020_GranCanaria/Projects/SEEGPlanning/README.md @@ -1,6 +1,6 @@ Back to [Projects List](../../README.md#ProjectsList) -Improuving StereoPlan STIM Slicer module for SEEG Procedures. 
+Improuving StereoPlan STIM Slicer module for SEEG Procedures. ## Key Investigators @@ -23,7 +23,7 @@ In our center, since the end of 2017, surgery is conducted with the Rosa robot. 1. To put de code on python3 (2to3) 2. To modify markups interaction behavior (migration doc) -1. To add new features +1. To add new features ## Progress and Next Steps @@ -33,7 +33,7 @@ In our center, since the end of 2017, surgery is conducted with the Rosa robot. 2. new feature : Spin around un axe avaiable from the widget epi-Plan (reformat Slice2RASMAtrix) == !! keyword shortcut (mac: command+option+drag) 3. Much remains to be done. SEEG procedure : add new features to fuse new modalities. DBS modules still in 3D Slicer 4.8 ... -# Illustrations +# Illustrations 1. Stereo-Plan Videos @@ -59,5 +59,3 @@ E. Bardinet and D. Hasboun. WSSFN 2019. Very helpfull : migration doc ==>> [migration doc] (https://www.slicer.org/wiki/Documentation/Nightly/Developers/Tutorials/MigrationGuide/Slicer#Markups) - - diff --git a/PW33_2020_GranCanaria/Projects/SlicerAstroUpdate/README.md b/PW33_2020_GranCanaria/Projects/SlicerAstroUpdate/README.md index 6bdd6532a..6c69888c2 100644 --- a/PW33_2020_GranCanaria/Projects/SlicerAstroUpdate/README.md +++ b/PW33_2020_GranCanaria/Projects/SlicerAstroUpdate/README.md @@ -35,13 +35,13 @@ Low priority: * [ ] (9) Fix compilation of wcslib for windows for having SlicerAstro binaries for windows too. * [ ] (10) Consider updating BBarolo from 1.4 to 1.5. -Last two points will be addressed later on (tickets: [107][slicerastroissue107] and [108][slicerastroissue108]). +Last two points will be addressed later on (tickets: [107][slicerastroissue107] and [108][slicerastroissue108]). 
**All the 13 SlicerAstro modules are updated and properly working with current Slicer master branch (23/01/2020)!** # Illustrations -[![](https://raw.githubusercontent.com/Punzo/SlicerAstroWikiImages/master/Screenshot-SlicerAstro-ProjectWeek2020.png)](http://www.youtube.com/watch?v=D-4G9lKVjaY "Wein069") -[![](https://raw.githubusercontent.com/Punzo/SlicerAstroWikiImages/master/Screenshot-PVSLICE.png)](http://www.youtube.com/watch?v=D-4G9lKVjaY "Wein069") +[![](https://raw.githubusercontent.com/Punzo/SlicerAstroWikiImages/master/Screenshot-SlicerAstro-ProjectWeek2020.png)](https://www.youtube.com/watch?v=D-4G9lKVjaY "Wein069") +[![](https://raw.githubusercontent.com/Punzo/SlicerAstroWikiImages/master/Screenshot-PVSLICE.png)](https://www.youtube.com/watch?v=D-4G9lKVjaY "Wein069") # Background and References SlicerAstro extends 3DSlicer to provide a 2D/3D interactive viewer for astronomical datasets with 3D interaction features, @@ -56,4 +56,3 @@ For more information refer to this [link](https://github.com/Punzo/SlicerAstro/w [slicerastroissue]: https://github.com/Punzo/SlicerAstro/issues/106 [slicerastroissue107]: https://github.com/Punzo/SlicerAstro/issues/107 [slicerastroissue108]: https://github.com/Punzo/SlicerAstro/issues/108 - diff --git a/PW33_2020_GranCanaria/Projects/SlicerOnAndroid/readme.md b/PW33_2020_GranCanaria/Projects/SlicerOnAndroid/README.md similarity index 80% rename from PW33_2020_GranCanaria/Projects/SlicerOnAndroid/readme.md rename to PW33_2020_GranCanaria/Projects/SlicerOnAndroid/README.md index fdfb30492..c163ca7ed 100644 --- a/PW33_2020_GranCanaria/Projects/SlicerOnAndroid/readme.md +++ b/PW33_2020_GranCanaria/Projects/SlicerOnAndroid/README.md @@ -6,37 +6,37 @@ Back to [Projects List](../../README.md#ProjectsList) - Attila Nagy (University of Szeged) - Steve Pieper (Isomics) -- # Project Description + Investigate the possibilities of portig Slicer to Android, to be used in a 
Desktop-like Android environment. ## Objective -1. Put together a software environment with all the needed tools for cross-compilation and document it. -1. Proof of concept work: compile an example Qt app for ARM64 and install and run it on phone. -1. Once there, try to actually compile. +1. Put together a software environment with all the needed tools for cross-compilation and document it. +2. Proof of concept work: compile an example Qt app for ARM64 and install and run it on phone. +3. Once there, try to actually compile. ## Approach and Plan 1. Lots of google-ing, trial and error... -1. Install a VM to host the build environment. -1. Install and configure all the needed tools, IDEs, etc -1. A POC compilation. +2. Install a VM to host the build environment. +3. Install and configure all the needed tools, IDEs, etc +4. A POC compilation. ## Progress and Next Steps 1. Did the VM install -1. Have all the tools configured properly -1. Compilaed a Qt app for ARM64, installed it, and it actually RUNS! :) +2. Have all the tools configured properly +3. Compilaed a Qt app for ARM64, installed it, and it actually RUNS! :) # Illustrations diff --git a/PW33_2020_GranCanaria/Projects/SlicerToUnity/README.md b/PW33_2020_GranCanaria/Projects/SlicerToUnity/README.md index 319cb1c5a..8261ffa72 100644 --- a/PW33_2020_GranCanaria/Projects/SlicerToUnity/README.md +++ b/PW33_2020_GranCanaria/Projects/SlicerToUnity/README.md @@ -19,7 +19,7 @@ Developing a real time viewer showing image data (e.g., segmentations, 3D models -1. Establish a streaming connection between unity and 3D slicer +1. Establish a streaming connection between unity and 3D slicer 2. Select Data that should be streamable (Meta data, dicom images, 3d models) 3. Integrate the streamed data into Unity application @@ -29,7 +29,7 @@ Developing a real time viewer showing image data (e.g., segmentations, 3D models 1. Get an overview about pre-existing solutions (Web server Http streaming, OpenITGLink) 2. 
Developing a slicer plug-in - * create new plugin + * create new plugin * integrate streaming technology * select data to be streamed * put data into streamable format @@ -45,7 +45,7 @@ Developing a real time viewer showing image data (e.g., segmentations, 3D models 1. Look at pre-existing solutions (Web server Http streaming, OpenITGLink) * OpenITGLink seems a bit too much for our goals - * http streaming looks promising (https://github.com/pieper/SlicerWeb) + * http streaming looks promising (https://github.com/pieper/SlicerWeb) * https://projectweek.na-mic.org/PW32_2019_London_Canada/Projects/KidneySegmentation/ * AR/VR solutions with slicer: https://www.slicer.org/wiki/Documentation/Labs/Augmented_Reality_and_Virtual_Reality_support#Current_approaches * https://github.com/Lyla-M/UnityOpenIGTLink diff --git a/PW33_2020_GranCanaria/Projects/UltrasoundNavigationTrainingSystemForBreastBiopsy/README.md b/PW33_2020_GranCanaria/Projects/UltrasoundNavigationTrainingSystemForBreastBiopsy/README.md index e12b841d3..8159655ca 100644 --- a/PW33_2020_GranCanaria/Projects/UltrasoundNavigationTrainingSystemForBreastBiopsy/README.md +++ b/PW33_2020_GranCanaria/Projects/UltrasoundNavigationTrainingSystemForBreastBiopsy/README.md @@ -36,7 +36,7 @@ We aiming at developing an affordable ultrasound navigation system for breast bi -1. A tailor-made coarse approach to breast phantom using agar-agar gelatine was created. Its costs is below 8 USD and it is usable at room temperature for more than one week, time enough to complete an introductory course in this procedure. We also tested some common materials, available at home or in grocery stores, to mimic mammary tumors +1. A tailor-made coarse approach to breast phantom using agar-agar gelatine was created. Its costs is below 8 USD and it is usable at room temperature for more than one week, time enough to complete an introductory course in this procedure. 
We also tested some common materials, available at home or in grocery stores, to mimic mammary tumors 1. The setup of the navigation system connecting both, ultrasound and tracking system to Slicer via PLUS Server has been completed. 1. Customization of the user interface developed for the previous work on vessel insertion was considered good enough and therefore no change was needed. @@ -48,15 +48,15 @@ We aiming at developing an affordable ultrasound navigation system for breast bi ![Phantom](breastPhantom.png) Custom breast phantom.
-
+
![Tracking](positionTracking.jpg) OptiTrack V120:Duo position tracking system.
-
+
![System](systemAndPhantoms.jpg) US Nav system. Test of position-tracking of tools and phantom ecogenicity.
-
+
![detail](interfaceDetail.png) Detail of the user interface. diff --git a/PW33_2020_GranCanaria/Projects/WorkflowForMaxillo/readme.md b/PW33_2020_GranCanaria/Projects/WorkflowForMaxillo/README.md similarity index 93% rename from PW33_2020_GranCanaria/Projects/WorkflowForMaxillo/readme.md rename to PW33_2020_GranCanaria/Projects/WorkflowForMaxillo/README.md index 554f2f653..96c6b417d 100644 --- a/PW33_2020_GranCanaria/Projects/WorkflowForMaxillo/readme.md +++ b/PW33_2020_GranCanaria/Projects/WorkflowForMaxillo/README.md @@ -1,6 +1,6 @@ Back to [Projects List](../../README.md#ProjectsList) -# Slicer on Android +# Adapt an already existing workflow in maxillofacial reconstructive surgery to Slicer ## Key Investigators @@ -17,7 +17,7 @@ Adapt an already existing workflow to Slicer. Currently it inolves MeshMixer and -1.Take a look at the current way of doing it and see what (likely not too big ) adjustments need to be done to be able to achieve the same results from within Slicer. +1.Take a look at the current way of doing it and see what (likely not too big ) adjustments need to be done to be able to achieve the same results from within Slicer. ## Approach and Plan diff --git a/PW33_2020_GranCanaria/Projects/dicomweb-server/README.md b/PW33_2020_GranCanaria/Projects/dicomweb-server/README.md index ed30a80da..499ba7ddd 100644 --- a/PW33_2020_GranCanaria/Projects/dicomweb-server/README.md +++ b/PW33_2020_GranCanaria/Projects/dicomweb-server/README.md @@ -1,6 +1,6 @@ Back to [Projects List](../../README.md#ProjectsList) -# Lightweight DICOMweb Server with CouchDB +# Lightweight DICOMweb Server with CouchDB ## Key Investigators @@ -23,7 +23,7 @@ This project aims to build on prototype [lightweight DICOM server that was devel * search by reference (e.g. find all segmentations of this image) * extract cohorts by tag (e.g. find all CTs with .5mm pixel spacing) * represent workflow state (e.g. 
give list of all studies that do not have manual segmentations) -* Work on improving performance +* Work on improving performance ## Approach and Plan diff --git a/PW33_2020_GranCanaria/Projects/dicomweb-server/test_ohifviewer.html b/PW33_2020_GranCanaria/Projects/dicomweb-server/test_ohifviewer.html index c79da8601..ba36c81a3 100644 --- a/PW33_2020_GranCanaria/Projects/dicomweb-server/test_ohifviewer.html +++ b/PW33_2020_GranCanaria/Projects/dicomweb-server/test_ohifviewer.html @@ -28,7 +28,7 @@ dicomWeb: [ { name: 'local dicomweb-server-js', - + wadoUriRoot: 'http://localhost:5985', qidoRoot: 'http://localhost:5985', wadoRoot: 'http://localhost:5985', @@ -49,4 +49,4 @@ ReactDOM.render(app, document.getElementById("root")); - \ No newline at end of file + diff --git a/PW33_2020_GranCanaria/Projects/ePadSlicer/README.md b/PW33_2020_GranCanaria/Projects/ePadSlicer/README.md index f0da819b9..161a0ae3a 100644 --- a/PW33_2020_GranCanaria/Projects/ePadSlicer/README.md +++ b/PW33_2020_GranCanaria/Projects/ePadSlicer/README.md @@ -1,6 +1,6 @@ Back to [Projects List](../../README.md#ProjectsList) -# ePAD/Slicer Bridge and Segmentation Plugin +# ePAD/Slicer Bridge and Segmentation Plugin ## Key Investigators @@ -32,7 +32,7 @@ Open dockerized Slicer from ePad, open segmentation plugin and save the segmetat -1. Update [SlicerChronicle](https://github.com/pieper/SlicerChronicle) to python3 and Slicer 4.11 +1. Update [SlicerChronicle](https://github.com/pieper/SlicerChronicle) to python3 and Slicer 4.11 1. Generalize plan to work with locally running Slicer with Chronicle enabled (same code will work with cloud Slicer with or without docker container) 1. Update json job request file format 1. 
Test with local dicomweb-server diff --git a/PW33_2020_GranCanaria/Projects/modelhub-ai/README.md b/PW33_2020_GranCanaria/Projects/modelhub-ai/README.md index 7ae235ef4..86e09ae7a 100644 --- a/PW33_2020_GranCanaria/Projects/modelhub-ai/README.md +++ b/PW33_2020_GranCanaria/Projects/modelhub-ai/README.md @@ -52,4 +52,4 @@ project seems to be very well thought-through and documented, and recently got i # Illustrations Liver contours computed via the **cascaded-fcn-liver** model parsed and visualized in MeVisLab: -![MeVisLab viewer with CT slice & liver contour overlay](cascaded-fcn-liver_in_MeVisLab.png) \ No newline at end of file +![MeVisLab viewer with CT slice & liver contour overlay](cascaded-fcn-liver_in_MeVisLab.png) diff --git a/PW33_2020_GranCanaria/Projects/optimization/README.md b/PW33_2020_GranCanaria/Projects/optimization/README.md index 506dcc2ef..921453ceb 100644 --- a/PW33_2020_GranCanaria/Projects/optimization/README.md +++ b/PW33_2020_GranCanaria/Projects/optimization/README.md @@ -2,26 +2,26 @@ Back to [Projects List](../../README.md#ProjectsList) # Software for finding optimal weigths for extracting Spherical Harmonics components from a spherical distribution. -## Key Investigators -## Hans Knutsson, Carl-Fredrik Westin -Linkoping University, Sweden -Radiology, Brigham and Women’s Hospital, Boston, MA, US +## Key Investigators +## Hans Knutsson, Carl-Fredrik Westin +Linkoping University, Sweden +Radiology, Brigham and Women’s Hospital, Boston, MA, US Harvard Medical School, Boston, MA, US -## Corresponding author: +## Corresponding author: Hans Knutsson (hans.knutsson@liu.se ) -# Project Description +# Project Description The goal of this project is to provide a tool for finding optimal weigths for extracting Spherical Harmonics components from a spherical distribution. The set of coordinades on the sphere can be given as input or chosen from one of a number of precomputed sets. 
-One good example where this approach is useful is restoring rotation invariance of diffusion MRI estimators in the presence of missing or corrupted measurements +One good example where this approach is useful is restoring rotation invariance of diffusion MRI estimators in the presence of missing or corrupted measurements -## Objective +## Objective 1. Introduce a tool for analysis of signal distributions on a sphere to the diffusion MRI community. 2. Provide a tool for visualization of 3D point distributions. - 4. Basic thresholding --> -## Progress (pw 33) +## Progress (pw 33) -1. Made project page nicer. -2. Added link to hansknutsson GitHub. -3. Added explanations of paremeters used. -4. Impoved code comments. +1. Made project page nicer. +2. Added link to hansknutsson GitHub. +3. Added explanations of paremeters used. +4. Impoved code comments. 5. Added ISMRM reference. -## Illustrations +## Illustrations ![Opt_SPH_fig1](https://raw.githubusercontent.com/hansknutsson/HK_library_test/master/Complementary_material/Opt_SPH_fig1.jpg) -This figure shows the result of the weight optimization for the case of 120 uniformly distributed orientations with 12 missing measurements. The rows show results for spherical harmonic (SPH) degrees 0, 2 and 4. Colors indicate filter weight values, blue is most positive and red is most negative. The missing measurement locations are shown in white. +This figure shows the result of the weight optimization for the case of 120 uniformly distributed orientations with 12 missing measurements. The rows show results for spherical harmonic (SPH) degrees 0, 2 and 4. Colors indicate filter weight values, blue is most positive and red is most negative. The missing measurement locations are shown in white. 
![SPHerrors_120_12missing](https://github.com/hansknutsson/HK_library_test/raw/master/Complementary_material/SPHerrors_120_12missing.png) The figure shows the estimated error distribution for the case of 120 uniformly distributed orientations with 12 missing measurements. The error is given as a function of the maximum SPH degree of the measured signal and the degree of the measurement filter. The left plot shows the result for a signal with equal energy for all SPH's up to the degree indicated on the x-axis. The right plot shows the result using the much more realistic case where the energy decreases for higher SPH degrees. The dashed lines show the errors using SPH function values as weights. The continuous lines show the result using the optimized weights. -## Background +## Background I developed the upploaded code as tools in my research towards finding optimal sets of waveforms for analysis of microstructural tissue features using diffusion weighted MRI (dMRI). The code can be used for optimization and visualisation of a number of aspects in dMRI. -## References +## References [ISMRM_poster_2019](https://github.com/hansknutsson/HK_library_test/blob/master/Complementary_material/ISMRM_poster_2019.pdf) ## Related resources can be found at [HK_library_test](https://github.com/hansknutsson/HK_library_test) diff --git a/PW33_2020_GranCanaria/Projects/syntheticCT_evaluation/README.md b/PW33_2020_GranCanaria/Projects/syntheticCT_evaluation/README.md index b510c6e2f..a48646b08 100644 --- a/PW33_2020_GranCanaria/Projects/syntheticCT_evaluation/README.md +++ b/PW33_2020_GranCanaria/Projects/syntheticCT_evaluation/README.md @@ -27,7 +27,7 @@ We want to create a Slicer module for stardazied conversion accuracy assessment. -1. Write an extension that will quantify conversion accuracy. The code will be written in python. +1. Write an extension that will quantify conversion accuracy. The code will be written in python. 
## Progress and Next Steps @@ -35,7 +35,7 @@ We want to create a Slicer module for stardazied conversion accuracy assessment. 1. The extension "ImageCompare" was created. For the moment is contains just a module for synthetic CT evaluation. More modules could be add later. - + 2. The extension in already available in the nigthly build (thanks JC and Andras!) # Illustrations diff --git a/PW33_2020_GranCanaria/Projects/whitematter/README.md b/PW33_2020_GranCanaria/Projects/whitematter/README.md index 43350dd6a..46d1c67f1 100644 --- a/PW33_2020_GranCanaria/Projects/whitematter/README.md +++ b/PW33_2020_GranCanaria/Projects/whitematter/README.md @@ -17,7 +17,7 @@ ## Approach and Plan -1. The Levels of emotional awareness scale (LEAS) is used to assess social cognition. It has two parts: LEAS-self and LEAS-other, which are used to assess the ability to identify our own feeling and those of others, respectively. +1. The Levels of emotional awareness scale (LEAS) is used to assess social cognition. It has two parts: LEAS-self and LEAS-other, which are used to assess the ability to identify our own feeling and those of others, respectively. 1. Previously we have shown correlations between gray matter and the LEAS score. @@ -45,8 +45,8 @@ Potential white matter tracts of emotional awareness. 1. Malcolm JG, Shenton ME, Rathi Y. Filtered multitensor tractography. IEEE Trans Med Imaging. 2010 Sep;29(9):1664-75. PMID: 20805043. - + 1. Zhang F, Wu Y, Norton I, Rigolo L, Rathi Y, Makris N, O'Donnell LJ. An anatomically curated fiber clustering white matter atlas for consistent white matter tract parcellation across the lifespan. Neuroimage. 2018 Oct 1;179:429-447, PMID:29920375. - + 1. Martin Jáni, Zora Kikinis, et al., Neural Correlates of Emotional Awareness in Schizophrenia, Manuscript in preparation. 
diff --git a/PW33_2020_GranCanaria/README.md b/PW33_2020_GranCanaria/README.md index 594b2534b..2d31d5d6d 100644 --- a/PW33_2020_GranCanaria/README.md +++ b/PW33_2020_GranCanaria/README.md @@ -100,7 +100,7 @@ This is our third Project Week in Las Palmas. * [Improuving StereoPlan STIM Slicer module for SEEG Procedures](Projects/SEEGPlanning/README.md). Sara Fernandez Vidal, ICM) -* [Adapt an already existing workflow in maxillofacial reconstructive surgery to Slicer](Projects/WorkflowForMaxillo/readme.md) (Attila Nagy, András Lasso, Steve Pieper and whoever is interested) +* [Adapt an already existing workflow in maxillofacial reconstructive surgery to Slicer](Projects/WorkflowForMaxillo/README.md) (Attila Nagy, András Lasso, Steve Pieper and whoever is interested) * [Sarcopenia Quantification in Abdominal MR Images](Projects/SarcopeniaQuantification/README.md) (Hans Meine, Mike Halle, Ron Kikinis) @@ -131,7 +131,7 @@ Mohamed Septy, Ainina Ndiaye, Siham Alaoui, Mouhcine Madani, Otman Aghzout, Badi * [CTK DICOM database and browser improvements](Projects/ctkDICOMDatabase) (Marco Nolden, Andras Lasso, Steve Pieper, Jc) -* [Slicer on Android](Projects/SlicerOnAndroid/readme.md) (Attila Nagy, Steve Pieper and whoever is interested) +* [Slicer on Android](Projects/SlicerOnAndroid/README.md) (Attila Nagy, Steve Pieper and whoever is interested) * [Location Sensitive Hashing for web-scale image indexing](Projects/LSHindexing/README.md) (Sandy Wells, Steve Pieper) diff --git a/PW34_2020_Virtual/Breakouts/LiveDiscourse/README.md b/PW34_2020_Virtual/Breakouts/LiveDiscourse/README.md new file mode 100644 index 000000000..f0006a2af --- /dev/null +++ b/PW34_2020_Virtual/Breakouts/LiveDiscourse/README.md @@ -0,0 +1,103 @@ +# Live discourse + +NA-MIC Project Week 2020-12-16, 12-2pm EST + +## User questions/suggestions: + +* I could never fully understand those functions that can be edited in the Volume Rendering module (Advanced/Volume properties). Can you explain what they mean? 
+* Can Slicer be connected to a DICOMweb server to retrieve and store images there? + * See [SlicerDICOMwebBrowser](https://www.google.com/url?q=https://github.com/lassoan/SlicerDICOMwebBrowser&sa=D&ust=1608152866180000&usg=AOvVaw1wBAAMArS0JsnfJ9dpqhom) extension + * "Dropbox" for medical images. See [https://demo.kheops.online/](https://www.google.com/url?q=https://demo.kheops.online/&sa=D&ust=1608152866180000&usg=AOvVaw1FFfzhwsnKlrynrNWQfHhc)  + * Create Album -> create a link -> then use in Slicer dicom web client +* Are SampleData free for reuse ? + * Yes. except the Panoramix dataset that we are currently removing (see [PR Slicer#5311](https://www.google.com/url?q=https://github.com/Slicer/Slicer/pull/5311&sa=D&ust=1608152866181000&usg=AOvVaw2ugr46Icc7huRkWOSFJBKh)) + * If you need paperwork (e.g in context of IRB), getting data from [TCIA](https://www.google.com/url?q=https://www.cancerimagingarchive.net/collections/&sa=D&ust=1608152866181000&usg=AOvVaw1iEQDE9Bksz4jUfpqKBuTs) is recommended. +* What is the suggested "best practice" to learn slicer? + * A good starting point is to download the tutorials and pre-computed datasets on the [Slicer Training Compendium](https://www.google.com/url?q=https://www.slicer.org/wiki/Documentation/Nightly/Training&sa=D&ust=1608152866181000&usg=AOvVaw26T6zx2JlfCRYQngyhzsCe). (be sure to be aware of the version of Slicer you are using vs the version mentioned in the tutorial). + * Additional questions: + * How can I get help without feeling like I'm asking for too much? + * We are always happy to help either on the Slicer Discourse Forum or directly by email.   + * Try to pose your questions thoughtfully ([http://sscce.org](https://www.google.com/url?q=http://sscce.org&sa=D&ust=1608152866182000&usg=AOvVaw2X0yT_eoIk3OccuwxT-Qtm)/) + * A good practice is to check if your Slicer question has already been answered on the Slicer Forum before posting it. 
+ * What about adding a category in the discourse for "Beginner/New User Questions?" + * Is there a way to get help if I don't speak English well? + * Many Slicer team members are non-native English speakers, just mention what your native language is and we’ll try to find someone who could help out + * There are some [Chinese resources](https://www.google.com/url?q=https://spujol.github.io/SlicerTutorialsInChinese/&sa=D&ust=1608152866182000&usg=AOvVaw09IFKcwb2Dn_B5lvluSjNg) for example + * (Sonia Pujol spujol@bwh.harvard.edu) I am always happy to help with Slicer questions in French + * (Andrey, @fedorov in Discourse) I am a native Russian speaker, and fluent in Ukrainian - happy to help +* Volume rendering seems to delineate structures quite nicely. Can I use it for quantification? + * Short answer: No. You would need to perform a segmentation or place markups fiducials on the visible surface to measure. + * Longer answer: placing fiducials on volume rendering is possible, also segmentation based on opacity could possibly be done. Fiducials are placed at 50% opacity. + * Follow up questions: + * Which picker is used ? + Software picker is used to place markups. In the future, we could re-evaluate to make picking faster. + * Can we adjust the 50% threshold ? + Adjusting the transfer function may be an alternative approach. + * Comments / suggestions: + * Add presets for CBCT + * MicroCT does volume render but it’s tricky to adjust depending if 8-bit or 16-bit +* Documentation is quite fragmented ([ReadTheDocs](https://www.google.com/url?q=https://slicer.readthedocs.io/en/latest/&sa=D&ust=1608152866183000&usg=AOvVaw1qaL9prP4aEkFKp6l-XXV0), [wiki](https://www.google.com/url?q=https://www.slicer.org/wiki/Documentation/Nightly&sa=D&ust=1608152866183000&usg=AOvVaw0viYLr3r0tTLETqJzzR7pt)). Can I do something to help? + * (Jc) We currently focus on moving documentation to readthedocs and updating the wiki to add links “redirecting” to the readthedocs. 
We would greatly benefit from help to: + (a) move documentation of module to readthedocs, + (b) review the description of existing modules and update them. + (c) mark deprecated content on the wiki as “historical”. This is done by using the [historical](https://www.google.com/url?q=https://www.slicer.org/wiki/Template:Historical&sa=D&ust=1608152866184000&usg=AOvVaw1-MGgsjSNbcYn4-EZNrKKv)template. For an example of use, see [this page](https://www.google.com/url?q=https://www.slicer.org/wiki/Slicer:git-svn&sa=D&ust=1608152866184000&usg=AOvVaw1IOdB6qBUnLBDbKoHbYcoM), + (d) improve the extension wizard to to include template for documentation (we would need to discuss this during the weekly hangout) + * Comments / Suggestions: + * Ron: Add last modified timestamp to documentation page + * Jc: Looking into enabling [https://pypi.org/project/sphinx-gitstamp/](https://www.google.com/url?q=https://pypi.org/project/sphinx-gitstamp/&sa=D&ust=1608152866184000&usg=AOvVaw3-7jkad2dwZtWvgTkg6FdT)  + * Andras: Discourse tag “[feature-requests](https://www.google.com/url?q=https://discourse.slicer.org/c/support/feature-requests/9&sa=D&ust=1608152866185000&usg=AOvVaw3e-VRli8zjjlwOH8jcFLmg)” can be “followed” to be notified +* Slicer’s Segmentation Editor is very powerful.  Please review where to find the best tutorials and materials for learning it to segment data in the wild. + * [https://lassoan.github.io/SlicerSegmentationRecipes/](https://www.google.com/url?q=https://lassoan.github.io/SlicerSegmentationRecipes/&sa=D&ust=1608152866185000&usg=AOvVaw1S7uBrYJ_gxlTN4nPU-xkA) + * Also many tutorials in Slicer’s tutorial documentation + * How to store tutorials: github pages, google slides, powerpoint +* Why estimation of MeanDiffusivity is not possible? 
+ * should be available in SlicerDMRI[ (see this paper](https://www.google.com/url?q=https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5679308/&sa=D&ust=1608152866186000&usg=AOvVaw0KhmgS5oL5KEu69IBgYcDJ) for features) +* With the addition of more model editing tools, are there any plans to add a Sketchfab exporter? + * With the [OpenAnatomy extension](https://www.google.com/url?q=https://github.com/PerkLab/SlicerOpenAnatomy&sa=D&ust=1608152866186000&usg=AOvVaw2G9tYpcT4y8VzlCyS1275d) you can export glTF which should be easy to put in Sketchfab + * MorphoSource does not support interactive visualization + * Sketchfab can show models without making it easy to download, and download link is provided to MorphoSource (where access control and downloads are counted) + * From Stephen: Video explaining how to load data using vtk based javascript viewer: [https://vimeo.com/413597904](https://www.google.com/url?q=https://vimeo.com/413597904&sa=D&ust=1608152866186000&usg=AOvVaw2fD6Jj-5_ot6fAikgOseJy)  +* Is there a recommended way of removing noise from CT scans ? especially metal artifacts from dental work and titanium plates without affecting the quality very much ?  It was recommended to do anisotropic diffusion filter but i get the error “Pixel type: 16-bit signed integer is not supported in 3D by N3itk6simple40CurvatureAnisotropicDiffusionImageFilterE.” + * You can use the Cast Scalar Volume modul to create a different data type.  Probably Float is the best. +* Is there a way to do non-binary segmentations, where you could have a label and a probability associated with each volume sample? Or multiple labels and multiple probabilities associated with each volume sample? + * There is a representation called Fractional labelmap just for this, but it’s not used by the majority of the segmentation methods + * Thanks -- can it be displayed as a “LabelMap Volume” in the Slicer application? + * LabelMap Volume is strictly binary. 
In modern Slicer segmentations should be Segmentation nodes. The infrastructure of Fractional labelmaps and its visualization is in place, but it seems (in the latest nightly) that it’s not enabled + * OK, could I render it as one of the two volume images or do I have to convert it to a grey-scale volume first? + * I suggest reading [this page](https://www.google.com/url?q=https://slicer.readthedocs.io/en/latest/user_guide/image_segmentation.html&sa=D&ust=1608152866187000&usg=AOvVaw13NVCNZ_TinUiWaTud7nwp) to understand difference btw volume and segmentation + * In almost all cases supersampling the labelmap in comparison to the source data is better use of memory than a fractional volume would be.  E.g. 8 bits per pixel could be fractional but could also be a 2x2x2 supersample. +* Probably a small thing, but in Mimics I’m used to having a circle, square and lasso when editing segmentations, would it be difficult to add the option to edit with a square that is 1 voxel and scalable? Sometimes 1 voxel can cause a spike or hole, but you don’t need to smooth a large area. + * We can add this if there is a compelling use case. +* I saw the feature that allows you to adjust contours with control points that you showed in your SlicerHeart demo - where does that live? + * Markups module: closed/open curves +* Can triangle models/surface models be edited on the fly? If yes, could you point us to an example where this is done? + * Segment Editor: for closed surfaces only, uses binary labelmap representation + * Dynamic modeler: for any models, but has limited editing features + +## Developer questions/suggestions + +* How 3D Slicer development is funded? How can I become a full-time 3D Slicer developer? + * The typical scenario these days is that Slicer support comes as a by-product of other funded efforts that rely on it.  
For example, several commercial and academic projects that employ the core slicer developers and agree that it's in their best interest to have a robust platform to work from. + * [Commercial partners](https://www.google.com/url?q=https://www.slicer.org/wiki/CommercialUse&sa=D&ust=1608152866188000&usg=AOvVaw3XnLkseHXVCKjr2nh_04q3)are hiring (see also [Kitware](https://www.google.com/url?q=https://jobs.lever.co/kitware?location%3DCarrboro%252C%2520North%2520Carolina&sa=D&ust=1608152866188000&usg=AOvVaw1ocz2l_o5-b8_br4tvr645)) + * What about creating a Slicer Developer job board? + * We usually post these in the “Jobs” category on discourse: [https://discourse.slicer.org/c/announcements/events/27](https://www.google.com/url?q=https://discourse.slicer.org/c/announcements/events/27&sa=D&ust=1608152866189000&usg=AOvVaw2vhsUGOEcUt188Qar3oDME)  +* How do I figure out how to automate something that I know how to do in the GUI? + * The [Python script repo](https://www.google.com/url?q=https://www.slicer.org/wiki/Documentation/Nightly/ScriptRepository&sa=D&ust=1608152866189000&usg=AOvVaw3ZmLPg1utOtnVL2SB1h_s6) is the best starting point for automating Slicer using Python +* How can I make auto-complete work in my Python IDE (Visual Studio Code, PyCharm)? + * Setting up PythonSlicer as Python interpreter can help + * ITK Python wrapping: interface file pyi, works for PyCharm + * See [https://www.python.org/dev/peps/pep-0484/#stub-files](https://www.google.com/url?q=https://www.python.org/dev/peps/pep-0484/%23stub-files&sa=D&ust=1608152866190000&usg=AOvVaw28FSoHWgcI-SELwqGQ_LAT)  +* Where can I upload sample data for my own extension? 
+ * (Large data sets can be store in Github repositories as release assets - for example [https://github.com/lassoan/SlicerOrthodonticAnalysis/releases/tag/TestingData](https://www.google.com/url?q=https://github.com/lassoan/SlicerOrthodonticAnalysis/releases/tag/TestingData&sa=D&ust=1608152866190000&usg=AOvVaw0mZGiuL-OLOoQxVrTFUsaO)) + * Scripts for Automatic content-based addressing: [https://github.com/Slicer/SlicerTestingData/releases](https://www.google.com/url?q=https://github.com/Slicer/SlicerTestingData/releases&sa=D&ust=1608152866190000&usg=AOvVaw3qq5s-BBsABhHGhtpM9roY) +* How to import and use python modules inside Slicer python or extension modules?(what about other modules?) + * slicer.util.pip_install() in the python interactor or in python code to pip install packages + * For custom applications: Python packages can be added as additional dependencies +* How does the continuous integration for extensions work? If I start a new extension, can I use the CI from the beginning? + * In order for the extension to be built by the CI, it needs to be submitted to the [ExtensionIndex](https://www.google.com/url?q=https://github.com/Slicer/ExtensionsIndex&sa=D&ust=1608152866191000&usg=AOvVaw0ZlrDq71HMxPkFbyIuav2S) + * We generally require that extensions be basically functional before approving a submission to the index, +* My 2 cents: maybe it could be useful to highlight/remind participants that some Slicer algorithms can be run via command line, without GUI. Maybe someone is interested in adding some slicer stuff to his/her processing script that runs automatically without user interaction. +* What's the best way to share a module to others without going through extension manager? 
(for under development modules to share with people to test functionality) + * extension can be installed from file + * manually downloaded, non-packaged modules can be added to Slicer by using Application settings / Modules / Additional module paths + * for functions that do not depend on Slicer, creating a regular python package can be an option diff --git a/PW34_2020_Virtual/Breakouts/Segmentation/README.md b/PW34_2020_Virtual/Breakouts/Segmentation/README.md new file mode 100644 index 000000000..984d541a8 --- /dev/null +++ b/PW34_2020_Virtual/Breakouts/Segmentation/README.md @@ -0,0 +1,5 @@ +# Live discourse + +NA-MIC Project Week 2020-12-17, 2:00-3:30pm EST + +Andras Lasso will lead breakout session on image segmentation in 3D Slicer. Bring your own questions/problems or listen to what other challenges others have and ways to solve them. diff --git a/PW34_2020_Virtual/Breakouts/SegmentationStorageFormat/README.md b/PW34_2020_Virtual/Breakouts/SegmentationStorageFormat/README.md new file mode 100644 index 000000000..cde563f70 --- /dev/null +++ b/PW34_2020_Virtual/Breakouts/SegmentationStorageFormat/README.md @@ -0,0 +1,158 @@ +# Segmentation and annotation storage format + +NA-MIC Project Week 2020-12-17, 11am-12pm EST + +In this breakout session we try to come up with a consensus on how to best store image segmentation and annotations that is convenient for many workflows and software tools. + +## Attendees + +1. Andras Lasso (Queen's University) +1. Steve Pieper (Isomics) +1. Matt McCormick (Kitware) +1. Hans Johnson (University of Iowa) +1. Mike Halle (BWH) +1. Alexis Girault (Kitware) +1. Ron Kikinis (BWH) +1. Theodore Aptekarev +1. Mehran Azimbagirad +1. Sam Horvath (Kitware) +1. add your name here + +## Current State + +### Commonly used formats for image segmentation + +- 3D image + colormap file: nrrd/nift and separate file in custom file format for storing segment names and color (3D Slicer, ITK-Snap, ...) 
+- 3D Slicer seg.nrrd file: standard nrrd with custom fields ([specification](https://github.com/Slicer/Slicer/blob/master/Libs/MRML/Core/vtkMRMLSegmentationStorageNode.h#L68-L102), [example](https://www.slicer.org/wiki/Documentation/Nightly/ScriptRepository#Get_information_from_segmentation_nrrd_file_header)) +- DICOM segmentation object +- DICOM SR (see this [white paper by David Clunie](https://docs.google.com/document/d/1bR6m7foTCzofoZKeIRN5YreBrkjgMcBfNA7r9wXEGR4/edit#heading=h.fgs65rsvrdy3)) +- Legacy DICOM RT parallel contours, and other weird things (overlay, …) +- Label JSON compatible with or using the [OME-NGFF "image-label" metadata](https://ngff.openmicroscopy.org/latest/#label-md) + - two files (json sidecar) + - python, c++, javascript, rust, java + - [zarr](https://zarr.readthedocs.io/en/stable/), n5 (not-hdf5) + - relies on “folder” structure (similar to BIDS) + - when working with local filesystem, this facilitates parallel write + - easy mapping to web hosting to request partial data components + - also support for zipping into single file for transport, reading on local filesystems + +### Commonly used formats for annotation (markups - points, lines, curves, ...) 
+ +- Slicer fcsv: good for problem-specific export, not well suited as archival format +- Slicer markups json (https://github.com/Slicer/Slicer/blob/master/Modules/Loadable/Markups/Resources/Schema/markups-schema-v1.0.0.json) +- DICOM SR: limited and complicated, but standard +- AIM: did not really take off, a new json format is being created +- MetaIO: + - frame of references were nicely defined + - [Confusions between coordinate system and transform](https://discourse.slicer.org/t/bug-when-reading-mha-file-with-anatomicalorientation/7038) + +## Requirements + +- Scope: + - Use shared data model, can be used in different file format + - Interchange format (not high-performance) +- Easy reading/writing in Python, C++, JavaScript +- Compatibility with 3D Slicer, ITK, DICOM, ... +- Web friendly (JSON) +- a python module for Slicer I/O +- non iteratively running segment editor: It makes sense to use the module interactively for workflow/parameter exploration, but once a methodology to process a certain dataset is clear, I would love to be able to run through command line in batch. Right now I use the segment editor interactively and then move to Jupyter and/or C++ CLIs that use analogous functionality, but that I can batch-ify. It's really annoying. +- separate style, semantic, representation +- Allow specifying hierarchies +- provenance (optional reference to source images, operator, date/time) +- Frame of reference UID +- Compression +- Tiling + +## Suggested solutions + +- NRRD with single json object in a custom field +- Label JSON compatible with or using the [OME-NGFF "image-label" metadata](https://ngff.openmicroscopy.org/latest/#label-md) +- NRRD replacement modeled after glTF +- Use glTF and create a standard extension, see [TRAKO](https://github.com/bostongfx/trako) as an example. Why? + - NRRD taught us many things, but in 2020 we don’t need ad-hoc parsing. We have JSON. 
+ - Even if you have a robust parser for NRRD, extra key values need to be parsed manually. + - NRRD was designed to be w2not just human readable, but human writable. It permits all sorts of duplicate fields and values (signed long long int vs int64_t. Really? Keys with optional spaces in them? Really? “centers” and “centerings” as synonyms? Really?). + - It most likely isn’t UTF-8 compliant, so it can’t be internationalized. That’s important for segmentation labels from other languages. + - It has corner cases in parsing. For example, if a field value has a space in front of it, it will get eaten. + - It has extensibility, but it is limited. + - NRRD is a little tricky to parse efficiently in JavaScript because there’s no guarantee that the image data is aligned to the data type. For instance, floats might not be on a four byte boundary. This may lead to unnecessary copying of potentially large datasets. + - Support for only one buffer. glTF has multiple. This isn’t a big deal, but it’s nice to be able to have multiple datasets together. + +## Appendix + +### Seg.nrrd header example: + +
+NRRD0004
+# Complete NRRD file format specification at:
+# http://teem.sourceforge.net/nrrd/format.html
+type: unsigned char
+dimension: 4
+space: left-posterior-superior
+sizes: 2 256 256 112
+space directions: none (0.93750000000000022,0,0) (0,0.93750000000000022,0) (0,0,1.4000000000000001)
+kinds: list domain domain domain
+encoding: gzip
+space origin: (-119.53100000000005,-119.53099999999999,-77.700000000000003)
+Segment0_Color:=0.694118 0.478431 0.396078
+Segment0_ColorAutoGenerated:=1
+Segment0_Extent:=40 210 3 235 0 111
+Segment0_ID:=Segment_1
+Segment0_LabelValue:=1
+Segment0_Layer:=0
+Segment0_Name:=Head
+Segment0_NameAutoGenerated:=0
+Segment0_Tags:=Segmentation.Status:notstarted|TerminologyEntry:Segmentation category and type - 3D Slicer General Anatomy list~SCT^123037004^Anatomical Structure~SCT^69536005^Head~^^~Anatomic codes - DICOM master list~^^~^^|
+Segment1_Color:=0.564706 0.933333 0.564706
+Segment1_ColorAutoGenerated:=1
+Segment1_Extent:=113 148 80 115 64 89
+Segment1_ID:=Segment_2
+Segment1_LabelValue:=1
+Segment1_Layer:=1
+Segment1_Name:=Mass
+Segment1_NameAutoGenerated:=0
+Segment1_Tags:=Segmentation.Status:inprogress|TerminologyEntry:Segmentation category and type - 3D Slicer General Anatomy list~SCT^49755003^Morphologically Altered Structure~SCT^4147007^Mass~^^~Anatomic codes - DICOM master list~^^~^^|
+Segmentation_ContainedRepresentationNames:=Binary labelmap|Closed surface|
+Segmentation_ConversionParameters:=Collapse labelmaps|1|Merge the labelmaps into as few shared labelmaps as possible 1 = created labelmaps will be shared if possible without overwriting each other.&Compute surface normals|1|Compute surface normals. 1 (default) = surface normals are computed. 0 = surface normals are not computed (slightly faster but produces less smooth surface display).&Crop to reference image geometry|0|Crop the model to the extent of reference geometry. 0 (default) = created labelmap will contain the entire model. 1 = created labelmap extent will be within reference image extent.&Decimation factor|0.0|Desired reduction in the total number of polygons. Range: 0.0 (no decimation) to 1.0 (as much simplification as possible). Value of 0.8 typically reduces data set size by 80% without losing too much details.&Fractional labelmap oversampling factor|1|Determines the oversampling of the reference image geometry. All segments are oversampled with the same value (value of 1 means no oversampling).&Joint smoothing|0|Perform joint smoothing.&Oversampling factor|1|Determines the oversampling of the reference image geometry. If it's a number, then all segments are oversampled with the same value (value of 1 means no oversampling). If it has the value "A", then automatic oversampling is calculated.&Reference image geometry|-0.9375000000000001;0;0;119.53100000000003;0;-0.9375000000000001;0;119.53099999999999;0;0;1.4000000000000001;-77.7;0;0;0;1;0;255;0;255;0;111;|Image geometry description string determining the geometry of the labelmap that is created in course of conversion. Can be copied from a volume, using the button.&Smoothing factor|0.2|Smoothing factor. Range: 0.0 (no smoothing) to 1.0 (strong smoothing).&Threshold fraction|0.5|Determines the threshold that the closed surface is created at as a fractional value between 0 and 1.&
+Segmentation_MasterRepresentation:=Binary labelmap
+Segmentation_ReferenceImageExtentOffset:=0 0 0
+
+ +### NGFF image-label example: + +
+"image-label":
+  {
+    "version": "0.1",
+    "colors": [
+      {
+        "label-value": 1,
+        "rgba": [255, 255, 255, 0]
+      },
+      {
+        "label-value": 4,
+        "rgba": [0, 255, 255, 128]
+      },
+      ...
+      ],
+    "properties": [
+      {
+        "label-value": 1,
+        "area (pixels)": 1200,
+        "class": "foo"
+
+      },
+      {
+        "label-value": 4,
+        "area (pixels)": 1650
+      },
+      ...
+      ]
+  },
+  "source": {
+    "image": "../../"
+  }
+]
+
+ +Store schema in top-level folder diff --git a/PW34_2020_Virtual/Projects/CollaborativeVR/README.md b/PW34_2020_Virtual/Projects/CollaborativeVR/README.md new file mode 100644 index 000000000..bbf7d6dc5 --- /dev/null +++ b/PW34_2020_Virtual/Projects/CollaborativeVR/README.md @@ -0,0 +1,60 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Collaborative VR breakout session + +## Key Investigators + +- Csaba Pinter (Ebatinca / Pixel Medical) +- Simon Drouin (ETS Montreal) + +# Project Description + + +Virtual Reality (VR) has a great potential to facilitate communication between clinicians. For example, VR can be used to collaboratively plan a surgical case by manipulating 3D models derived from preoperative scans. Slicer already includes most of the components required to build prototypes for this kind of applications: +- The Virtual Reality module can display everything from Slicer's 3D in VR +- The VolumeRendering and PRISM Rendering modules enable advanced-programmable volume rendering +- The OpenIGTLinkIF module and OpenIGTLink protocol enables communication of medical and tracking data between different devices running Slicer +The Goal of this discussion is to coordinate future development of the above module to enable more natural collaborative interaction in VR. + +## Objective + +1. Review the current functionality of SlicerVR and PRISM Rendering +1. Discuss planned near future developments in those modules +1. Lay out a vision for the future of interactive VR in Slicer +1. Establish a protocol for Collaborative VR +1. Points for discussion: + 1. Future possibilities for interacting in VR: hand tracking, tool tracking + 1. Need to support different types of tracking in collaborative VR protocols + 1. Should SlicerVR have a fixed set of functionality and interaction paradigm or let users and/or developers choose. + +## Progress and Next Steps + +1. Potential application of SlicerVR: Learn dental anatomy (Sébastien Erckelbout) +1. 
Short term changes: + 1. Implement spatial references (e.g. a Floor) + 1. Improve object selection +1. Interaction + 1. Is it worth sharing a code base with Slicer Looking glass? + 1. For developers: need for more control over interaction: disable existing interaction and change behavior + 1. Long term: support for more complex VR controllers and hand tracking + 1. Have interaction settings for different scenarios (inside-out vs outside-in visualization) +1. Collaboration + 1. Currently, collaboration is setup using OpenIGTLinkIF and carefully setting up scenes on both ends. Transforms have to be setup for controllers and HMD + 1. We need to facilitate (automate) connection and scene setup for collaboration + 1. It might be necessary to use a server technology to hold common state (Mike Halle suggested [FireBase](https://firebase.google.com/) previously used) + 1. A matchmaking and communication solution to explore: [Photon](https://doc.photonengine.com/en-us/realtime/current/getting-started/realtime-intro) + +# Illustrations + + + +# Background and References + +[SlicerVR](https://github.com/KitwareMedical/SlicerVirtualReality) + +[PRISMRendering](https://github.com/ETS-vis-interactive/SlicerPRISMRendering) + +[PRISMRendering doc](https://githubcomets-vis-interactiveslicerprismrendering.readthedocs.io/en/latest/) diff --git a/PW34_2020_Virtual/Projects/CollaborativeVR/Readme.md b/PW34_2020_Virtual/Projects/CollaborativeVR/Readme.md deleted file mode 100644 index 9965e0166..000000000 --- a/PW34_2020_Virtual/Projects/CollaborativeVR/Readme.md +++ /dev/null @@ -1,44 +0,0 @@ -Back to [Projects List](../../README.md#ProjectsList) - -# Collaborative VR breakout session - -## Key Investigators - -- Csaba Pinter -- Simon Drouin (ETS Montreal) - -# Project Description - - -Virtual Reality (VR) has a great potential to facilitate communication between clinicians. 
For example, VR can be used to collaboratively plan a surgical case by manipulating 3D models derived from preoperative scans. Slicer already includes most of the components required to build prototypes for this kind of applications: -- The Virtual Reality module can display everything from Slicer's 3D in VR -- The VolumeRendering and PRISM Rendering modules enable advanced-programmable volume rendering -- The OpenIGTLinkIF module and OpenIGTLink protocol enables communication of medical and tracking data between different devices running Slicer -The Goal of this discussion is to coordinate future development of the above module to enable more natural collaborative interaction in VR. - -## Objective - - - -1. Review the current functionality of SlicerVR and PRISM Rendering -1. Discuss planned near future developments in those modules -1. Lay out a vision for the future of interactive VR in Slicer. - -## Progress and Next Steps - - - -1. Describe specific steps you **have actually done**. -1. ... -1. ... - -# Illustrations - - - -# Background and References - - diff --git a/PW34_2020_Virtual/Projects/ImagingDataCommons/README.md b/PW34_2020_Virtual/Projects/ImagingDataCommons/README.md index 5f94917c3..0b5cb16a8 100644 --- a/PW34_2020_Virtual/Projects/ImagingDataCommons/README.md +++ b/PW34_2020_Virtual/Projects/ImagingDataCommons/README.md @@ -14,6 +14,8 @@ Back to [Projects List](../../README.md#ProjectsList) [National Cancer Institute (NCI) Imaging Data Commons (IDC)](https://portal.imaging.datacommons.cancer.gov) is a cloud-based resource within [NCI Cancer Research Data Commons (CRDC)](https://datacommons.cancer.gov/) that connects researchers with cancer imaging datasets, resources for exploring those datasets and identifying relevant cohorts, and other components of CRDC that will host additional data types and support computation on the defined cohorts. 
+[Project introductory slides](https://docs.google.com/presentation/d/1ZZI4R-D9og4eFBydERHSPLkX5WGcaJX7j5gjj_l_Q0I/edit?usp=sharing) + + ## Objective @@ -27,7 +29,7 @@ Back to [Projects List](../../README.md#ProjectsList) 1. Present a brief demo of IDC. 2. Summarize main pointers related to IDC. -3. Be available for one-on-one discussions with the interested project week. +3. Be available for one-on-one discussions with the interested participants during project week. 4. Summarize collected feedback, if any. ## Progress and Next Steps @@ -53,6 +55,6 @@ Back to [Projects List](../../README.md#ProjectsList) * GitHub organization: [https://github.com/ImagingDataCommons](https://github.com/ImagingDataCommons) * Twitter: [@CancerIDC](https://twitter.com/CancerIDC) * [IDC YouTube channel](https://www.youtube.com/channel/UCQxuVp3_3aTJZBA4zZLMQtQ) -* [NCI Data Commons](https://datacommons.cancer.gov/)= +* [NCI Data Commons](https://datacommons.cancer.gov/) diff --git a/PW34_2020_Virtual/Projects/Landmark Curves for Brain Images/README.md b/PW34_2020_Virtual/Projects/Landmark Curves for Brain Images/README.md new file mode 100644 index 000000000..238c311e4 --- /dev/null +++ b/PW34_2020_Virtual/Projects/Landmark Curves for Brain Images/README.md @@ -0,0 +1,24 @@ + +# Landmark Curves for Brain Images + +## Key Investigators + +- Jarrett Rushmore (Boston University, Brigham and Women’s Hospital, MGH) +- Nikos Makris (Boston University, Brigham and Women’s Hospital, MGH) +- Ed Yeterian (Colby College) +- Kyle Sunderland (Queen’s University) +- Andras Lasso (Queen’s University) +- Sylvain Bouix (Brigham and Women’s Hospital) + +# Project Description + +The overarching goal of this project is to facilitate the assignment of cerebral cortical areas borders in MRI images by developing a tool and technique for drawing on the surface representation of the human cerebral cortex. 
Since delineation of different brain areas are often performed by using the depths of cortical sulci as borders (i.e., anatomical landmarks), a semiautomated means to reliably create these borders on different views of the brain would enable fast, reliable and easy parcellation of brain areas. +## Objective + +The overall objective today is to demonstrate a tool and technique to produce curves on a surface representation of the cerebral cortex. + + +## References + +Link to Neurosegmental Parcellation Module +https://github.com/PerkLab/NeuroSegmentation diff --git a/PW34_2020_Virtual/Projects/MONAI/MONAI-0.4-ChangeLog.png b/PW34_2020_Virtual/Projects/MONAI/MONAI-0.4-ChangeLog.png new file mode 100644 index 000000000..1f0e83cad Binary files /dev/null and b/PW34_2020_Virtual/Projects/MONAI/MONAI-0.4-ChangeLog.png differ diff --git a/PW34_2020_Virtual/Projects/MONAI/MONAI-end_to_end.png b/PW34_2020_Virtual/Projects/MONAI/MONAI-end_to_end.png new file mode 100644 index 000000000..e837f64a9 Binary files /dev/null and b/PW34_2020_Virtual/Projects/MONAI/MONAI-end_to_end.png differ diff --git a/PW34_2020_Virtual/Projects/MONAI/MONAI-logo.png b/PW34_2020_Virtual/Projects/MONAI/MONAI-logo.png new file mode 100644 index 000000000..3d341d3c6 Binary files /dev/null and b/PW34_2020_Virtual/Projects/MONAI/MONAI-logo.png differ diff --git a/PW34_2020_Virtual/Projects/MONAI/README.md b/PW34_2020_Virtual/Projects/MONAI/README.md new file mode 100644 index 000000000..ead473305 --- /dev/null +++ b/PW34_2020_Virtual/Projects/MONAI/README.md @@ -0,0 +1,64 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# MONAI + +![](MONAI-logo.png) + +## Key Investigators + +- Stephen Aylward (Kitware) +- Matt McCormick (Kitware) +- Hans Johnson (The University of Iowa) + +# Project Description + +MONAI is a [PyTorch](https://pytorch.org/)-based, [open-source](https://github.com/Project-MONAI/MONAI/blob/master/LICENSE) framework for deep learning in healthcare 
imaging, part of [PyTorch Ecosystem](https://pytorch.org/ecosystem/). +Its ambitions are: +- developing a community of academic, industrial and clinical researchers collaborating on a common foundation; +- creating state-of-the-art, end-to-end training workflows for healthcare imaging; +- providing researchers with the optimized and standardized way to create and evaluate deep learning models. + + + +## Objective + + + +1. Introduce MONAI +1. Datasets and DataLoaders for participating in Challenges and using public data collections +1. Transforms for data pre-processing and augmentation +1. Participating in a deep learning challenge in 10 lines of python +1. Integration into clinical workflows: MONAI + Nvidia CLARA +1. Ongoing efforts: Model Zoo + +## Approach and Plan + +1. Present MONAI +1. Advertise resources for support and training (including resources for hackathons / datathons) + +## Progress and Next Steps + + + +1. [YouTube: 5-minute presentation on Monday](https://youtu.be/tBrMVTlzb8s) +1. 1-hour presentation on Wednesday + +# Illustrations + +1. ![](MONAI-end_to_end.png) + +# Background and References + +* Learn + * Getting Started (Installation, Examples, Demos, etc.) https://monai.io/start.html + +* Contribute + * GitHub: https://github.com/Project-MONAI/MONAI + * Community Guide: https://github.com/Project-MONAI/MONAI#community + * Contributing Guide: https://github.com/Project-MONAI/MONAI#contributing + * Issue Tracker: “Good First Issue” tag: https://github.com/Project-MONAI/MONAI/labels/good%20first%20issue + +* Support + * PyTorch Forums. Tag @monai or see the MONAI user page. https://discuss.pytorch.org/u/MONAI/ + * Stack Overflow. See existing tagged questions or create your own: https://stackoverflow.com/questions/tagged/monai + * Join our Slack Channel. 
Fill out the Google Form here: https://forms.gle/QTxJq3hFictp31UM9 diff --git a/PW34_2020_Virtual/Projects/Napari/README.md b/PW34_2020_Virtual/Projects/Napari/README.md new file mode 100644 index 000000000..e7700ed07 --- /dev/null +++ b/PW34_2020_Virtual/Projects/Napari/README.md @@ -0,0 +1,52 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Napari (CZI) Collaboration Potential + +## Key Investigators + +- Nicholas Sofroniew (CZI) +- Jean-Christophe Fillion-Robin (Kitware) + +# Project Description + +[napari](https://napari.org/) is a fast, interactive, multi-dimensional image viewer for Python. It's designed for browsing, annotating, and analyzing large multi-dimensional images. + + + +## Objective + + + +1. Introduce the use cases addressed by napari +1. Identify and discuss areas for potential napari/ Slicer collaboration, including + - Using napari io plugins to read data into slicer, see [napari plugin docs](https://napari.org/docs/dev/plugins/index.html) + - Using Slicer [SimpleFilters](https://www.slicer.org/wiki/Documentation/4.10/Modules/SimpleFilters) on napari layers, and possibly generating gui elements for them using [magicgui](https://magicgui.readthedocs.io/en/latest/examples/napari_parameter_sweep/) + +## Approach and Plan + + + +1. Present napari +1. Be available for one-on-one discussions with the interested participants during project week. +1. Summarize collected feedback, if any. + +## Progress and Next Steps + + + +1. Describe specific steps you **have actually done**. +1. ... +1. ... 
+ +# Illustrations + + + +# Background and References + +* Website: https://napari.org/ +* Documentation: https://napari.org/docs/dev/ +* GitHub: https://github.com/napari/napari diff --git a/PW34_2020_Virtual/Projects/OpenAnatomy/README.md b/PW34_2020_Virtual/Projects/OpenAnatomy/README.md new file mode 100644 index 000000000..0ac996922 --- /dev/null +++ b/PW34_2020_Virtual/Projects/OpenAnatomy/README.md @@ -0,0 +1,42 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Open Anatomy Project + +## Key Investigators + +- Michael Halle (BWH) +- Ron Kikinis (BWH) +- Sonia Pujol (BWH) +- Andras Lasso (Queens) +- Many, many international colleagues, collaborators, and friends (World) + +# Project Description + +The Open Anatomy Project is a technology, research, and social effort to make high quality medical data widely available, for free. +The project web site, https://openanatomy.org , includes free 3D atlases of the human anatomy. The site has been visited by more +than 36,000 unique users from 224 countries since October 2018. + +In the past year, we have seen increasing interest from the world-wide community to publish new atlases on the web site. We are +excited about this opportunity to connect expert anatomists to those people who can benefit from this data, particularly at this time +when so many people are using virtual educational tools. + +## Objective + +1. Publish several new atlases from Senegal and Mauritania. +2. Connect with members of the 3D Slicer community who would like to create atlases. +3. Work with the 3D Slicer technical community to improve the infrastructure that will make atlas publication easier. Specifically: + * Efficient image loading and display + * Export from Slicer + +## Approach and Plan + +1. Talk to all my friends.... 
+ +## Progress and Next Steps + + +# Illustrations + +# Background and References + +* https://openanatomy.org/ diff --git a/PW34_2020_Virtual/Projects/PRISM_Volume_Rendering/Readme.md b/PW34_2020_Virtual/Projects/PRISM_Volume_Rendering/README.md similarity index 100% rename from PW34_2020_Virtual/Projects/PRISM_Volume_Rendering/Readme.md rename to PW34_2020_Virtual/Projects/PRISM_Volume_Rendering/README.md diff --git a/PW34_2020_Virtual/Projects/Plastimatch/README.md b/PW34_2020_Virtual/Projects/Plastimatch/README.md new file mode 100644 index 000000000..b05291eb1 --- /dev/null +++ b/PW34_2020_Virtual/Projects/Plastimatch/README.md @@ -0,0 +1,50 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Plastimatch + +## Key Investigators + +- Greg Sharp (MGH) + +# Project Description + +Plastimatch is a registration and image processing software that runs either standalone or as a Slicer extension within SlicerRT. +Over the past several years, there have been improvements in plastimatch that have not been migrated to 3D Slicer. + +## Objective + + + +1. Upgrade plastimatch version used by SlicerRT. +1. Add capability to run plastimatch executable from python. +1. Add capability for free-form command file editing. + +If time allows + +1. Add interface from 3D Slicer to surface-based registration (dmap-dmap and point-dmap). + +## Approach and Plan + + + +1. This is pretty straightforward stuff, just takes time to achieve. + +## Progress and Next Steps + + + +1. Describe specific steps you **have actually done**. +1. ... +1. ... + +# Illustrations + + + +# Background and References + +1. http://plastimatch.org +1. 
http://slicerrt.github.io diff --git a/PW34_2020_Virtual/Projects/ROS-IGTL-Bridge/README.md b/PW34_2020_Virtual/Projects/ROS-IGTL-Bridge/README.md new file mode 100644 index 000000000..3d637bb7c --- /dev/null +++ b/PW34_2020_Virtual/Projects/ROS-IGTL-Bridge/README.md @@ -0,0 +1,48 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# ROS-IGTL-Bridge for Prostate Biopsy Robot + +## Key Investigators + +- Junichi Tokuda (BWH) +- Pedro Moreira (BWH) + +# Project Description + + +BWH team has been working with Physical Science Inc. on a new generation of robotic device for MRI-guided prostate biopsy. +The new device has an active 4-DOF needle guide that directs a biopsy needle to the target in the prostate based on the plan +created on an intraprocedural MRI. + +## Objective + +In this project, we will implement a new robot control node on Robot Operating System (ROS) and integrate with 3D Slicer +and MRI scanner using [ROS-IGTL-Bridge](https://github.com/openigtlink/ROS-IGTL-Bridge) and OpenIGTLink. + +## Approach and Plan + + + +1. Define OpenIGTLink messages for the communication between ROS and 3D Slicer +1. Implement a ROS node that computes kinematics and commands individual motors +1. Extend [SliceTracker](https://github.com/SlicerProstate/SliceTracker) to control 3D Slicer and Siemens MRI Scanner. + + +## Progress and Next Steps + + + +1. Created the project page. + + +# Illustrations + + + +# Background and References +The study was funded in part by the National Institutes of Health (4R44CA224853, R01EB020667, R01CA235134, P41EB015898) and Siemens Healthineers. 
+ + diff --git a/PW34_2020_Virtual/Projects/RadOncSegmentor/README.md b/PW34_2020_Virtual/Projects/RadOncSegmentor/README.md new file mode 100644 index 000000000..f13b617c5 --- /dev/null +++ b/PW34_2020_Virtual/Projects/RadOncSegmentor/README.md @@ -0,0 +1,48 @@ +# RadOncSegmentor Segmentation for Radiation Treatment Planning + +## Key Investigators +- [Harini Veeraraghavan](https://github.com/harveerar) (MSKCC) +- Aditya P. Apte (MSKCC) +- [Eve M. LoCastro](https://github.com/locastro) (MSKCC) +- Jue Jiang (MSKCC) +- Sharif Elguindi (MSKCC) +- Aditi Iyer (MSKCC) +- Joseph Deasy (MSKCC) + +# Project Description + +RadOncSegmentor contains methods for 3D segmentation of CT and MRI images to generate volumetric segmentation of tumors and organs at risk to radiotherapy. The open-source implementation of these methods is available as singularity containers through [CERR](https://github.com/cerr/CERR/wiki/Auto-Segmentation-models). + +## Objective +The goal for this project is to present the various deep learning segmentation applications available through CERR for tumor and normal organs for radiation oncology applications. We will also show how these methods are currently used in our clinical workflow for radiation therapy treatment planning at MSK. + +## Approach and Plan + +1. Create project page that provides an overview of the project +1. 5-minute presentation and demo of the CERR-XNAT open-source library as well as (time permitting) our MIM-CERR- workflow used for routine clinical processing +1. Get feedback from people and hopefully find collaborators to work with. + +## Progress and Next Steps + +1. Made the project page +1. ... + +# Illustrations + + + +# Background and References + +1. 
Reference publications: +- Lung tumor segmentation from CT: Jiang J, Hu Y.C, Liu C.J, Halpenny D, Hellmann M.D, Deasy J.O, Mageras G, Veeraraghavan H, "Multiple resolution residually connected feature streams for automatic lung tumor segmentation from CT images", IEEE Trans. Med Imaging, 38(1): 134-144, 2019. +- Lung organs segmentation: + - [Um H, Jiang J, Thor M, Rimner A, Luo L, Deasy J.O, Veeraraghavan H, "Multiple resolution residual network for automatic thoracic organs at risk segmentation from CT"](https://arxiv.org/abs/2005.13690), in MIDL 2020. + - Haq R, Hotca A, Apte A, Rimner A, Deasy J.O, Thor M, "Cardio-pulmonary substructure segmentation of radiotherapy computed tomography images using convolutional neural networks for clinical outcomes analysis", phiRO, 2020 +- Head and neck organs: + - [Jiang J, Elguindi S, Um H, Berry S, Veeraraghavan H, "Local block-self attention for normal organ segmentation"](https://arxiv.org/abs/1909.05054) + - [Iyer A, Thor M, Haq R, Deasy J.O, Apte A](https://www.biorxiv.org/content/10.1101/772178v2.full) +- Prostate organs: + - Elguindi S, Zelefsky M, Jiang J, Veeraraghavan H, Deasy J.O, Hunt M, Tyagi N, "Deep learning-based auto-segmentation of targets and organs-at-risk for magnetic resonance imaging only planning for prostate radiotherapy", phiRo 2019. diff --git a/PW34_2020_Virtual/Projects/SlicerArduino/README.md b/PW34_2020_Virtual/Projects/SlicerArduino/README.md index 6b0ae74e5..f774b80c7 100644 --- a/PW34_2020_Virtual/Projects/SlicerArduino/README.md +++ b/PW34_2020_Virtual/Projects/SlicerArduino/README.md @@ -23,7 +23,7 @@ A [journal paper](https://www.mdpi.com/2306-5354/7/3/109) about SlicerArduino ha 1. Objective A. Show how SlicerArduino works. -1. Objective B. +1. Objective B. 
## Approach and Plan diff --git a/PW34_2020_Virtual/Projects/SlicerCMF/README.md b/PW34_2020_Virtual/Projects/SlicerCMF/README.md index 89116765e..521592e7c 100644 --- a/PW34_2020_Virtual/Projects/SlicerCMF/README.md +++ b/PW34_2020_Virtual/Projects/SlicerCMF/README.md @@ -1,6 +1,6 @@ Back to [Projects List](../../README.md#ProjectsList) -# Updates on SlicerCMF (CranioMaxilloFacial) +# Updates on SlicerCMF (CranioMaxilloFacial) ## Key Investigators @@ -13,6 +13,10 @@ Back to [Projects List](../../README.md#ProjectsList) SlicerCMF is an extension of 3D Slicer, a free, open source software for visualization and image analysis. SlicerCMF can be installed from the 3D Slicer Extension Manager on Windows, Mac, and Linux to leverage the advanced features of 3D Slicer in dental image analysis. SlicerCMF provides registration, segmentation and quantification modules for dental images analysis that may support patient-specific decision making and assessment in the context of disease progression. +[Presentation slides](https://docs.google.com/presentation/d/1ukWRZckPlEXcHIK-D6YMEttRbRlV8IyAEDw0-QbTs8M/edit?usp=sharing) + +[YouTube tutorials](https://www.youtube.com/user/DCBIA/videos) + ## Objective During this project week we would like to present the latest work done in SlicerCMF. diff --git a/PW34_2020_Virtual/Projects/SlicerHeart/README.md b/PW34_2020_Virtual/Projects/SlicerHeart/README.md index c93575c63..74764a01d 100644 --- a/PW34_2020_Virtual/Projects/SlicerHeart/README.md +++ b/PW34_2020_Virtual/Projects/SlicerHeart/README.md @@ -24,6 +24,8 @@ Back to [Projects List](../../README.md#ProjectsList) SlicerHeart extension contains tools for cardiac image import (3D/4D ultrasound, CT, MRI), quantification, and implant placement planning and assessment. 
+![](SlicerHeartOverview.png) + ## Objective The goal for SlicerHeart project for this project week is to let people interested in cardiac applications know about what SlicerHeart can be used for now and what are the features that will be released publicly soon. @@ -31,21 +33,39 @@ The goal for SlicerHeart project for this project week is to let people interest ## Approach and Plan 1. Create project page that provides good overview of the project -1. 5-minute presentation on Monday -1. Longer presentation (if PI is available and there is confirmed interest/available time slow) +1. Overview presentation on Monday +1. Presentation/discussion of any details depending on interest ## Progress and Next Steps -1. Create project page (WIP) +1. Create project page 2. Confirm presentation/demo schedule # Illustrations - +_[Valve segmentation](https://link.springer.com/article/10.1007/s00246-017-1785-4)_ + +![](https://media.springernature.com/lw685/springer-static/image/art%3A10.1007%2Fs00246-017-1785-4/MediaObjects/246_2017_1785_Fig2_HTML.gif?as=webp) + +_[Valve quantification](https://www.sciencedirect.com/science/article/pii/S0894731719300021)_ + +![](https://ars.els-cdn.com/content/image/1-s2.0-S0894731719300021-gr3.jpg) + + +_[Virtual reality](https://www.onlinejase.com/article/S0894-7317(18)30343-2/fulltext)_ + +![](https://els-jbs-prod-cdn.jbs.elsevierhealth.com/cms/attachment/d9f61ddd-819b-4ff1-9a5f-ed19077a87d5/gr2.jpg) + +_[Phsyical modeling](https://www.spiedigitallibrary.org/conference-proceedings-of-spie/10135/1013516/Patient-specific-pediatric-silicone-heart-valve-models-based-on-3D/10.1117/12.2255849.full?SSO=1)_ + +![](https://www.spiedigitallibrary.org/ContentImages/Proceedings/10135/1013516/FigureImages/00355_PSISDG10135_1013516_page_4_2.jpg) + +![](https://www.spiedigitallibrary.org/ContentImages/Proceedings/10135/1013516/FigureImages/00355_PSISDG10135_1013516_page_4_3.jpg) + +_[Valve repair 
simulation](https://link.springer.com/article/10.1007/s00246-017-1785-4)_ + +![](https://media.springernature.com/lw685/springer-static/image/art%3A10.1007%2Fs00246-017-1785-4/MediaObjects/246_2017_1785_Fig5_HTML.jpg?as=webp) # Background and References - +- [SlicerHeart public repository](https://github.com/SlicerHeart/SlicerHeart) diff --git a/PW34_2020_Virtual/Projects/SlicerHeart/SlicerHeartOverview.png b/PW34_2020_Virtual/Projects/SlicerHeart/SlicerHeartOverview.png new file mode 100644 index 000000000..732dac577 Binary files /dev/null and b/PW34_2020_Virtual/Projects/SlicerHeart/SlicerHeartOverview.png differ diff --git a/PW34_2020_Virtual/Projects/SlicerIGT/README.md b/PW34_2020_Virtual/Projects/SlicerIGT/README.md index 97bb4b7e7..ab60241f7 100644 --- a/PW34_2020_Virtual/Projects/SlicerIGT/README.md +++ b/PW34_2020_Virtual/Projects/SlicerIGT/README.md @@ -15,6 +15,8 @@ Back to [Projects List](../../README.md#ProjectsList) SlicerIGT has been developed since 2012 as an extension for 3D Slicer to support navigated medical interventions. For more information, visit the [SlicerIGT website](http://www.slicerigt.org/wp/). +Breakout room at 3 pm today (Dec 14) - [Agenda and meeting notes](https://docs.google.com/document/d/1laFD-GSYOq-mN0tLomYqcqLwrYXuwiMDU46AVToLcKE/edit#) + ## Objective We will add a step-by-step tutorial on how to use recent modules for collecting tracked ultrasound data for AI training, and how to deploy trained AI models in ultrasound-guided interventions. diff --git a/PW34_2020_Virtual/Projects/SlicerLiverAnalysis/README.md b/PW34_2020_Virtual/Projects/SlicerLiverAnalysis/README.md new file mode 100644 index 000000000..ae760ebd6 --- /dev/null +++ b/PW34_2020_Virtual/Projects/SlicerLiverAnalysis/README.md @@ -0,0 +1,45 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# SlicerLiverAnalysis: interactive tools for planning liver interventions. 
+ +## Key Investigators + +- Rafael Palomar (Oslo University Hospital, Norwegian University of Science and Technology) + +# Project Description + + + +This project will kick-start the development of the *SlicerLiverAnalysis* extension that will be developed through the [ALive project](https://alive-research.no). The objective of the SlicerLiverAnalysis extension is to provide researchers with tools to perform liver analytics towards planning of liver interventions (resections, ablations). At this point in the project we need to port early prototypes of our resection planning algorithms into 3D Slicer, which may require enabling new infrastructure in Slicer (particularly for handling pluggable markups from modules other than the Markups module). + +## Objective + + + +1. Discuss alternatives to integrate new markups in modules other than the Markups module (pluggable markups). +2. Develop a 3D deformable surface model markup that will be the base for resection planning tools. + +## Approach and Plan + + + +1. Discussion and design of new 3D Slicer markups infrastructure to enable pluggable markups. +2. Development of new 3D deformable surface markups with new interaction (similar to the plane markups). + +## Progress and Next Steps + + + +1. A prototype of 3D deformable surfaces was already developed in the last NA-MIC week +2. Preliminary discussions with the Slicer developers have been held to raise awareness of the problems and possibilities. + +# Illustrations + + + +# Background and References +1. 
[NorMIT-Plan at NA-MIC project week](https://projectweek.na-mic.org/PW33_2020_GranCanaria/Projects/NorMIT-Plan/) (january 2020) + diff --git a/PW34_2020_Virtual/Projects/SlicerLookingGlass/README.md b/PW34_2020_Virtual/Projects/SlicerLookingGlass/README.md new file mode 100644 index 000000000..c23b0265c --- /dev/null +++ b/PW34_2020_Virtual/Projects/SlicerLookingGlass/README.md @@ -0,0 +1,56 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Slicer Looking Glass + +![](SlicerLGF.png) + +## Key Investigators + +- Stephen Aylward (Kitware) +- Jean-Christophe Fillion-Robin (Kitware) + +# Project Description + +Kitware has partnered with Looking Glass Factory (LGF) to add support for LGF’s holographic displays from any VTK-based application including ParaView, 3D Slicer, and any custom application you create. + +## Objective + +1. Introduce Looking Glass Factory hardware +1. Overview of Looking Glass Extension for 3D Slicer +1. Review compatibility issues with VTK 9.0 and 3D Slicer LGF. +1. Discuss research and clinical applications of holographic displays. + +## Approach and Plan + +1. Present LGF via videos and tutorials +1. Evaluate current Slicer + VTK9.0 candidate for compatibility with current LGF Slicer extension. + +## Progress and Next Steps + + + +1. Create video for 5-minute presentation on Monday + +# Illustrations + +1. 
[Slicer+LGF Short Video](https://youtu.be/7-ROJ6awzqk) + +# Background and References + +Looking Glass Factory +* [https://lookingglassfactory.com/](https://lookingglassfactory.com/) + +Looking Glass Factory SDK (HoloPlayCore): +* [https://lookingglassfactory.com/software](https://lookingglassfactory.com/software) + +Initial release of VTK + Looking Glass Factory: +* [https://blog.kitware.com/vtk-holographic-display/](https://blog.kitware.com/vtk-holographic-display/) + +How-to video for ParaView + Looking Glass Factory: +* [https://vimeo.com/460590350/74a0a8f1c7](https://vimeo.com/460590350/74a0a8f1c7) + +Github for VTK extension: +* [https://github.com/Kitware/LookingGlassVTKModule](https://github.com/Kitware/LookingGlassVTKModule) + +Github for Slicer extension: +* [https://github.com/KitwareMedical/SlicerLookingGlass](https://github.com/KitwareMedical/SlicerLookingGlass) diff --git a/PW34_2020_Virtual/Projects/SlicerLookingGlass/SlicerLGF.png b/PW34_2020_Virtual/Projects/SlicerLookingGlass/SlicerLGF.png new file mode 100644 index 000000000..236c6193a Binary files /dev/null and b/PW34_2020_Virtual/Projects/SlicerLookingGlass/SlicerLGF.png differ diff --git a/PW34_2020_Virtual/Projects/SlicerMorph/README.md b/PW34_2020_Virtual/Projects/SlicerMorph/README.md new file mode 100644 index 000000000..cc82f4bbb --- /dev/null +++ b/PW34_2020_Virtual/Projects/SlicerMorph/README.md @@ -0,0 +1,49 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# SlicerMorph: An open and extensible platform to retrieve, visualize and analyze 3D morphology + +## Key Investigators + +- Sara Rolfe (University of Washington, Seattle Children's Research Institute) +- Murat Maga (University of Washington, Seattle Children's Research Institute) +- Steve Pieper (Isomics) + +# Project Description + +The goal of this project is to extend 3D Slicer with tools to help biologists working with 3D specimen data. 
The SlicerMorph toolkit enables biologists to retrieve, visualize, measure, annotate, and perform geometric morphometric analyses from high-resolution specimen data both from volumetric scans (CTs and MRs) as well as from 3D surface scanners effectively within 3D Slicer. + +## Objective + +During project week we would like to show a demo of the SlicerMorph extension and connect with other developers who are working on similar projects. + +1. Demonstrate the newest functions added to SlicerMorph +2. Identify developers who share goals and interests +3. Discuss areas of commonality and future work + +## Approach and Plan + +1. Short demo of SlicerMorph +2. Schedule a longer discussion if there is interest + +## Progress and Next Steps + + + +1. Describe specific steps you **have actually done**. +1. ... +1. ... + +# Illustrations + + + +# Background and References + +[SlicerMorph Github repository](https://github.com/SlicerMorph/SlicerMorph) + +[SlicerMorph webpage](https://slicermorph.github.io/) + +[SlicerMorph preprint](https://www.biorxiv.org/content/10.1101/2020.11.09.374926v1) diff --git a/PW34_2020_Virtual/Projects/SlicerSALT/README.md b/PW34_2020_Virtual/Projects/SlicerSALT/README.md index 5a401174b..b2eac99e7 100644 --- a/PW34_2020_Virtual/Projects/SlicerSALT/README.md +++ b/PW34_2020_Virtual/Projects/SlicerSALT/README.md @@ -1,10 +1,10 @@ Back to [Projects List](../../README.md#ProjectsList) -# Updates on SlicerSALT (ShapeAnaLysisToolbox) 3.0 +# Updates on SlicerSALT (ShapeAnaLysisToolbox) 3.0 ## Key Investigators -- Beatriz (Bea) Paniagua, PI (Kitware) +- Beatriz (Bea) Paniagua, PI (Kitware) - Jared Vicory, Algorithms (Kitware) - David Allemang, Sam Horvath, Jean-Christophe Robin-Fillion (Kitware) @@ -13,7 +13,16 @@ Back to [Projects List](../../README.md#ProjectsList) # Project Description -Three-dimensional (3D) shape lies at the core of understanding the physical objects that surround us. 
In the biomedical field, shape analysis has been shown to be powerful in quantifying how anatomy changes with time and disease. The Shape AnaLysis Toolbox (SALT, funded under the project R01EB021391) was created as a vehicle for disseminating advanced shape methodology as an open source, free, and comprehensive software tool. Over the past four years, we have made strides to increase the ease of use of complex shape analysis methodology for biomedical researchers. We used 3DSlicer as a basis to create SlicerSALT, fostering strong user support and engagement with the research community in training events. By looking at the active number of users of SlicerSALT, we can confidently say that we have succeeded, as indicated by the following metrics: the SALT website has 7,044-page views from 2,913 unique users. Our software packages have been downloaded 1,836 times by 719 single users. Since 2017, our user forum has 50 posts and 202 replies with a total of 21,526 views. +Three-dimensional (3D) shape lies at the core of understanding the physical objects that surround us. In the biomedical field, shape analysis has been shown to be powerful in quantifying how anatomy changes with time and disease. The Shape AnaLysis Toolbox (SALT, funded under the project R01EB021391) was created as a vehicle for disseminating advanced shape methodology as an open source, free, and comprehensive software tool. Over the past four years, we have made strides to increase the ease of use of complex shape analysis methodology for biomedical researchers. We used 3DSlicer as a basis to create SlicerSALT, fostering strong user support and engagement with the research community in training events. By looking at the active number of users of SlicerSALT, we can confidently say that we have succeeded, as indicated by the following metrics: the SALT website has 7,044-page views from 2,913 unique users. Our software packages have been downloaded 1,836 times by 719 single users. 
Since 2017, our user forum has 50 posts and 202 replies with a total of 21,526 views. + +[Project slides](https://docs.google.com/presentation/d/11B34-A8uU-kVc8-uI7G7OrUQf84ouHXE1Q9WAZtUWnY/edit?usp=sharing) + +Vimeo Video Tutorial 2020 + +[Module 1](https://vimeo.com/412300712) +[Module 2](https://vimeo.com/412356243) +[Module 3](https://vimeo.com/412373481) +[Module 4](https://vimeo.com/412382491) ## Objective diff --git a/PW34_2020_Virtual/Projects/SlicerTraining-DICOM/README.md b/PW34_2020_Virtual/Projects/SlicerTraining-DICOM/README.md new file mode 100644 index 000000000..7cee93996 --- /dev/null +++ b/PW34_2020_Virtual/Projects/SlicerTraining-DICOM/README.md @@ -0,0 +1,34 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Slicer Training DICOM + +## Key Investigators + +- Sonia Pujol, PhD, Director of 3D Slicer Training & Education, BWH +- Steve Pieper, PhD, Isomics Inc +- Ron Kikinis, MD, BWH + +# Project Description + +DICOM is the international standard for handling, storing, printing and transmitting medical imaging data. Clinical imaging equipment generates DICOM files. +We want to provide the Slicer community with an introduction to the DICOM standard and an overview of the Slicer DICOM module. + + + +## Objective + +This project aims to provide training resources on DICOM & Slicer 5 + +## Approach and Plan + + + +1. Introduce the history and purpose of DICOM +1. Describe the main characteristics of the DICOM standard +1. Provide step-by-step guidance on how to load DICOM data in Slicer 5.0 + +## Progress and Next Steps + +# Illustrations + +# Background and References diff --git a/PW34_2020_Virtual/Projects/SlicerVR/README.md b/PW34_2020_Virtual/Projects/SlicerVR/README.md index ecf257026..e732fd504 100644 --- a/PW34_2020_Virtual/Projects/SlicerVR/README.md +++ b/PW34_2020_Virtual/Projects/SlicerVR/README.md @@ -16,13 +16,15 @@ A key infrastructural element that is still missing from SlicerVR is the ability +1. 
Build SlicerVR against VTK9 1. Add interactive Qt panel to VR scene [SlicerVR#43](https://github.com/KitwareMedical/SlicerVirtualReality/issues/43) -2. Make use of the in-VR widget via laser pointer and VR-optimized widgets +1. Make use of the in-VR widget via laser pointer and VR-optimized widgets ## Approach and Plan +1. Update SlicerVR CMake files to build with VTK9 1. Try [vtkQWidgetWidget](https://vtk.org/doc/nightly/html/classvtkQWidgetWidget.html) in SlicerVR, confirm that it now works 1. Explore existing possibilities for using a laser pointer emanating from the controllers to control the Qt-based widget (press, click, drag&drop, etc) 1. Add the already implemented but dormant VR-optimized widgets in the SlicerVR user interface @@ -37,6 +39,8 @@ A key infrastructural element that is still missing from SlicerVR is the ability # Illustrations +![In-VR user interface](https://spie.org/Images/Graphics/Newsroom/2019articles/Crime-920.jpg) + -Work in progress log is tracked in a [public Google Drive folder](https://drive.google.com/drive/folders/1dVx2fm4doZP9EQCyUkqJ5sXvO3BINfcZ?usp=sharing) -This section will be updated with the summary during the Project week. +After running a simple scraper through the forum and collecting all articles that mention keywords such as “web”, “cloud”, “internet” and “remote” a list of 170 articles was filtered based on the criteria of being related to some sort of remote usage of Slicer and not simply mentioning “the cloud” in some context. + +The chosen 170 articles were filtered based on relevance to cloud or other remote web-based environments. The resulting 63 forum posts were scanned for external links and their click count. + +After analyzing the content and grouping the links the data leads us to the 4 main approaches: + +- **Slicer as a desktop app running on a remote computer with remote desktop access** + + For remote access to regular workstations VNC/RDP is used quite often.
VNC connection errors appear in the forum topic sample quite frequently. + + For running Slicer on a machine rented from a Cloud provider the forum pointed to the following: + + | # |Technology| Description | + |---|---|---| + | 4 | [SlicerDockers](https://github.com/pieper/SlicerDockers) | A collection of docker containers with various versions of Slicer | + | 6 | [VirtualGL](https://virtualgl.org) | Technology behind noVNC that allows remote access to a Slicer machine via a web browser | + | 11 | [SlicerGCPSetup](https://github.com/QIICR/SlicerGCPSetup) | Instructions on setting up Slicer on a virtual machine rented from Google | + | 12 | [SlicerDocker](https://github.com/Slicer/SlicerDocker) | A collection of docker containers with Slicer and Slicer Notebook | + +- **Slicer as a headless computation node running on a remote computer** + + The SlicerDockers container contains a simple proof of concept example to run a slicer based script in a docker instance. + + | # |Technology| Description | + |---|---|---| + | 4 | [SlicerDockers](https://github.com/pieper/SlicerDockers) | A collection of docker containers with various versions of Slicer | + | 5 | [Girder](https://github.com/girder/slicer_cli_web) | Slicer cli module for the Girder data management platform | + | 7 | [SlicerWeb](https://github.com/pieper/SlicerWeb) | Slicer module that exposes a web server to support web services and applications. | + + +- **SlicerJupyter with a kernel running on a remote computer with TCP access** + + While using Slicer via a Jupyter notebook it is possible to access all the features of the application, render all existing UI elements, use ipywidgets to control parameters in Slicer and have remote interactive access to the application as a whole. See the Binder link to try this functionality online. + Voilà being a part of Jupyter allows creation of simple dashboards.
+ + | # |Technology| Description | + |---|---|---| + | 1 | [SlicerJupyter](https://github.com/Slicer/SlicerJupyter) | A collection of instructions on setting up a Jupyter Notebook server inside Slicer locally and remotely. | + | 3 | [Binder](https://mybinder.org/v2/gh/slicer/SlicerNotebooks) | A collection of notebooks for Binder demonstrating basic Slicer functionality. | + | 8 | [Voilà](https://github.com/voila-dashboards/voila) | Dashboard creation toolkit for Jupyter ecosystem | + | 13 | [Google Colab](https://github.com/googlecolab/jupyter_http_over_ws) | Google's notebook service | + + +- **Pure web re-implementation of the Slicer UI** + + If the task is to streamline the existing UI for a single user, serving a Slicelet from a docker container is a viable solution. For many of the re-implementations of the Slicer UI - vtk.js is the technology that is used under the hood. + + | # |Technology| Description | + |---|---|---| + | 2 | [Slicelets](https://www.slicer.org/wiki/Documentation/Nightly/Developers/Slicelets) | Documentation for creating simplified UI for specific workflows. | + | 10 | [vtk.js](https://kitware.github.io/vtk-js) | Javascript port of the Visualization Tool Kit | + | 15 | [dcmjs/vtkDisplay](https://github.com/dcmjs-org/dcmjs-examples/tree/master/vtkDisplay) | An example that demonstrates how to display a DICOM Segmentation object with vtk-js.
| + + Apart from the technologies mentioned there are a lot of web-applications and toolkits that can be a starting point or an inspiration for re-implementing certain functionality of Slicer: + + | # |Technology| Description | + |---|---|---| + | 9 | [ParaView Glance](https://kitware.github.io/paraview-glance/) | Web version of Paraview | + | 14 | [Universal Viewer](https://universalviewer.io/) | Web based 2D image viewer | + | 16 | [Sphinx-Gallery](https://sphinx-gallery.github.io) | Web based image viewer and gallery | + | 17 | [SliceDrop](http://slicedrop.com/) | Simple browser based image viewer | + | -- | [OHIF](https://github.com/OHIF/Viewers/) | A zero-footprint medical image viewer | + | -- | [Cornerstone.js](https://github.com/cornerstonejs) | A JS library to display interactive medical images | + + Cloud providers: + + | # |Technology| Description | + |---|---|---| + | 18 | [Jetstream](https://jetstream-cloud.org/) | HPC Cloud infrastructure provider | + | 19 | [Microsoft Azure VMs](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/windows/)| Cloud infrastructure provider | + | 20 | [Google Cloud](https://cloud.google.com/iap/)| Cloud infrastructure provider | + +--- + +To test different approaches a repository with docker-compose configuration files was created. It links to the projects mentioned above and makes launching sample containers as simple as running a single `docker-compose up` command. + +[SlicerCompose repository](https://github.com/piiq/SlicerCompose) + # Illustrations @@ -37,9 +115,18 @@ This section will be updated with the summary during the Project week. 
![Some more images](Example2.jpg) --> +![Slicer Dockers](slicerdocker.jpg) + +![Slicer Web](slicerweb.jpg) + +![Slicer Jupyter](slicerjupyter.jpg) + # Background and References +* Motivating Example: high resolution brain imaging data running on a cloud hosted GPU: [https://youtu.be/oHZBFm02wbM](https://youtu.be/oHZBFm02wbM) +* Example of running docker in google cloud environment: [https://youtu.be/WgiT5mCfG_w](https://youtu.be/WgiT5mCfG_w) * [SlicerDockers](https://github.com/pieper/SlicerDockers) can run in cloud machines with a container optimized OS on a VM or a regular OS with docker installed. You can access the desktop by exposing the port or by tunneling the port through ssh. [SlicerDocker](https://github.com/Slicer/SlicerDocker) should also work in this way. -* https://github.com/pieper/SlicerMachines +* [https://github.com/pieper/SlicerMachines](https://github.com/pieper/SlicerMachines) is a system for generating bootable machine images that come pre-loaded with a GPU-backed Slicer and a desktop environment. 
+* [SlicerCompose](https://github.com/piiq/SlicerCompose) runs containers from SlicerDockers and SlicerJupyter using docker-compose configuration files diff --git a/PW34_2020_Virtual/Projects/Slicer_in_Cloud_Environments/slicerdocker.jpg b/PW34_2020_Virtual/Projects/Slicer_in_Cloud_Environments/slicerdocker.jpg new file mode 100644 index 000000000..62c2ff7d4 Binary files /dev/null and b/PW34_2020_Virtual/Projects/Slicer_in_Cloud_Environments/slicerdocker.jpg differ diff --git a/PW34_2020_Virtual/Projects/Slicer_in_Cloud_Environments/slicerjupyter.jpg b/PW34_2020_Virtual/Projects/Slicer_in_Cloud_Environments/slicerjupyter.jpg new file mode 100644 index 000000000..5e6cb9fd8 Binary files /dev/null and b/PW34_2020_Virtual/Projects/Slicer_in_Cloud_Environments/slicerjupyter.jpg differ diff --git a/PW34_2020_Virtual/Projects/Slicer_in_Cloud_Environments/slicerweb.jpg b/PW34_2020_Virtual/Projects/Slicer_in_Cloud_Environments/slicerweb.jpg new file mode 100644 index 000000000..3eb14c96d Binary files /dev/null and b/PW34_2020_Virtual/Projects/Slicer_in_Cloud_Environments/slicerweb.jpg differ diff --git a/PW34_2020_Virtual/Projects/SubjectHierarchyFolders/README.md b/PW34_2020_Virtual/Projects/SubjectHierarchyFolders/README.md index 65ac5d301..507ef3261 100644 --- a/PW34_2020_Virtual/Projects/SubjectHierarchyFolders/README.md +++ b/PW34_2020_Virtual/Projects/SubjectHierarchyFolders/README.md @@ -24,16 +24,12 @@ Recently the feature [has been added to Slicer by Andras Lasso](https://github.c 1. Consider how the feature would generalize from the BabySteps use case to work for other longitudinal applications where collections of reference and derived data must be managed (also should we consider Sequences in the design somehow?) -1. ... -1. ... ## Progress and Next Steps -1. Describe specific steps you **have actually done**. -1. ... -1. ... +1. 
Support for dropping subject hierarchy folders into views has been added, see this [pull request](https://github.com/Slicer/Slicer/pull/5350) # Illustrations diff --git a/PW34_2020_Virtual/Projects/TRAKO/README.md b/PW34_2020_Virtual/Projects/TRAKO/README.md new file mode 100644 index 000000000..79757d358 --- /dev/null +++ b/PW34_2020_Virtual/Projects/TRAKO/README.md @@ -0,0 +1,49 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# TRAKO Integration to 3D Slicer + +## Key Investigators + +- Daniel Haehn (University of Massachusetts Boston) +- Steve Pieper (Isomics) +- Lauren O'Donnell, Yogesh Rathi (BWH) + +# Project Description + +TRAKO is a new file format that stores streamlines and associated per-vertex and per-fiber data as glTF containers with compression. We will investigate how to best integrate TRAKO with 3D Slicer. + +## Objective + + + +We want to make it possible to read and write TRAKO (.TKO) files using 3D Slicer. + + +## Approach and Plan + + + +1. Start with the pip-installable `trako` package to make an importer and exporter +1. Integrate TRAKO with the MRML scene architecture. +1. Consider architecture and options for a C++ implementation + +## Progress and Next Steps + + + +1. We created ScriptedModule for SlicerDMRI (pull request open) that allows loading and saving of .TKO files. +1. We had to modify some things on Trako's side to make this work and cut a new release 0.3.5.dev9. +1. Steps required so far to make it work: 1) build Slicer + SlicerDMRI w/ pull request #145, and then 2) pip_install('trako') in the slicer +1. We now need JC's TrakoDracoPy wheels for all platforms and then update the SlicerDMRI extension to tie everything together. +1. 
Prototype implementation: https://github.com/pieper/SlicerDMRI/commit/1cfcbbc61d58588021de7a2e1ad8aafbf53274c1 + +# Illustrations + +![Loading .TKO files in 3D Slicer](slicer1.png) +![Saving .TKO files in 3D Slicer](slicer2.png) + +# Background and References + + + +TRAKO repository: https://github.com/bostongfx/trako diff --git a/PW34_2020_Virtual/Projects/TRAKO/slicer1.png b/PW34_2020_Virtual/Projects/TRAKO/slicer1.png new file mode 100644 index 000000000..c39abc23f Binary files /dev/null and b/PW34_2020_Virtual/Projects/TRAKO/slicer1.png differ diff --git a/PW34_2020_Virtual/Projects/TRAKO/slicer2.png b/PW34_2020_Virtual/Projects/TRAKO/slicer2.png new file mode 100644 index 000000000..fb5b6d26d Binary files /dev/null and b/PW34_2020_Virtual/Projects/TRAKO/slicer2.png differ diff --git a/PW34_2020_Virtual/Projects/TrainTheTrainers/README.md b/PW34_2020_Virtual/Projects/TrainTheTrainers/README.md new file mode 100644 index 000000000..d09c1629a --- /dev/null +++ b/PW34_2020_Virtual/Projects/TrainTheTrainers/README.md @@ -0,0 +1,65 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Train the Trainers: formation program with African countries + + +## Key Investigators + +- Juan Ruiz Alzola (Universidad de Las Palmas de Gran Canaria, Spain) +- Asmaa Skareb (Instituto Tecnológico de Canarias, Spain) +- Marilola Afonso (Universidad de Las Palmas de Gran Canaria, Spain) +- Nasara Cabrera Abu (Gobierno de Canarias, Spain) +- Tagwa Idris (Sudan) +- Septy Inge (Université Al-Aasriya, Mauritania) +- Otman Aghzout (Université AbdelMalek Saadi - Tetuán, Morocco) +- Maria Alexandra Fernandes (Universidade Eduardo Mondlane, Mozambique) +- Badiaa AIT AHMED (University Abdelmalek Essaâdi , Tétouan , Morocco) +- Nayra Pumar Carreras (Instituto Tecnológico de Canarias, Spain) + +# Project Description + +3D Slicer course and introductory sessions for IGT.
This training program has been developed in collaboration with universities from +* Senegal +* Mauritania +* Morocco +* Sudan +* Syria +* Mozambique +* Ghana + + + +## Objective + + + +Objective A. Provide training in 3D Slicer for university teachers, so they can start implementing the use of this software in their lectures +Objective B. Familiarize participants with a collaborative open source software environment +Objective C. Introduction to IGT + +## Approach and Plan + + + +1. Creation of a training platform, with material in English and French +1. Online courses with live videoconference classes +1. Online courses with recorded material from the live classes and training materials + +## Progress and Next Steps + + + +1. Moodle platform with presentations done with the Xerte tool + + + +# Illustrations + + + +# Background and References + + diff --git a/PW34_2020_Virtual/Projects/XNAT-OHIF/AIAA.jpg b/PW34_2020_Virtual/Projects/XNAT-OHIF/AIAA.jpg new file mode 100644 index 000000000..45ef6af5a Binary files /dev/null and b/PW34_2020_Virtual/Projects/XNAT-OHIF/AIAA.jpg differ diff --git a/PW34_2020_Virtual/Projects/XNAT-OHIF/MPR.jpg b/PW34_2020_Virtual/Projects/XNAT-OHIF/MPR.jpg new file mode 100644 index 000000000..815554929 Binary files /dev/null and b/PW34_2020_Virtual/Projects/XNAT-OHIF/MPR.jpg differ diff --git a/PW34_2020_Virtual/Projects/XNAT-OHIF/README.md b/PW34_2020_Virtual/Projects/XNAT-OHIF/README.md new file mode 100644 index 000000000..eabcde34a --- /dev/null +++ b/PW34_2020_Virtual/Projects/XNAT-OHIF/README.md @@ -0,0 +1,52 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# XNAT: OHIF 2.0 and annotation workflows + +## Key Investigators + +- Dan Marcus (Washington University) +- Kate Alpert (Radiologics) +- Tim Olsen (Radiologics) + +# Project Description + + +XNAT 1.8 will be released December 21 with an upgraded viewer built on OHIF 2.0 (woohoo!).
There's still plenty of room for improvement, in particular to support more complex image annotation workflows. + +## Objective + + + +1. Identify and implement performance improvements, particularly in transferring content between XNAT and the viewer. +1. Identify and implement format for capturing broader range of image annotations. +1. Identify and implement storage of image annotations in XNAT. + + +## Approach and Plan + + + +1. Performance improvements: Transfer DICOM series as a single request. +1. Minimize hard coded views: Implement “hanging protocol”. Any bright ideas? +1. Uniform storage of image annotations: Any bright ideas? Must support contours, segments, lines, boxes, text, templatized reports. + + +## Progress and Next Steps + + + +1. TBD +1. ... +1. ... + +# Illustrations +![4-up view with 3D objects](xnatohif4up.png) +![Contours](Viewer.jpg) +![Clara AI-Assisted Annotation](AIAA.jpg) +![MPR](MPR.jpg) +![Templatized reporting w/ radreport.org](Templates.jpg) + + +# Background and References + + diff --git a/PW34_2020_Virtual/Projects/XNAT-OHIF/Templates.jpg b/PW34_2020_Virtual/Projects/XNAT-OHIF/Templates.jpg new file mode 100644 index 000000000..1c5d466ee Binary files /dev/null and b/PW34_2020_Virtual/Projects/XNAT-OHIF/Templates.jpg differ diff --git a/PW34_2020_Virtual/Projects/XNAT-OHIF/Viewer.jpg b/PW34_2020_Virtual/Projects/XNAT-OHIF/Viewer.jpg new file mode 100644 index 000000000..834e5a85a Binary files /dev/null and b/PW34_2020_Virtual/Projects/XNAT-OHIF/Viewer.jpg differ diff --git a/PW34_2020_Virtual/Projects/XNAT-OHIF/xnatohif4up.png b/PW34_2020_Virtual/Projects/XNAT-OHIF/xnatohif4up.png new file mode 100644 index 000000000..a51dfdb6b Binary files /dev/null and b/PW34_2020_Virtual/Projects/XNAT-OHIF/xnatohif4up.png differ diff --git a/PW34_2020_Virtual/Projects/aevaSlicer/Readme.md b/PW34_2020_Virtual/Projects/aevaSlicer/README.md similarity index 99% rename from PW34_2020_Virtual/Projects/aevaSlicer/Readme.md rename to 
PW34_2020_Virtual/Projects/aevaSlicer/README.md index 5ec0e3986..ce617d26b 100644 --- a/PW34_2020_Virtual/Projects/aevaSlicer/Readme.md +++ b/PW34_2020_Virtual/Projects/aevaSlicer/README.md @@ -17,5 +17,3 @@ aeva software suite currently consists of - aevaSlicer: aevaSlicer will be familiar to users of Slicer. The interface is customized and new features have been added to accommodate a workflow amenable to generation of surface and volume meshes of anatomy from medical images. - aeva: aeva will be familiar to users of ParaView and Computational Model Builder. The interface is customized and new features have been added to support operations for import and export of anatomical representations and for annotation (template based and freeform, including a powerful set of region selection). - - diff --git a/PW34_2020_Virtual/README.md b/PW34_2020_Virtual/README.md index 75c736ac0..d1caa1633 100644 --- a/PW34_2020_Virtual/README.md +++ b/PW34_2020_Virtual/README.md @@ -2,115 +2,306 @@ ## Welcome to the web page for the 34th Project Week! -The [34th NA-MIC Project Week](https://projectweek.na-mic.org/PW34_2020_Virtual/) will be held virtually December 14-18, 2020. - -The event is free and connection details will be provided after [registering](https://forms.gle/VFTNfKRNsgj6YSLB9). +The [34th NA-MIC Project Week](https://projectweek.na-mic.org/PW34_2020_Virtual/) was held virtually December 14-18, 2020. +## History Please read about our experience in running these events since 2005: [Increasing the Impact of Medical Image Computing Using Community-Based Open-Access Hackathons: the NA-MIC and 3D Slicer Experience](http://perk.cs.queensu.ca/sites/perkd7.cs.queensu.ca/files/Kapur2016.pdf). -## Format - -The format of this virtual event will be different from the in-person ones. We will meet daily for 2 hours instead of full days: -* Monday's session will include 5-minute project presentations that include demos. 
-* Tuesday's session will be a discussion about features in 3D Slicer that are new since the last Project Week, and also on design considerations that are relevant for end-to-end solutions (or customizable Slicer apps). -* Wednesday's session will be a question and answer session about 3D Slicer, a "live" version of what takes place asynchronously on the Slicer discourse forum, and will include demostrations of how to use the Segmentation Editor in 3D Slicer. -* Thursday's session will be a discussion on recent advancing in the web and cloud technologies that are being used to create browser based visualization applications and large open databases. -* Friday's session will be a training workshop focused on fostering partnerships with researchers and clinicians in Western Africa. - -### Agenda - -| Day | Time (ET) | Description | Moderator | -|-----------|-------------|------------------------------------------------------------------------------------------------------|------------------| -| Monday | 12pm-2pm | 5-Minute Demos of All Projects | Simon Drouin | -| Tuesday | 12pm-2pm | New Features in 3D Slicer; Focus on End to End Solutions | Beatriz Paniagua | -| Wednesday | 12pm-2pm | Live Discourse - Questions and Answers from Users and Developers; Focus on Segmentation in 3D Slicer | Andras Lasso | -| Thursday | 12pm-2pm | Advances in Web/Cloud Technologies | Steve Pieper | -| Friday | 9.40am-12pm | West African Partnerships | Juan Ruiz Alzola | - ## Logistics - **Dates:** December 14-18, 2020. - **Location:** THE INTERNET - **Discourse Forum:** Please sign-up on the [discourse forum](https://discourse.slicer.org/c/community/project-week) to get updates and ask questions. 
+ - [Project week discourse topic](https://discourse.slicer.org/t/project-week-starts-tomorrow/15040) for less real-time conversation +- **Slack:** [Invite Link](https://join.slack.com/t/namic-projectweek/shared_invite/zt-bmxjicl0-zWkWOV~Sp5lTh1dWhq4nYw) - **REGISTRATION:** [Link](https://forms.gle/VFTNfKRNsgj6YSLB9) -## Program Calendar +## Agenda + +**Note: Zoom Rooms Open at 11am EST and Project Presentations Start at 12noon EST on MONDAY** + [How to add this calendar to your own?](../common/Calendar.md) - +Notes on the agenda: + +* The format of this virtual event will be different from the in-person ones. +* **Monday's session** will include: + * 5-minute project presentations that include demos (for each of the projects listed below) + * Approximately 1:45 of presentations with a 10 min break after the 10th presentation + * An optional breakout session +* **Tuesday's session** will be a discussion about + * Features in 3D Slicer that are new since the last Project Week + * Design considerations that are relevant for end-to-end solutions (or customizable Slicer apps). +* **Wednesday's session** question and answer session about 3D Slicer for users and developers, a "live" version of what takes place asynchronously on the Slicer discourse forum. See discussed questions and answers [here](Breakouts/LiveDiscourse/README.md). +* **Thursday's session** will be a discussion on recent advancing in the web and cloud technologies that are being used to create browser based visualization applications and large open databases. +* **Friday's session** will be a training workshop focused on fostering partnerships with researchers and clinicians in Western Africa. +* The detailed program for each session is available from the link in the embedded Google Calendar above. 
+* In addition to the scheduled presentations, projects may create separate breakout sessions to work in small groups after the end of the main session + * Presenters and participants may use the chat function during the main session to request a breakout session with the designated person + * A few breakout sessions have already been planned (see Breakout Sessions section below) -## Projects [(How to add a new project?)](Projects/README.md) + +## Project Presentations for Monday [(How to add a new project?)](Projects/README.md) 1. [SlicerSALT: Shape analysis](Projects/SlicerSALT/README.md) (Beatriz Paniagua, Kitware) 1. [SlicerHeart](Projects/SlicerHeart/README.md) (Andras Lasso) -1. SlicerDMRI: diffusion imaging in Slicer (Lauren O’Donnell ) 1. [SlicerIGT and AIGT](Projects/SlicerIGT/README.md) (Tamas Ungi) -1. SlicerMorph: Registration, segmentation, surface manipulation of 3D biological datasets (Sara Rolfe) +1. [SlicerMorph: An open and extensible platform to retrieve, visualize and analyze 3D morphology](Projects/SlicerMorph/README.md) (Sara Rolfe) 1. [SlicerCMF: Craniofacial image processing](Projects/SlicerCMF/README.md) (David Allemang) -1. [PRISM volume rendering](Projects/PRISM_Volume_Rendering/Readme.md) (Simon Drouin) +1. [SlicerArduino](Projects/SlicerArduino/README.md) [(video)](https://youtu.be/8R6LfBqHNPY) (Paolo Zaffino, Maria Francesca Spadea) +1. [PRISM volume rendering](Projects/PRISM_Volume_Rendering/README.md) (Simon Drouin) 1. [Virtual reality](Projects/SlicerVR/README.md) (Csaba Pinter) -1. [Collaborative VR discussion](Projects/CollaborativeVR/Readme.md) (Csaba Pinter and Simon Drouin) -1. [SlicerArduino](Projects/SlicerArduino/README.md) (Paolo Zaffino, Maria Francesca Spadea) 1. [SlicerWeb and Slicer in Cloud environments](Projects/Slicer_in_Cloud_Environments/README.md) (Steve Pieper, Theodore Aptekarev) -1. OHIF web applications (Erik Ziegler) -1. 
[aevaSlicer volumetric mesh generation from segmentations](Projects/aevaSlicer/Readme.md) (Sam Horvath) -1. Slicer Segmentation Editor (Andras) -1. RadOncSegmentor Segmentation for Radiation Treatment Planning (Aditya Apte, Eve LoCastro, Harini Veeraraghavan, MSKCC) -1. Landmark Curves for Brain Images (Jarrett Rushmore, BWH/MGH/BU, Sylvain Bouix, BWH/HMS, Nikos Makris, BWH/MGH/HMS) -1. [Scalable Quality Assurance for Neuroimaging (SQAN)](Projects/SQAN/README.md) (Arvind Gopu) +1. [Napari: multi-dimensional image viewer for python](Projects/Napari/README.md) (Nicholas Sofroniew) + +10-minute BREAK + +11. [XNAT - OHIF](Projects/XNAT-OHIF/README.md) (Dan Marcus) +1. [MONAI: PyTorch-based, open-source framework for deep learning in healthcare imaging](Projects/MONAI/README.md) (Tina Kapur for Stephen Aylward) 1. [NCI Imaging Data Commons](Projects/ImagingDataCommons/README.md) (Andrey Fedorov,Steve Pieper, Ron Kikinis) -1. Train the Trainers formation program with African countries: training & anatomical atlases (Juan Ruiz, Asmaa Skareb, Nayra Pumar) -1. Abdominal Aortic Aneurysm 4D CT registration (Farah Alkhatib, Adam Wittek, Karol Miller, Toby Richards, Hozan Mufty) -1. Liver Resection Planning (Rafael Palomar, Ole Vegard Solberg, Geir Arne Tangen) -1. [Subject hierarchy: Drag&drop folders in views](Projects/SubjectHierarchyFolders/README.md) (Csaba Pinter, Steve Pieper) +1. [aevaSlicer volumetric mesh generation from segmentations](Projects/aevaSlicer/README.md) (Sam Horvath) +1. [Landmark Curves for Brain Images](Projects/Landmark%20Curves%20for%20Brain%20Images/README.md) (Jarrett Rushmore, BWH/MGH/BU, Sylvain Bouix, BWH/HMS, Nikos Makris, BWH/MGH/HMS, Kyle Sunderland, Queen's University, Andras Lasso, Queen's University) +1. [RadOncSegmentor](Projects/RadOncSegmentor/README.md) Segmentation for Radiation Treatment Planning (Aditya Apte, Eve LoCastro, Harini Veeraraghavan, MSKCC) +1. 
[Scalable Quality Assurance for Neuroimaging (SQAN)](Projects/SQAN/README.md) (Arvind Gopu) +1. [Train the Trainers](Projects/TrainTheTrainers/README.md) formation program with African countries: training & anatomical atlases (Juan Ruiz, Asmaa Skareb, Marilola Afonso, Nasara Cabrera Abu, Tagwa Idris, Septy Inge, Otman Aghzout, Babacar Diao, Alexandra Fernandes, Badiaa Ait Ahmed, Nayra Pumar) +1. [SlicerLiverAnalysis](Projects/SlicerLiverAnalysis/README.md) (Rafael Palomar OUS,NTNU. Ole Vegard Solberg, SINTEF) 1. [MICUDA - Generalized Entropy MI Registration](Projects/MICUDA/README.md) (Luiz Otavio Murta, Vinicius Pavanelli Vianna) +1. [TRAKO](Projects/TRAKO/README.md) (Daniel Haehn, Steve Pieper, Lauren O'Donnell, Yogesh Rathi)) +1. [Slicer Training - DICOM](Projects/SlicerTraining-DICOM/README.md) (Sonia Pujol) +1. [Slicer + Looking Glass Factory's Holographic Display](Projects/SlicerLookingGlass/README.md) (Jean-Christophe Fillion-Robin, Stephen Aylward) +1. [Open Anatomy](Projects/OpenAnatomy/README.md) (Mike Halle et. al.) +## Breakout Sessions: +To learn how to join, leave or display list of breakout rooms. See [here](./ZoomBreakoutRoom.md). +1. Monday @ 3pm [Ultrasound Image Guided Therapy](https://docs.google.com/document/d/1laFD-GSYOq-mN0tLomYqcqLwrYXuwiMDU46AVToLcKE/edit) (Tamas Ungi, Tina Kapur, Simon Drouin) +1. Tuesday @ 11am [Collaborative VR discussion](Projects/CollaborativeVR/README.md) (Csaba Pinter and Simon Drouin) +1. Wednesday @ 11am EST Project/Room 10 & 11: Dan Marcus, XNAT + OHIF +1. Wednesday @ 11am: Project 15 Landmark Curves for Brain Images  (Jarrett Rushmore, Kyle Sunderland) +1. Wednesday @ 2pm. Project 12 MONAI: PyTorch-based, open-source framework for deep learning in healthcare imaging (Stephen Aylward) +1. Thursday @ 11am. [Segmentation import/export/storage](Breakouts/SegmentationStorageFormat/README.md) (Andras Lasso) +1. 
Thursday @ 2pm [Image segmentation clinic](Breakouts/Segmentation/README.md)Bring your own segmentation problem or learn from how we solve others'(Andras Lasso) ## Registrants Do not add your name to this list below. It is maintained by the organizers based on your registration. [Register here](https://forms.gle/VFTNfKRNsgj6YSLB9). List of registered participants so far (names will be added here after processing registrations): - - -1. Simon Drouin -1. Tina Kapur -1. Nadya Shusharina -1. Salim Kanoun -1. Hans Johnson -1. Steve Pieper -1. Csaba Pintér -1. Saima Safdar -1. Alexandra Golby -1. Randy Gollub -1. Simon Oxenford -1. Sonia Pujol -1. Sylvain Bouix -1. Khalid Al Orabi -1. Andrey Fedorov -1. Farah Alkhatib -1. Dhruv Kool Rajamani -1. Adam Wittek -1. Matthew Toews -1. Vivian Monezi Tetzner -1. Samantha Horvath -1. Rafael Palomar -1. Paolo Zaffino -1. Michael Young -1. Arvind Gopu -1. Attila Nagy -1. Curtis Lisle -1. Vinicius Pavanelli Vianna -1. NIKOLAOS MAKRIS -1. Tamas Ungi +1. Simon Drouin , École de technologie supérieure (ETS Montreal) , QC , Canada +1. Tina Kapur , Brigham and Women's Hospital and Harvard Medical School , MA , USA +1. Nadya Shusharina , Massachusetts General Hospital , MA , USA +1. Salim Kanoun , Institut Claudius Regaud - Toulouse , Occitanie , France +1. Hans Johnson , University of Iowa , IA , USA +1. Steve Pieper , Isomics, Inc. , Massachusetts , USA +1. Csaba Pintér , Ebatinca S.L., Pixel Medical Inc. , Las Palmas , Spain +1. Saima Safdar , University of western australia , WA , Australia +1. Alexandra Golby , Brigham and Women’s hospital and Harvard Medical School , MA , USA +1. Randy Gollub , MGH , MA , USA +1. Simon Oxenford , Charité – Universitätsmedizin Berlin , Berlin , Germany +1. Sonia Pujol , Brigham and Women's Hospital, Harvard Medical School , MA , USA +1. Sylvain Bouic , Brigham and Women's Hospital , MA , USA +1. Khalid Al Orabi , King abdullah medical city , Makkah , Saudi Arabia +1. 
Andrey Fedorov , Brigham and Women's Hospital , MA , USA +1. Farah Alkhatib , University of Western Australia , Western Australia , Australia +1. Dhruv Kool Rajamani , Worcester Polytechnic Institute , Massachusetts , USA +1. Adam Wittek , Intelligent Systems for Medicine Laboratory, The University of Western Australia , Western Australia , Australia +1. Matthew Toews , École de Technologie Supérieure , QC , Canada +1. Vivian Monezi Tetzner , USP-FFCLRP , SP , Brazil +1. Samantha Horvath , Kitware , North Carolina , USA +1. Rafael Palomar , Oslo University Hospital , Norway , Norway +1. Paolo Zaffino , Magna Graecia University of Catanzaro , Catanzaro , Italy +1. Michael Young , Indiana University , IN , USA +1. Arvind Gopu , Indiana University , Indiana , USA +1. Attila Nagy , University of Szeged , N/A , Hungary +1. Curtis Lisle , KnowledgeVis, LLC , FL , USA +1. Vinicius Pavanelli Vianna , USP - University of Sao Paulo , Sao Paulo , Brazil +1. NIKOLAOS MAKRIS , MASSACHUSETTS GENERAL HOSPITAL , MASSACHUSETTS , USA +1. Tamas Ungi , Queen's University , Ontario , Canada +1. Eve LoCastro , Memorial Sloan Kettering Cancer Center , New York , USA +1. Ole Vegard Solberg , SINTEF , Trøndelag , Norway +1. Mehran Azimbagirad , University of Sao Paulo , SP , Brazil +1. Sara Rolfe , University of Washington , WA , USA +1. Junichi Tokuda , Brigham and Women's Hospital , MA , USA +1. Harini Veeraraghavan , Memorial Sloan Kettering Cancer Center , New York , USA +1. Renzo Phellan Aro , McGill University , QC , Canada +1. Beatriz Paniagua , Kitware Inc , NC , USA +1. Theodore Aptekarev , - , - , Russia/Israel +1. Eleni Siampli , Childrens National Medical Center , DC , USA +1. Ron Kikinis , Harvard Medical School , Massachusetts , USA +1. Carl-Fredrik Westin , Harvard Medical School , MA , USA +1. Lauren J ODonnell , BWH , MA , USA +1. Raul San Jose , Brigham and Women's Hospital , MA , USA +1. Julien Finet , Kitware , Rhône-Alpes , France +1. 
David Allemang , Kitware Inc , NC , USA +1. Forrest Li , Kitware, Inc. , NC , USA +1. Matt McCormick , Kitware , North Carolina , USA +1. Sarah Frisken , Brigham and Women's Hospital , MA , USA +1. Jean-Christophe Fillion-Robin , Kitware , North Carolina , USA +1. Nick Jowkar , Brigham and Women's Hospital , MA , USA +1. Luiz Murta , University of São Paulo , SP , Brazil +1. Murat Maga , Seattle Children's Research Institute / University of Washington , Washington , USA +1. Marco Nolden , German Cancer Research Center (DKFZ) , N/A , Germany +1. Nayra Pumar Carreras , Universidad de Las Palmas de Gran Canaria , Las Palmas , Spain +1. khalid Elamin Awad , University of Khartoum, Faculty of Medicine , Khartoum , Sudan +1. Marie Ndiaye , Université Assane Seck de Ziguinchor , Sénégal , Sénégal +1. Gregory C. Sharp , Massachusetts General Hospital , MA , USA +1. David García-Mato , Universidad Carlos III de Madrid , Madrid , Spain +1. Samba NDIAYE , CHEIKH ANTA DIOP UNIVERSITY , SENEGAL , Sénégal +1. Ahmed Temtam , Old Dominion University , VA , USA +1. Jared Vicory , Kitware , North Carolina , USA +1. Daniel Haehn , University of Massachusetts Boston , Massachusetts , USA +1. Jarrett Rushmore , Boston University, Brigham and Women's Hospital, MGH , MA , USA +1. Tagwa Idris , Massachusetts General Hospital , MA , USA +1. Fathelrahman Idris Gasmelseed Ali , University of Khartoum, Faculty of medicine , Khartoum , Sudan +1. EL Tahir Ahmed EL Tahir , University of Alfashir , North Darfur , Sudan +1. Abdelmajed Manna , Department of Anatomy, university of Alfashir , North Darfur , Sudan +1. Noureldeen abaker , Uorolgy resident , South Darfur , Sudan +1. Mahil Mohamed Sharief Abdalla , Department of Anatomy, Faculty of Medicine, University of Khartoum , Khartoum , Sudan +1. Haythem Mohammed Gorshi Ahmed , University of Khartoum , Khartoum , Sudan +1. Abuzar Mubarak Omer Osman , Ondokuz mayıs üniversity OMÜ , black sea , Turkey +1. 
Ehab Mohammed Rabie Abd Allah , Cairo University school of medicine , Khartoum , Sudan +1. Nicholas Sofroniew , Chan Zuckerberg Initiative , CA , USA +1. Osama Mohammed Ahmed Mohammed ELNOUR , Faculty of medicine and health scince , North Darfur State , Sudan +1. Mustafa Bahar , Soba University Hospital , Khartoum , Sudan +1. Dr.anatomy83@yahoo.com , Prince Sattam bin Abdulaziz University , Riyadh , Saudi Arabia +1. Abubaker Elssidig Elhaj Bakhit Farg , University of Ha'il. KSA , Ha'il Prevalence , Saudi Arabia +1. Pape Mady THIAO , École militaire de santé de Dakar , SÉNÉGAL , Sénégal +1. Elsadig Suleiman Adam , Nyala Specialized Hospital , South Darfur , Sudan +1. Juan Ruiz-Alzola , University of Las Palmas de Gran Canaria , Canarias , Spain +1. Idafen Santana , Las Palmas de Gran Canaria University , Canary Islands , Spain +1. NORALDAIM AHMED FADOL MOHAMMED , Ministry of health , Darfor , Sudan +1. Leila omer adam Ahmed , University of al fashir , North Darfur , Sudan +1. Babacar DIAO , Cheikh Anta DIOP University , Senegal , Sénégal +1. Khaly / TALL , Ecole Supérieure Polytechnique de Dakar , Sénégal , Sénégal +1. Idy , École Supérieure Polytechnique , Senegal , Sénégal +1. Stephen Aylward , Kitware , NC , USA +1. Selly Suzuki , São Leopoldo Mandic , Sao Paulo , Brazil +1. Haythem Guermazi , Faculté de Médecine de L'université de Nouakchott Al Aasriya Mauritania , Nouakchott , Mauritania +1. William Wells , BWH , Massachusetts , USA +1. Christian Herz , Children's Hospital of Philadelphia , Pennsylvania , USA +1. Mamadou Samba CAMARA , Ecole Supérieure Polytechnique / University of Dakar , Senegal , Sénégal +1. Michal Brzus , University of Iowa , IA , USA +1. Wafa Nour , Khartoum breast care center , Khartoum , Sudan +1. Izabel Rubira-Bullen , University Sao Paulo , Sao Paulo , Brazil +1. Tim Olsen , Radiologics , IL , USA +1. Blake Griggs , Radiologics, Inc , CA , USA +1. Amadou Gabriel Ciss , université cheikh anta diop , senegal , Sénégal +1. 
Idris Ahmed Abaker Ibrahim , Alfasher University faculty of medicine , NORTH DARFUR , Sudan +1. SOW NDEYE FATOU , UCAD , DAKAR , Sénégal +1. Souleymane Diatta , chu fann , dakar , Sénégal +1. Momar Sokhna Diop , Cheikh Anta Diop University , Senegal , Sénégal +1. Dan Marcus , Washington Universituy , MO , USA +1. PAPA SALMANE BA , UCAD , Sénégal , Sénégal +1. armin , medken , wa , USA +1. Salah osman salah , Rick , Khartoum , Sudan +1. Aya Zakareya Noor Hamid , Haj Al Safi Hospital , Khartoum , Sudan +1. Shiraz Yousif , University of khartoum , NSW , Australia +1. Assane Ndiaye , Université Assane Seck , Senegal , Sénégal +1. Kamal Eldeen Mohammed Ibrahim Dahab , University of Khartoum , Khartoum , Sudan +1. Asmaa Skareb , Canary Islands Institute of Technology , Gran Canaria , Spain +1. Manjula , Malmö University , Skane , Sweden +1. Michela Destito , Magna Graecia University of Catanzaro , Italy , Italy +1. Souleymane Diao , Université Cheikh Anta Diop , Sénégal , Sénégal +1. Geir Arne Tangen , SINTEF , Trondheim , Norway +1. Mohammed Nimir , University Hospital Coventry and Warwickshire , West Midlands , UK +1. Marwan Ahmed , St. George’s University , Saint George’s , Grenada +1. Elimam Elghazali Siddig Mohamed Mustafa , University of Khartoum , Khartoum , Sudan +1. Badiaa AIT AHMED , University Abdelmalek Essaâdi , North Africa , Morocco +1. Rabab SEDRATI , ABDELMALEK ESAADI UNIVERSITY , North of Africa , Morocco +1. Ahmedou Moulaye IDRISS , Faculty of Medicine University of Nouakchott Al Aasriya , Mauritania , Mauritania +1. Mohamed bamba hbedy , Faculté médecine Nouakchott , Nouakchott , Mauritania +1. EL FARAZE Mohammed , ABDELMALEK ESAADI UNIVERSITY , NORTH OF AFRICA , Morocco +1. Kyle Sunderland , Queen's University , Ontario , Canada +1. EL GUERI Jaouad , ENSA Tétouan , North of Africa , Morocco +1. Otman AGHZOUT , University Abdelmalek Essaedi , North Africa , Morocco +1. Andras Lasso , PerkLab, Queen's University , ON , Canada +1. 
Pedro Moreira , Brigham and Women's Hospital , MA , USA +1. Dr. Gargi Jani , National forensic sciences university , Gujarat , India +1. Anita Carolina Ribeiro , FOUSP , São Paulo , Brazil +1. Daniele Piccolo , Nucleode SRL, University of Padua , UD , Italy +1. David Manso Arcediano , Cella Medical Solutions , Murcia , Spain +1. Mohamed aly Dedew , Faculty of medecine of Nouakchott , Nouakchott , Mauritania +1. Miguel Xochicale , King's College London , London , UK +1. Mahomed Sidique Abdul Cadar Dada , Uem , Maputo , Moçambique +1. Tasos Raptis , National Technical University of Athens , Attiki , Greece +1. María Dolores Afonso Suárez , Universidad de Las Palmas de Gran Canaria , Canary Islands , Spain +1. Simon Doran , Institute of Cancer Research , Surrey , UK +1. Elameen Adam , University of Bahri , Khartoum , Sudan +1. Fatima Elnagieb Adam Gamereldien , Nyala Teaching hospital , South Darfour , Sudan +1. Rafael Pineda Reyes , University of Cordoba , Cordoba , Spain +1. Blanca Zufiria Gerbolés , Vicomtech , guipuzkoa , Spain +1. Mohammed Eltahir , University of Bahri , Khartoum , Sudan +1. Godwin Wunpiini Yiddi , Kwame Nkrumah University of Science and Technology , Ashanti Region , Ghana +1. Kate Alpert , Radiologics , CO , USA +1. Gordon Harris , Massachusetts General Hospital , MA , USA +1. Supriya Somarouthu , Massachusetts General Hospital , Massachusetts , USA +1. Awa NDIAYE SY , Université Cheikh Anta Diop de Dakar - Faculté de Médecine, de Pharmacie et d'Odontologie , SENEGAL , Sénégal +1. Aditya Apte , Memorial Sloan Kettering Cancer Center , New York , USA +1. Andrea Avena-Koenigsberger , Indiana University , INDIANA , USA +1. Juan Carlos Prieto , UNC , North Carolina , USA +1. Michael Halle , Brigham and Women's Hospital , Massachusetts , USA +1. Marta Vidal-Garcia , University of Calgary , Alberta , Canada +1. antonietta del bove , UNIVERSITAT ROVIRA I VIRGILI , SPAIN , Spain +1. 
Terrie Simmons-Ehrhardt , Virginia Commonwealth University , VA , USA +1. Arthur Porto , Louisiana state university , LA , USA +1. Lucia Cevidanes , University of Michigan , MI , USA +1. Lucas Lo Vercio , University of Calgary , Alberta , Canada +1. Roozbeh Shams , Polytechnique Montreal , QC , Canada +1. Eva Zaffarini , University of Calgary , Alberta , Canada +1. James Dickson , Radiologics Inc , Pennsylvania , USA +1. Erik Ziegler , Radical Imaging , Rhone-Alpes , France +1. Mounia Chakkour , Abdelmalek Essaâdi University (ENSA Tetuan) , North Africa , Morocco +1. Étienne Léger , Concordia University , Québec , Canada +1. Benedict Kingsford Andam , Kwame Nkrumah University of Science and Technology , Ayeduase , Ghana +1. Mohammad Alsad , Imperial College London , Cambs , UK +1. YAHYA TFEIL , faculty of medicine alassriya university mauritaniaania , nouakchott , Mauritania +1. jonas bianchi , University of the Pacific , California , USA +1. MOHAMED SABAN , UNIVERSIDAD DE VALENCIA , VALENCIA , Spain +1. Amdaouch Ibtisam , University of Abdelmalek Essaadi , Tanger-Tetouan , Morocco +1. Mariela Peralta-Mamani , Bauru School of Dentistry-University of Sao Paulo , SP , Brazil +1. Ángel Terrero Pérez , Bauru School of Dentistry, University of Sao Paulo , Sao Paulo , Brazil +1. David Orlando Grajales Lopera , Polytechnique Montreal , Québec , Canada +1. Alexis Girault , Kitware , NC , USA +1. Leonardo F Machado , University of São Paulo até Ribeirão Preto , SP , Brazil +1. Manuel Gustavo Chávez Sevillano , Rio de Janeiro State University , Rio de Janeiro , Brazil +1. Gabor Fichtinger , Queen's Univ , ON , Canada +1. Charles Johnson , University of Iowa , Iowa , USA +1. Morgan Hough , Center17 , CA , USA +1. Aron Aliaga Del Castillo , Bauru Dental School, University of São Paulo , São Paulo , Brazil +1. Camila Massaro , University of São Paulo , Sp , Brazil +1. Wilson Komla , Kwame Nkrumah University of Science and Technology , Ashanti Region , Ghana +1. 
Victor Elikem Akpaloo , Kwame Nkrumah University of Science and Technology , Greater Accra Region , Ghana +1. ABOAGYE, Gifty Takyiwaa , Kwame Nkrumah University of Science and Technology , Ashanti , Ghana +1. omar sow , université assane seck de ziguinchor , ziguinchor , senegal +1. Rudolf Bumm , Department of Surgery, Kantonsspital Graubünden , Graubünden , Switzerland +1. Kelly Diamond , Seattle Children's Research Institute , WA , United States +1. Autumn Kulaga , CCDC Data & Analysis Center , MD , US +1. DELEAT-BESSON , University of Michigan , Michigan , United States +1. Celia LE , University of Michigan , Michigan , United States +1. Deise Caldas , Malta , Saint Julians , Malta +1. Steve Moore , Washington University , MO , USA +1. Cseh Martin , University of Szeged 3D Center , Csongrad , Hungary +1. Nasara Cabrera Abu , Canary Islands Government , Canary Islands , Spain +1. Sokhna Astou Gawane Thiam , Université Cheikh Anta Diop , Sénégal , Sénégal +1. Jasmin Metzger , German Cancer Research Center , BW , Germany +1. Aron Helser , Kitware , NC , US +1. Marcela Gurgel , Federal University of Ceara , Ceara , Brazil +1. Jason Haider , Xenco Medical , California , United States +1. Jordan Karten , Massachusetts General Hospital , MA , USA +1. Ron Alkalay , Beth Israel Deaconess Medical Center , MA , US +1. Parikshit Juvekar , Brigham & Women's Hospital , Massachusetts , USA +1. 
Dhiego Bastos , BWH , MA , United States ## Statistics +* 204 registered attendees +* 26 countries +* 101 institutions (82 academic, 18 industry, and 1 goverment) + diff --git a/PW34_2020_Virtual/ZoomBreakoutRoom.md b/PW34_2020_Virtual/ZoomBreakoutRoom.md new file mode 100644 index 000000000..fb029d9db --- /dev/null +++ b/PW34_2020_Virtual/ZoomBreakoutRoom.md @@ -0,0 +1,10 @@ +Back to [Breakout Sessions](README.md#breakout-sessions) + +# Display list of breakout rooms +![](pw34-breakroom-button.png) + +# Leaving a breakout room +![](pw34-breakroom-leave.png) + +# List of breakout rooms +![](pw34-breakroom-list.png) diff --git a/PW34_2020_Virtual/pw-registrants-country-stats-final.png b/PW34_2020_Virtual/pw-registrants-country-stats-final.png new file mode 100644 index 000000000..37d911fa0 Binary files /dev/null and b/PW34_2020_Virtual/pw-registrants-country-stats-final.png differ diff --git a/PW34_2020_Virtual/pw-registrants-country-stats.png b/PW34_2020_Virtual/pw-registrants-country-stats.png new file mode 100644 index 000000000..410d8f15a Binary files /dev/null and b/PW34_2020_Virtual/pw-registrants-country-stats.png differ diff --git a/PW34_2020_Virtual/pw34-breakroom-button.png b/PW34_2020_Virtual/pw34-breakroom-button.png new file mode 100644 index 000000000..605ae7d6b Binary files /dev/null and b/PW34_2020_Virtual/pw34-breakroom-button.png differ diff --git a/PW34_2020_Virtual/pw34-breakroom-leave.png b/PW34_2020_Virtual/pw34-breakroom-leave.png new file mode 100644 index 000000000..aa96bb093 Binary files /dev/null and b/PW34_2020_Virtual/pw34-breakroom-leave.png differ diff --git a/PW34_2020_Virtual/pw34-breakroom-list.png b/PW34_2020_Virtual/pw34-breakroom-list.png new file mode 100644 index 000000000..0d544c8ff Binary files /dev/null and b/PW34_2020_Virtual/pw34-breakroom-list.png differ diff --git a/PW35_2021_Virtual/Breakouts/DIOCM+OHIF/DICOM-Overview.md b/PW35_2021_Virtual/Breakouts/DIOCM+OHIF/DICOM-Overview.md new file mode 100644 index 
000000000..9116c0ccc --- /dev/null +++ b/PW35_2021_Virtual/Breakouts/DIOCM+OHIF/DICOM-Overview.md @@ -0,0 +1,111 @@ +--- +marp: true +--- + +# Overview of DICOM and the open source ecosystem + + +--- + +# Why use DICOM? +* Universal standard for medical imaging +* Don't lose data + * Get all the details from the scanner + * Well-defined representations with documentation + +--- + +# How to think about DICOM +* Each "dataset" is an instance of a "class" with strongly typed instance variables (called "elements") + * Instances can be stored as files (called Part-10 files after the section of the standard describing them) + * Instances can be grouped when they share unique IDs +* The sequence of instances are like a logfile of what the scanner generated and it's up to the application to sort through them to determine the relationships and map them into useful constructs like Volumes, Segmentations, etc. +* To create DICOM instances the application populates the elements to link it with the other instances as appropriate + +--- + +# Some of the more useful DICOM classes +* Imaging: CT, MR, PET, US... + * Orginal scan data +* Segmentation: SEG + * Image based labeling of structures +* Structured Reporting: SR + * Vector annotations, quantifications, qualitative findings +* Radiotherapy: RT + * Doses, plans, structures... 
+* Parametric Maps: PM + * Images with defined quantities and units +* Spatial Registration: SRO + * Linear and nonlinear with explicit frames of reference +* Whole Slide Images: WSI + * Microscopy images, possibly multichannel with annotations in SR + +--- + +# DICOM networking +* DIMSE is tradional "PACS" networking used worldwide + * Both endpoints need custom configuration + * Best for use within controlled firewalls +* DICOMweb is uses modern REST API concepts + * Better suited to internet and security + * Introduced JSON header encoding + +--- + +# DICOM Implementations: Java, C# +* PixelMed toolkit, open source, but intended for reference not for community use +* FairOaks +* probably others... + +--- + +# DICOM Implementations: C++ +* GDCM: traditional implementation used in ITK +* DCMTK: also widely used in ITK and many other places +* CommonTK (CTK) + * DCMTK + Qt * SQLite + * Core of Slicer's DICOM module +* dcmqi: convenience interface over DCMTK to support encoding of analysis results in DICOM + +--- + +# DICOM Implementations: Python +* pydicom + * Widely used, bundled with Slicer + * Maps instances to python objects and numpy arrays +* pydicomnet + * Implements DIMSE with pydicom +* dicomweb-client + * Implements DICOMweb with pydicom +* highdicom (new) + * Adds SEG, SR, etc on pydicom +--- + +# DICOM Implementations: JavaScript +* dicomParser, cornerstone, OHIF + * Layers of the Open Health Imaging Foundation stack +* dcmjs.org + * dcmjs: maps instances to/from JavaScript classes + * original: emscripten cross-compiled DCMTK + * current: pure JavaScript (browser/server) + * dicomweb-client/dicomweb-server: DICOMweb on dcmjs + * dcmjs-dimse (new): DIMSE on dcmjs (server only) +* Can be used in qSlicerWebWidget + +--- + +# DICOM in Slicer +* DICOM module supports local database and DIMSE networking +* DICOM Plugins examine related instances to propose mappings to Slicer datatypes, export Sicer data to DICOM +* DICOMwebBrowser query/retrieve/store and 
support Google DICOMweb stores securely +* DICOM Plugins provided by SlicerRT, QuantitativeReporting, PET... + +--- + +# Summary +* Supporting all of DICOM is a huge task +* Community is very active tools are becoming very capable +* Interoperability is improving + * Slicer-generated segmentations in OHIF + * OHIF structured report annotations in Slicer + * highdicom encoded machine learning results in Slicer and OHIF diff --git a/PW35_2021_Virtual/Projects/ARinSlicer/Figure_HoloLens2_OrthopedicOncologicalSurgery.png b/PW35_2021_Virtual/Projects/ARinSlicer/Figure_HoloLens2_OrthopedicOncologicalSurgery.png new file mode 100644 index 000000000..c50d20d32 Binary files /dev/null and b/PW35_2021_Virtual/Projects/ARinSlicer/Figure_HoloLens2_OrthopedicOncologicalSurgery.png differ diff --git a/PW35_2021_Virtual/Projects/ARinSlicer/Figure_Smartphone_CraniosynostosisSurgery.png b/PW35_2021_Virtual/Projects/ARinSlicer/Figure_Smartphone_CraniosynostosisSurgery.png new file mode 100644 index 000000000..0932bc810 Binary files /dev/null and b/PW35_2021_Virtual/Projects/ARinSlicer/Figure_Smartphone_CraniosynostosisSurgery.png differ diff --git a/PW35_2021_Virtual/Projects/ARinSlicer/Figure_Smartphone_NeedleInsertion.png b/PW35_2021_Virtual/Projects/ARinSlicer/Figure_Smartphone_NeedleInsertion.png new file mode 100644 index 000000000..f9e8a3e85 Binary files /dev/null and b/PW35_2021_Virtual/Projects/ARinSlicer/Figure_Smartphone_NeedleInsertion.png differ diff --git a/PW35_2021_Virtual/Projects/ARinSlicer/Figure_Smartphone_PatientCommunication.png b/PW35_2021_Virtual/Projects/ARinSlicer/Figure_Smartphone_PatientCommunication.png new file mode 100644 index 000000000..da1b76c7a Binary files /dev/null and b/PW35_2021_Virtual/Projects/ARinSlicer/Figure_Smartphone_PatientCommunication.png differ diff --git a/PW35_2021_Virtual/Projects/ARinSlicer/README.md b/PW35_2021_Virtual/Projects/ARinSlicer/README.md new file mode 100644 index 000000000..08594e753 --- /dev/null +++ 
b/PW35_2021_Virtual/Projects/ARinSlicer/README.md @@ -0,0 +1,92 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# AR in Slicer + +## Key Investigators + +- Alicia Pose Díez de la Lastra (Universidad Carlos III de Madrid, Madrid, Spain) +- Javier Pascau (Universidad Carlos III de Madrid, Madrid, Spain) +- Csaba Pinter (Ebatinca S.L., Las Palmas de Gran Canaria, Spain) +- Lucas Gandel (Kitware, France) +- Adam Rankin (Robarts Research Institute / Western University, Canada) +- Jean-Christophe Fillion-Robin (Kitware, USA) +- Mónica García-Sevilla (Universidad de Las Palmas de Gran Canaria , Gran Canaria , Spain) +- Houssem Gueziri + + +# Project Description + + +Augmented Reality has increase its adoption in many areas with exciting benefits. Universidad Carlos III de Madrid (Madrid, Spain) has already worked in several medical projects +based on AR (see their progress in https://biig-igt.uc3m.es/augmented-reality/). On these studies, they usually export information from Slicer to an alternative software (Unity). + +The ultimate goal of this project is to check if it is possible to incorporate this technology directly to 3D Slicer in order to centralize the working process, at the time +of benefiting from all Slicer tools. + +## Objective + + + +1. Learn what can be done in AR using 3D Slicer. +2. Explore possible paths to integrate AR in 3D Slicer in the future. +3. Study if we can receive transformations in Unity from Slicer, so that we can transfer navigation information between the two softwares. + +Some links of interest: +1. [Writing a Holographic Remoting remote app using the OpenXR API](https://docs.microsoft.com/en-us/windows/mixed-reality/develop/platform-capabilities-and-apis/holographic-remoting-create-remote-openxr) +2. [Slicer Documentation on Augmented Reality and Virtual Reality support](https://www.slicer.org/wiki/Documentation/Labs/Augmented_Reality_and_Virtual_Reality_support#Current_approaches) + +## Approach and Plan + + + +1. 
Develop a new Slicer extension called SlicerAR that streams AR directly to HoloLens 2. + +## Progress and Next Steps + + + +1. AR Support (OpenXR) in VTK: + +The WIP branch supporting Holographic remoting to stream VTK rendering inside the Hololens has been submitted [here](https://gitlab.kitware.com/vtk/vtk/-/merge_requests/8101). +The immediate actions to take are listed there in the TODOS section: 1. and 2. should be addressed to reuse this work in SlicerVR. + +2. Alicia and Lucas tried to replicate in Alicia's computer the steps of the WIP branch Lucas' shared above. They summarized them in [this](StepsToFollow.pdf) pdf document. Despite they were not able to complete the final step, they found out and fixed many new issues that improved the project. + +3. Alicia, Houssem and Étienne Léger also met to discuss some features related to pattern recognition in HoloLens with Vuforia and ArUco, latency and interconnectivity between Slicer and Unity. + +4. Alicia and Naghmeh additionally talked about connecting 3D Slicer and Unity in real time to send transformations between them. [Here](https://github.com/Lyla-M/UnityOpenIGTLink), [here](https://github.com/franklinwk/OpenIGTLink-Unity) and [here](https://github.com/thomasMuender/SlicerToUnity) you can find two GitHub projects that explain how to achieve this connection via OpenIGTLink. + +5. Alicia and Étienne met again after Project Week to discuss further about Étienne's work. He tracks a tool connected to Plus and Slicer via OpenIGTLink. He managed to send this information, in real-time, to a self-developed AR tablet application ([MARIN](https://github.com/AppliedPerceptionLab/MARIN)) using [IBIS](https://github.com/IbisNeuronav/Ibis). Everything is programmed in Qt (check out the Qt creator from Qt.io to start programming in Qt). 
+ + + +# Illustrations + + +Here below you can find some AR implementations in health by Universidad Carlos III de Madrid (Madrid, Spain) in the past years: + +HoloLens 2 in Orthopedic Oncological Surgeries: + +![HoloLens 2 in Orthopedic Oncological Surgeries](Figure_HoloLens2_OrthopedicOncologicalSurgery.png) + +Smartphone app to communicate with the patient and help him/her understand his/her condition: + +![Smartphone app to communicate with the patient and help him/her understand his/her condition](Figure_Smartphone_PatientCommunication.png) + +Real-time guidance during Open Cranial Vault Remodeling using smartphone: + +![Smartphone app to guide open cranial vault remodeling](Figure_Smartphone_CraniosynostosisSurgery.png) + +Needle Insertion Guidance for Sacral Nerve Stimulation using smartphone: + +![Smartphone app to guide needle insertion for sacral nerve stimulation](Figure_Smartphone_NeedleInsertion.png) + + + + +# Background and References + + diff --git a/PW35_2021_Virtual/Projects/ARinSlicer/StepsToFollow.pdf b/PW35_2021_Virtual/Projects/ARinSlicer/StepsToFollow.pdf new file mode 100644 index 000000000..41422c0ff Binary files /dev/null and b/PW35_2021_Virtual/Projects/ARinSlicer/StepsToFollow.pdf differ diff --git a/PW35_2021_Virtual/Projects/CNN_Brain_Masking/BrainMaskPlan.png b/PW35_2021_Virtual/Projects/CNN_Brain_Masking/BrainMaskPlan.png new file mode 100644 index 000000000..bb57e5e89 Binary files /dev/null and b/PW35_2021_Virtual/Projects/CNN_Brain_Masking/BrainMaskPlan.png differ diff --git a/PW35_2021_Virtual/Projects/CNN_Brain_Masking/HDBETMaskResult.png b/PW35_2021_Virtual/Projects/CNN_Brain_Masking/HDBETMaskResult.png new file mode 100644 index 000000000..9b2b10c3f Binary files /dev/null and b/PW35_2021_Virtual/Projects/CNN_Brain_Masking/HDBETMaskResult.png differ diff --git a/PW35_2021_Virtual/Projects/CNN_Brain_Masking/HDBETMaskSlices.png b/PW35_2021_Virtual/Projects/CNN_Brain_Masking/HDBETMaskSlices.png new file mode 100644 index 
000000000..f42cc9a52 Binary files /dev/null and b/PW35_2021_Virtual/Projects/CNN_Brain_Masking/HDBETMaskSlices.png differ diff --git a/PW35_2021_Virtual/Projects/CNN_Brain_Masking/HDBETaccMaskResult.png b/PW35_2021_Virtual/Projects/CNN_Brain_Masking/HDBETaccMaskResult.png new file mode 100644 index 000000000..e165e6f9f Binary files /dev/null and b/PW35_2021_Virtual/Projects/CNN_Brain_Masking/HDBETaccMaskResult.png differ diff --git a/PW35_2021_Virtual/Projects/CNN_Brain_Masking/HDBETaccMaskSlices.png b/PW35_2021_Virtual/Projects/CNN_Brain_Masking/HDBETaccMaskSlices.png new file mode 100644 index 000000000..7bb118792 Binary files /dev/null and b/PW35_2021_Virtual/Projects/CNN_Brain_Masking/HDBETaccMaskSlices.png differ diff --git a/PW35_2021_Virtual/Projects/CNN_Brain_Masking/README.md b/PW35_2021_Virtual/Projects/CNN_Brain_Masking/README.md new file mode 100644 index 000000000..e2c6d65da --- /dev/null +++ b/PW35_2021_Virtual/Projects/CNN_Brain_Masking/README.md @@ -0,0 +1,79 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# CNN based Brain Masking Module + +## Key Investigators + +- Raymond Yang (University of Massachusetts Boston) +- Lipeng Ning (BWH & Harvard Medical School) +- Daniel Haehn (University of Massachusetts Boston) +- Yogesh Rathi (BWH & Harvard Medical School) +- Steve Pieper (Isomics, Inc.) + + +# Project Description + +Develop a deep learning based Brain Masking Module with improved performance and accuracy over current alternatives. + +## Objective + + + +1. Objective A. Test accuracy and reliability of a Deep Learning based Brain Masking Solution +1. Objective B. Integrate the solution into 3D slicer +1. Objective C. Test and improve performance of the integrated solution + +## Approach and Plan + + + +1. Explore Image Registration options (EasyReg/MERMAID) +1. Research current Deep Learning based Brain Masking (HD-BET, Auto Net) +1. Get access to data for training and testing +1. Create the solution +1. 
Figure out how to integrate the solution +1. Evaluate the performance + +## Progress and Next Steps + + + +1. Applied for NIH Dataset Request +1. Tested HD-BET Segmentation +1. Extracted PyTorch parameter(s) from HD-BET +1. Begin building Slicer Module with HD-BET parameters + +# Illustrations + + +Swiss Skull Stripper +![SSS3D](SSSMaskResult.png) +![SSS4Spread](SSSMaskSlices.png) + +HD-BET Fast segmentation +![HDBETFast3D](HDBETMaskResult.png) +![HDBETFast4Spread](HDBETMaskSlices.png) + +HD-BET Accurate segmentation (5 model ensemble) +![HDBETAcc3D](HDBETaccMaskResult.png) +![HDBETAcc4Spread](HDBETaccMaskSlices.png) + + +# Background and References + +[EasyReg](https://github.com/uncbiag/easyreg) | +[HD-BET](https://github.com/MIC-DKFZ/HD-BET) | +[Auto Net](https://github.com/SadeghMSalehi/AutoContextCNN) + +Anatomical Data Augmentation via Fluid-based Image Registration Zhengyang Shen, Zhenlin Xu, Sahin Olut, Marc Niethammer. MICCAI 2020. + +Region-specific Diffeomorphic Metric Mapping Zhengyang Shen, François-Xavier Vialard, Marc Niethammer. NeurIPS 2019. + +Networks for Joint Affine and Non-parametric Image Registration Zhengyang Shen, Xu Han, Zhenlin Xu, Marc Niethammer. CVPR 2019. + +Isensee F, Schell M, Tursunova I, Brugnara G, Bonekamp D, Neuberger U, Wick A, Schlemmer HP, Heiland S, Wick W, Bendszus M, Maier-Hein KH, Kickingereder P. Automated brain extraction of multi-sequence MRI using artificial neural networks. Hum Brain Mapp. 2019; 1–13. https://doi.org/10.1002/hbm.24750 + +S. S. Mohseni Salehi, D. Erdogmus and A. Gholipour, "Auto-Context Convolutional Neural Network (Auto-Net) for Brain Extraction in Magnetic Resonance Imaging," in IEEE Transactions on Medical Imaging, vol. 36, no. 11, pp. 2319-2330, Nov. 2017, doi: 10.1109/TMI.2017.2721362. 
diff --git a/PW35_2021_Virtual/Projects/CNN_Brain_Masking/SSSMaskResult.png b/PW35_2021_Virtual/Projects/CNN_Brain_Masking/SSSMaskResult.png new file mode 100644 index 000000000..f4fad55fa Binary files /dev/null and b/PW35_2021_Virtual/Projects/CNN_Brain_Masking/SSSMaskResult.png differ diff --git a/PW35_2021_Virtual/Projects/CNN_Brain_Masking/SSSMaskSlices.png b/PW35_2021_Virtual/Projects/CNN_Brain_Masking/SSSMaskSlices.png new file mode 100644 index 000000000..c7ab6253e Binary files /dev/null and b/PW35_2021_Virtual/Projects/CNN_Brain_Masking/SSSMaskSlices.png differ diff --git a/PW35_2021_Virtual/Projects/DBSNavigation/DBSNav_Refine.gif b/PW35_2021_Virtual/Projects/DBSNavigation/DBSNav_Refine.gif new file mode 100644 index 000000000..46c025896 Binary files /dev/null and b/PW35_2021_Virtual/Projects/DBSNavigation/DBSNav_Refine.gif differ diff --git a/PW35_2021_Virtual/Projects/DBSNavigation/DBSNav_Scene.png b/PW35_2021_Virtual/Projects/DBSNavigation/DBSNav_Scene.png new file mode 100644 index 000000000..80cd615cf Binary files /dev/null and b/PW35_2021_Virtual/Projects/DBSNavigation/DBSNav_Scene.png differ diff --git a/PW35_2021_Virtual/Projects/DBSNavigation/DBSNav_Stimulation.png b/PW35_2021_Virtual/Projects/DBSNavigation/DBSNav_Stimulation.png new file mode 100644 index 000000000..83c2245e4 Binary files /dev/null and b/PW35_2021_Virtual/Projects/DBSNavigation/DBSNav_Stimulation.png differ diff --git a/PW35_2021_Virtual/Projects/DBSNavigation/README.md b/PW35_2021_Virtual/Projects/DBSNavigation/README.md new file mode 100644 index 000000000..96c032d2f --- /dev/null +++ b/PW35_2021_Virtual/Projects/DBSNavigation/README.md @@ -0,0 +1,74 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Deep Brain Stimulation Navigation + +## Key Investigators + +- Simon Oxenford (Charite, Berlin) + +# Project Description + +### Deep Brain Stimulation + +During deep brain stimulation surgery, (depending on the center) micro electrodes are used to reassess the planned 
trajectory of implantation. The idea of the project is to communicate with the micro electrode's controller in order to get their location and signals. Together with the planning information, these can then be visualised in patient space, including different image modalities, atlases and electrophysiology features. + +![](DBSNav_Scene.png) + +- Synergy with [PyDBS Project](../VRDisplayPluginForPyDBSUsingZspace/README.md). + +### Test Stimulations + +Micro electrodes are also used to do test stimulations at increasing steps to induce therapeutic and side effects to better inform final electrode placement. + +![](DBSNav_Stimulation.png) + +- Synergy with [TMS Project](../TMS_Slicer_Module/README.md). + +### Registration Refinement + +In order to achieve higher accuracy of the atlas registration to patient space, we rely on [antsRegistration](https://github.com/simonoxen/SlicerANTs) and we are also working on a module to manually fix mismatches in the registration output. + +![](DBSNav_Refine.gif) + +- Synergy with image registration in general. + + +## Objective + + + +1. Objective A. ... +1. Objective B. ... +1. Objective C. ... + +## Approach and Plan + + + +1. Describe specific steps of **what you plan to do** to achieve the above described objectives. +1. ... +1. ... + +## Progress and Next Steps + + + +1. SlicerNetstim extension added to ExtensionIndex. +1. Preview video of how WarpDrive module works. +1. 
[Recognized usability and performance improvements to implement.](https://github.com/netstim/SlicerNetstim/issues/3) + +[![](https://img.youtube.com/vi/bkXiCPN_KRI/0.jpg)](https://www.youtube.com/watch?v=bkXiCPN_KRI) + + +# Illustrations + + + +# Background and References + + + +- [https://github.com/netstim/SlicerNetstim](https://github.com/netstim/SlicerNetstim) diff --git a/PW35_2021_Virtual/Projects/DICOM-SR/README.md b/PW35_2021_Virtual/Projects/DICOM-SR/README.md new file mode 100644 index 000000000..5ef83abb7 --- /dev/null +++ b/PW35_2021_Virtual/Projects/DICOM-SR/README.md @@ -0,0 +1,87 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Extending DICOM-SR support in dcmjs and adding test cases + +## Key Investigators + +- Emel Alkim (Stanford University) +- Steve Pieper (Isomics) +- Andrey Fedorov (Brigham and Women's Hospital, Boston) + +# Project Description + + + +## Objective + + + +1. Identify gaps in dcmjs for reading/writing DICOM-SR +1. Add support for freehand and segmentation +1. Convert AIM to DICOM-SR and DICOM-SR to AIM and show interoperability between ePAD, OHIF and Slicer 3D + +## Approach and Plan + + + +1. Find some test data and generate test cases for Length and Bidirectional +1. Find some segmentation and/or freehand DICOM-SRs, convert to AIM and back +1. Complete freehand and segmentation implementations and add test cases + +## Progress and Next Steps + + + +1. dcmjs already has full implementation for Length and Bidirectional +2. aimapi has the draft implementation on for the conversion of AIM to DICOM-SR and back +3. Got test data from Andrey ([https://www.dropbox.com/s/98rylgt25b2sm9r/planar_annotations.zip?dl=0]). + - The sample has 1 prostate, one lung. + - They have SCOORD and SCOORD3D instead of measurements to identify polyline and point respectively. 
dcmjs support is dependent on the measurement existence + - for point dcmjs point object expects a CONTAINS:NUM:center => inferred from: SCOORD3D: point structure, whereas the sample point has CONTAINS:SCOORD3D:Image Region + - for polyline (bounding box), dcmjs has no measurement support implemented for freehand. bidirectional implementation is like CONTAINS:NUM:LongAxis => INFERRED FROM: SCOORD: POLYLINE whereas the sample has CONTAINS:SCOORD:Image Region=POLYLINE => SELECTED FROM: IMAGE +4. Put sample datasets into data releases + - [Length](https://github.com/dcmjs-org/data/releases/tag/DICOMSR_CCC2017_Length) + - [Bidirectional](https://github.com/dcmjs-org/data/releases/tag/DICOMSR_CCC2018_Bidirectional) + - [Point Planar](https://github.com/dcmjs-org/data/releases/tag/DICOMSR_Prostate_X) + - [Bounding-box Planar](https://github.com/dcmjs-org/data/releases/tag/DICOMSR_PetCtLung_BB) +5. Added reading [tests](https://github.com/dcmjs-org/dcmjs/blob/sr-update-aim/test/test_sr.js) for each shape +6. Added adapter [test sample](https://github.com/dcmjs-org/dcmjs/blob/sr-update-aim/test/test_adapters.js) for bidirectional +7. Implemented freehand DICOMSR writing and reading, and tested it by the conversion of AIM to DICOMSR. [Freehand AIM](https://github.com/RubinLab/epadlite/blob/feature/dicomsr/test/data/sr/freehand.json) [Freehand DICOMSR](https://github.com/RubinLab/epadlite/blob/feature/dicomsr/test/data/sr/freehand.dcm) + - It uses the TID 300 representation and adds the ROI multiple times for each calculation. David Clunie suggested we use TID 1401, put the ROI first and add the measurements as children. The discussion is on [github PR](https://github.com/dcmjs-org/dcmjs/pull/197). We will fix this as future work but it needs more discussion about changing the implementation for other shapes and how to support the old DICOM SRs. 
We will add test cases when the implementation is finalized +8. Converted length and bidirectional sample DICOMSRs to AIMs. Loaded in ePAD and OHIF +9. Remaining gaps: + - Support for planar ROIs with no measurements + - Saving and reading TID 1401 template + - Segmentation implementation + - The unit of pixel values are either SUV for PETs or HU (otherwise). I added pixelUnit support to the tool, but currently cornerstone tools doesn't support it so it defaults to this behavior + - Multiple Measurement Groups/ROIs in one DICOMSR + - Point/Probe shape support + +# Illustrations + + +Crowds Cure Cancer 2017 Length in OHIF +![Crowds Cure Cancer 2017 Length in OHIF](ccc2017_length_ohif.png) + +Crowds Cure Cancer 2017 Length in ePAD +![Crowds Cure Cancer 2017 Length in ePAD](ccc2017_length_epad.png) + +Crowds Cure Cancer 2018 Bidirectional in OHIF +![Crowds Cure Cancer 2018 Bidirectional in OHIF](ccc2018_bidirectional_ohif.png) + +Crowds Cure Cancer 2018 Bidirectional in ePAD +![Crowds Cure Cancer 2018 Bidirectional in ePAD](ccc2018_bidirectional_epad.png) + +Freehand DICOMSR in DICOM Image Viewer +![Freehand DICOMSR in DICOM Image Viewer](freehand_sr_screenshot.png) + + +# Background and References + + +1. dcmjs library https://github.com/dcmjs-org/dcmjs +1. aimapi library https://github.com/RubinLab/aimapi-js +2. 
epadlite dicomsr branch https://github.com/RubinLab/epadlite/tree/feature/dicomsr diff --git a/PW35_2021_Virtual/Projects/DICOM-SR/ccc2017_length_epad.png b/PW35_2021_Virtual/Projects/DICOM-SR/ccc2017_length_epad.png new file mode 100644 index 000000000..880553b7c Binary files /dev/null and b/PW35_2021_Virtual/Projects/DICOM-SR/ccc2017_length_epad.png differ diff --git a/PW35_2021_Virtual/Projects/DICOM-SR/ccc2017_length_ohif.png b/PW35_2021_Virtual/Projects/DICOM-SR/ccc2017_length_ohif.png new file mode 100644 index 000000000..0d7730e9b Binary files /dev/null and b/PW35_2021_Virtual/Projects/DICOM-SR/ccc2017_length_ohif.png differ diff --git a/PW35_2021_Virtual/Projects/DICOM-SR/ccc2018_bidirectional_epad.png b/PW35_2021_Virtual/Projects/DICOM-SR/ccc2018_bidirectional_epad.png new file mode 100644 index 000000000..25898d5b2 Binary files /dev/null and b/PW35_2021_Virtual/Projects/DICOM-SR/ccc2018_bidirectional_epad.png differ diff --git a/PW35_2021_Virtual/Projects/DICOM-SR/ccc2018_bidirectional_ohif.png b/PW35_2021_Virtual/Projects/DICOM-SR/ccc2018_bidirectional_ohif.png new file mode 100644 index 000000000..491a344e0 Binary files /dev/null and b/PW35_2021_Virtual/Projects/DICOM-SR/ccc2018_bidirectional_ohif.png differ diff --git a/PW35_2021_Virtual/Projects/DICOM-SR/freehand_sr_screenshot.png b/PW35_2021_Virtual/Projects/DICOM-SR/freehand_sr_screenshot.png new file mode 100644 index 000000000..898e06a29 Binary files /dev/null and b/PW35_2021_Virtual/Projects/DICOM-SR/freehand_sr_screenshot.png differ diff --git a/PW35_2021_Virtual/Projects/DeepHeart/README.md b/PW35_2021_Virtual/Projects/DeepHeart/README.md new file mode 100644 index 000000000..628658063 --- /dev/null +++ b/PW35_2021_Virtual/Projects/DeepHeart/README.md @@ -0,0 +1,72 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# DeepHeart integration with MONAILabel + +## Key Investigators (subject to change) + +- Matthew Jolley, MD (Children's Hospital of Philadelphia, 
Philadelphia, PA, United States) +- Christian Herz, MS (Children's Hospital of Philadelphia, Philadelphia, PA, United States) +- Danielle F. Pace, PhD (Martinos Center for Biomedical Imaging, CSAIL, MIT, MGH, HMS, Boston, MA, United States) +- Andras Lasso, PhD (Laboratory for Percutaneous Surgery, Queen’s University, Canada) +- John Witt (Children's Hospital of Philadelphia, Philadelphia, PA, United States) +- Sachidanand Alle (NVIDIA) +- Prerna Dogra (NVIDIA) +- Andrés Díaz-Pinto (King's College London, UK) + +# Project Description + +Creation of a MONAILabel app for leaflet segmentation of heart valves in 3D echocardiographic (3DE) images. In particular, we have been developing AI models for segmentation of Tricuspid Valve leaflets in 3DE images of patients with Hypoplastic left heart syndrome (HLHS). + + + +## Objective + + + +1. Creation of a MONAILabel app +2. Bring own UI elements as FCN will need additional user input + +### Slide 1 +![image](https://user-images.githubusercontent.com/10195822/123651558-91ed2b00-d7f9-11eb-9016-41229ad7f416.png) + +### Slide 2 +![image](https://user-images.githubusercontent.com/10195822/123651594-9addfc80-d7f9-11eb-8f54-f15cba237a1f.png) + +### Slide 3 + +![image](https://user-images.githubusercontent.com/10195822/123651723-b77a3480-d7f9-11eb-889d-f7232dbda0a8.png) + + +### Slide 4 + +![image](https://user-images.githubusercontent.com/10195822/123651929-dd073e00-d7f9-11eb-830b-2f5b940aa2cd.png) + +### Slide 5 + +![image](https://user-images.githubusercontent.com/10195822/123651802-ca8d0480-d7f9-11eb-9b0b-ce38a728fac9.png) + + + + +### Slide 6 + +![image](https://user-images.githubusercontent.com/10195822/123651978-e5f80f80-d7f9-11eb-973b-b7e6a8f377d4.png) + + +## Approach and Plan + + + +1. Use MONAI framework for replacing most of our custom code to minimize overhead +2. Create MONAILabel app based on ported code +3. Create custom UI for additional user inputs + +## Progress and Next Steps + + + +1. 
MONAILabel team created new sample app with support for multi-label segmentation (https://github.com/Project-MONAI/MONAILabel/tree/main/sample-apps/segmentation_liver_and_tumor) +2. MONAILabel team added option to upload local image to the MONAILabel server +3. DeepHeart MONAILabel app created for the segmentation of tricuspid valve leaflets from 3DE images + +![MONAILabel](https://user-images.githubusercontent.com/10195822/124171163-3380b080-da76-11eb-8ce7-4d38100227a0.gif) diff --git a/PW35_2021_Virtual/Projects/DeepLearningforSubcorticalBrainSegmentation/README.md b/PW35_2021_Virtual/Projects/DeepLearningforSubcorticalBrainSegmentation/README.md new file mode 100644 index 000000000..27799d891 --- /dev/null +++ b/PW35_2021_Virtual/Projects/DeepLearningforSubcorticalBrainSegmentation/README.md @@ -0,0 +1,64 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Deep Learning for Subcortical Brain Segmentation + +## Key Investigators + +- Jarrett Rushmore (BU/BWH/MGH) +- Elizabeth Kenneally (Tufts/BWH) +- Sylvain Bouix (BWH) +- Nikos Makris (MGH/BWH/BU) +- Kyle Sunderland (Queens) + +# Project Description + +The goal of the project is to evaluate and implement deep learning approaches to the segmentation of brain structures on MRIs, particularly those with complex shapes and borders. + +## Objective + + + +1. Objective A. Investigate and evaluate the utility of MONAI and/or 3D Slicer AIAA to accomplish our goal. +1. Objective B. Set up hardware and software. +1. Objective C. Generate training datasets based on 50 manually segmented ventricles. +1. Objective D. Produce segmentation from deep learning and evaluate results. + +## Approach and Plan + + + +1. Speak to experts for MONAI and AIAA. +2. Start process of setting up server and testing out software +3. Produce and distribute lateral ventricles +4. Evaluate segmentation results + +## Progress + + + +1. Learned from and consulted with experts. +2. 
Identified a server and began setting it up to run MONAI label +3. Benefited from the benificence of the NAMIC community to get input on segmentation results from high-resolution lateral ventricle segmentation and whole brain segmentation. +4. Were very impressed with the power, timeliness, potential and results of the segmentation results (DICE of 0.88 after only 15 training sets!; good whole brain parcellation after less than one minute). + + +## Next Steps +1. Complete server set-up. +2. Build training sets and networks for subcortical structures; test integration of DL-based results with current segmentation workflow. +3. Investigate training whole brain parcellation with 0.7mm voxel size. + + +# Illustrations + + + +![Namic_1](https://user-images.githubusercontent.com/51300488/124282278-a7739500-db18-11eb-8d9d-94352fd832ac.png) +Results of MONAI on Left Lateral ventricle segmentation + +![Namic_2](https://user-images.githubusercontent.com/51300488/124282344-b8bca180-db18-11eb-80da-196b6a80e4e7.png) +Results of Whole Brain Segmentation (HighResNet; https://github.com/fepegar/highresnet) + +# Background and References + + diff --git a/PW35_2021_Virtual/Projects/GPURigidRegistration/README.md b/PW35_2021_Virtual/Projects/GPURigidRegistration/README.md new file mode 100644 index 000000000..a81461e55 --- /dev/null +++ b/PW35_2021_Virtual/Projects/GPURigidRegistration/README.md @@ -0,0 +1,46 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# GPU Rigid Registration + +## Key Investigators +- Gelel Rezig (Ecole de technologie supérieur, Montréal, Canada) +- Houssem Eddine Gueziri (Montreal Neurological Institute and Hopital, Canada) +- Simon Drouin (Ecole de technologie supérieur, Montréal, Canada) + +# Project Description + +With this project, we would like to add a new feature to Slicer. +The goal of this project is to extract code from an opensource software for image-based neurosurgery guidance: IBIS Neuronav. 
+This code in C++ aims to perform registration between different images using the GPU. It is located in an IBIS Neuronav +plugin. Then, it will be implemented in Slicer to be available for all users. + +## Objective + +1. Objective A. Extract the code from IBIS Neuronav +2. Objective B. Create a file usable by Slicer + + +## Approach and Plan + +1. Isolate the function in IBIS Neuronav +2. Extract the code +3. Create an executable file for Slicer + +## Progress and Next Steps + +1. Identification and preparation of usable code :heavy_check_mark: +- Isolate the code from IBIS and Adapt the code to make it usable outside. +- Creation of a specific library using Cmake. + +(You can find all the code on this github link : https://github.com/IbisNeuronav/GPURigidRegistrationLib) +2. Implementing functionality on Slicer :hourglass_flowing_sand: + + +# Illustrations + +![Registration on real time with GPU](gpu-rigid-reg.gif) +# Background and References + +- http://ibisneuronav.org +- https://github.com/IbisNeuronav/Ibis +- https://github.com/IbisNeuronav/GPURigidRegistrationLib diff --git a/PW35_2021_Virtual/Projects/GPURigidRegistration/gpu-rigid-reg.gif b/PW35_2021_Virtual/Projects/GPURigidRegistration/gpu-rigid-reg.gif new file mode 100644 index 000000000..d944b4087 Binary files /dev/null and b/PW35_2021_Virtual/Projects/GPURigidRegistration/gpu-rigid-reg.gif differ diff --git a/PW35_2021_Virtual/Projects/IGTrain/README.md b/PW35_2021_Virtual/Projects/IGTrain/README.md new file mode 100644 index 000000000..8605803bb --- /dev/null +++ b/PW35_2021_Virtual/Projects/IGTrain/README.md @@ -0,0 +1,92 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# IGT training material for francophone countries + +## Key Investigators + +- Nayra Pumar (Ebatinca, Las Palmas de Gran Canaria, Spain) +- Mohamed El Moctar Septy (Univ. Al Aasriya, Nuakchot, Mauritania) +- Yahya Tfeil (Univ. 
Al Aasriya, Nuakchot, Mauritania) +- Freddy Gnangnon (The National University of Benin, Cotonou, Benin) +- Asmaa Skareb (Ebatinca, Las Palmas de Gran Canaria, Spain) +- Marilola Afonso (Universidad de Las Palmas de Gran Canaria, Spain) +- Juan Ruiz Alzola (Universidad de Las Palmas de Gran Canaria, Spain) + +# Project Description + + +Review of available IGT tutorials to develop training material for French-speaking countries. + +## Objective + + + +1. Objective A. Compilation of the existing training materials +1. Objective B. Revision of the training material: current software versions + +## Approach and Plan + + + +1. List of available tutorials and training resources +1. Review of the adequacy of these contents to the current versions. +1. A new pedagogical approach +1. Create training material in French + +## Progress and Next Steps + + +After reviewing and testing existing IGT introductory tutorials, we decided to use a different approach: more oriented to the line already followed in the training materials used for African countries, where the more technical contents appear later, once the basic skills have already been acquired. It is a more "learn by doing" approach, where the user is guided through an example and then all the technical background behind it is explained. + +The idea is to help and guide different teams taking their first steps into IGT, as the tutorials are sometimes aimed too much at developers or are quite equipment-dependent. The list of tutorials is planned to be modular, so new ones will be added as the implementation program advances in the different participating teams. + +This introduction to IGT is also designed to be accessible to all users, so that specific equipment (such as tracking devices, ultrasound scanners, etc.) is dispensed with from the outset and a first practical case is presented using a webcam and optical markers from the Aruco library. 
An example for the first steps/chapters for this training program would be: + +1. Installation of 3DSlicer and Plus modules. +1. Operation of the modules to be used in the work with IGT. +1. Creation of synthetic scenes in 3DSlicer: drawing of elements, importing of elements. +1. Linear transformations: basics and definition in 3DSlicer. +1. Plus configuration +1. First case study: connect the webcam with 3DSlicer via Plus +1. Second case study: use of Aruco markers +1. Playback of recorded scenes with Plus + +### Internationalization + +Tutorials will be published in four languages: + +* French +* English +* Spanish +* Arabic + +In this project week the importance of having multi-lingual content has been assessed. Even if clinicians and engineers worldwide speak and use English in their daily work, discussing with your colleagues or providing training in your native language is a necessity, and it makes things easier, especially for those who are starting. + +There is also the possibility for Portuguese versions, but we don't have the translator commitment yet. + +### Aim for this project + +The original aim for this project was the francophone regions, but after several contacts during this project week with members of the Slicer community based in Latin America we've decided to extend our reach for the Hispanic community as well. + +In Las Palmas de Gran Canaria we are already used to working with African countries, thanks to the geographical proximity and the projects that have been developed in recent years. Now we want to take advantage of another benefit, which is the common language and culture that we have with Latin America and also serve as a gateway for the emerging community of 3D Slicer in Spanish. + +### The website + +In the upcoming weeks we will set up a website for these online training materials. The URL will be posted on this page once it's online. 
+ +The team will be integrated by content creators (the ones that will write the tutorials, describing the steps they are taking while setting up their IGT projects) and translators (that will also work as beta-testers for the tutorials, as we want to make them as easy to understand and follow as possible). + +If you are interested in participated, feel free to drop a message to Nayra Pumar at Ebatinca. Contributors are welcome. + + + + + diff --git a/PW35_2021_Virtual/Projects/InteractiveSliceIntersections/Image_InteractiveSliceIntersections.png b/PW35_2021_Virtual/Projects/InteractiveSliceIntersections/Image_InteractiveSliceIntersections.png new file mode 100644 index 000000000..6310e9025 Binary files /dev/null and b/PW35_2021_Virtual/Projects/InteractiveSliceIntersections/Image_InteractiveSliceIntersections.png differ diff --git a/PW35_2021_Virtual/Projects/InteractiveSliceIntersections/Image_Toolbar.png b/PW35_2021_Virtual/Projects/InteractiveSliceIntersections/Image_Toolbar.png new file mode 100644 index 000000000..64d80cab1 Binary files /dev/null and b/PW35_2021_Virtual/Projects/InteractiveSliceIntersections/Image_Toolbar.png differ diff --git a/PW35_2021_Virtual/Projects/InteractiveSliceIntersections/README.md b/PW35_2021_Virtual/Projects/InteractiveSliceIntersections/README.md new file mode 100644 index 000000000..ac2797596 --- /dev/null +++ b/PW35_2021_Virtual/Projects/InteractiveSliceIntersections/README.md @@ -0,0 +1,116 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Interactive Slice Intersections + +## Key Investigators + +- David García-Mato (Ebatinca S.L., Las Palmas de Gran Canaria, Spain) +- Kyle Sunderland (PerkLab, Queen's University, Kingston, ON, Canada) +- Csaba Pinter (Ebatinca S.L., Las Palmas de Gran Canaria, Spain) +- Andras Lasso (PerkLab, Queen's University, Kingston, ON, Canada) + +# Project Description + +Current slice navigation is based on hotkeys like **Shift+drag** for moving the 3D cursor, or **Ctrl+Alt+drag&drop** 
for rotation of the slice planes when slice intersections are shown. It would be great to have these features available more explicitly that could be used directly with the mouse, avoiding the use of keyboard. + +In this project, we want to develop an **interactive slice intersections mode** to enable users to navigate slices using interaction handles (similar to markups translation/rotation). + +Details: + +- Unlike the intersection lines, the interactive slice representation lines will be drawn from one window border to the opposite, with dash line for the part that does not correspond to the intersection of the planes. +- When the mouse is over the line, a widget appears with translation arrows under the mouse and rotation arrows close to it, with the tip of the arrows oriented towards the center of intersection +- If we click on the translation arrows and move the mouse, the line is translated (these are under the mouse so that this basic feature can be done basically by quick drag&drop) +- If we click on the rotation arrows and move the mouse, the 2 lines are rotated to keep the current angle (the user would need to move the mouse to get to these arrows providing the more advanced feature) +- There is no distinction for the direction of the movement, both sides of the arrows allow to move in both directions +- When the mouse goes close to the intersection center, the widget representation is different (circle at the center, 4 arrows ?) and we can translate both lines (like pressing Shift key) +- When the mouse is over a handle it is highlighted +- When we click on the widget and start moving the mouse, the widget representation is hidden or made mostly transparent (not the lines) to see the underlying image +- We would like to have access to some interaction events, like StartInteractionEvent, ModifiedEvent, EndInteractionEvent at least. + +A sketch is shown on the "Illustrations" section below. 
+ +More information in this GitHub issue: [here](https://github.com/Slicer/Slicer/issues/5544) + +## Objective + + + +1. Create base classes to control interaction handles. + +2. Develop the interactive slice intersections mode. + +3. Customize slice interaction handles. + +4. Test it on sample images. + + +## Approach and Plan + + + +1. Explore the rendering pipeline for current Markups interaction handles (i.e., MarkupsInteractionPipeline class in vtkSlicerMarkupsWidgetRepresentation). :heavy_check_mark: + +2. Move base class and display options to a lower level. :heavy_check_mark: + +3. Create new classes vtkMRMLSliceIntersectionInteractionWidget and vtkMRMLSliceIntersectionInteractionRepresentation in DisplayableManager. :heavy_check_mark: + +4. Customize handles to control slice intersection in an intuitive manner. :heavy_check_mark: + +5. Create new vtkMRMLSliceDisplayNode :heavy_check_mark: + +6. Create property in vtkMRMLSliceDisplayNode to control handles visibility mode :heavy_check_mark: + + +## Progress and Next Steps + + + +1. Migrate pipelines for generic widget interaction handles from Markups module to **new base classes** in DisplayableManager (**vtkMRMLInteractionWidget** and **vtkMRMLInteractionWidgetRepresentation**). :heavy_check_mark: + +2. Improve base interaction classes to allow easy implementation of **custom interaction handles**. :heavy_check_mark: + +3. Implement interaction handles test case for manipulating transform nodes. :heavy_check_mark: + + **BEFORE** + + BEFORE + + **AFTER** + + AFTER + +4. Create new classes to render interaction handles for interactive slice intersection: vtkMRMLSliceIntersectionInteractionWidget and vtkMRMLSliceIntersectionInteractionRepresentation. :heavy_check_mark: + +5. Add new option in ViewersToolbar to select interactive slice intersection mode. This option is only enabled and visible when slice intersection mode is active. :heavy_check_mark: + + drawing + +6. 
Render interaction handles in slice views and enable interactions. + + HANDLES + +7. Modify interactions to update slice intersections. Currently, sliceToRAS transform of the slice node is being updated. :heavy_check_mark: + +8. Customize handles for interactive slice intersection. :heavy_check_mark: + +9. Test new mode in sample images :heavy_check_mark: + +10. Integrate in Slicer. Pull request: [https://github.com/Slicer/Slicer/pull/6008](https://github.com/Slicer/Slicer/pull/6008) + +# Illustrations + +Final video: + +https://user-images.githubusercontent.com/10816661/150459600-ba59f51b-b2aa-499d-abcd-3f023809a7ed.mp4 + +# Background and References + +- GitHub issue: [here](https://github.com/Slicer/Slicer/issues/5544) + +- Slicer branch for interactive slice intersection: [here](https://github.com/dgmato/Slicer) + +- Current branch for migration of interaction handles: [here](https://github.com/Sunderlandkyl/Slicer/tree/interaction_display_manager) + + + + diff --git a/PW35_2021_Virtual/Projects/InteractiveSliceIntersections/TransformInteraction_Cube.gif b/PW35_2021_Virtual/Projects/InteractiveSliceIntersections/TransformInteraction_Cube.gif new file mode 100644 index 000000000..b6ac60343 Binary files /dev/null and b/PW35_2021_Virtual/Projects/InteractiveSliceIntersections/TransformInteraction_Cube.gif differ diff --git a/PW35_2021_Virtual/Projects/InteractiveSliceIntersections/TransformInteraction_Handles.gif b/PW35_2021_Virtual/Projects/InteractiveSliceIntersections/TransformInteraction_Handles.gif new file mode 100644 index 000000000..02fa2d42b Binary files /dev/null and b/PW35_2021_Virtual/Projects/InteractiveSliceIntersections/TransformInteraction_Handles.gif differ diff --git a/PW35_2021_Virtual/Projects/MONAILabel/README.md b/PW35_2021_Virtual/Projects/MONAILabel/README.md new file mode 100644 index 000000000..cb7be7ca6 --- /dev/null +++ 
b/PW35_2021_Virtual/Projects/MONAILabel/README.md @@ -0,0 +1,77 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# MONAI Label in Slicer + +## Key Investigators + +- Andres Diaz-Pinto (King's College London) +- Fernando Pérez-García (UCL/KCL) +- Sachidanand Alle (NVIDIA) +- Alvin Ihsani (NVIDIA) +- Vishwesh Nath (NVIDIA) +- Erik Ziegler (Radical Imaging) +- Alireza Sedghi (Queen's University) +- Dennis Bontempi (Maastricht University) +- Andrey Fedorov (Brigham and Women’s Hospital, Boston) +- Steve Pieper (Isomics) + +# Project Description + + + +Introduce MONAI Label, create new Apps, and implement new features to facilitate medical image segmentation using deep learning. + +## Objective + + + +1. Introduce MONAI Label and the Slicer plugin to all participants +3. Implement MONAI Label Apps to segment spine on CT images and brain left ventricle on MR images +4. Implement a MONAI Label App for multiple label segmentation. +5. Design a data wrapper to connect the Image Data Commons [IDC](https://imaging.datacommons.cancer.gov/) with [MONAI Label](https://github.com/Project-MONAI/MONAILabel) +6. Design [OHIF](https://ohif-platform-docs.netlify.app/) plugin for MONAI Label + + +## Approach and Plan + + + +1. Introduce MONAI Label in breakout session +2. Implement the MONAI Label Apps +3. Get more practice on the IDC +4. Create DICOM datastore for MONAI Label +5. Plan and discuss features to have in the OHIF plugin + +## Progress and Next Steps + + + +1. [Introduced MONAI Label](Slicer_demo_MONAILabel_29June2021.pdf) +2. [Create MONAI Label Apps](https://github.com/Project-MONAI/MONAILabel/tree/main/sample-apps) +3. [Added multilabel support](https://github.com/Project-MONAI/MONAILabel/issues/154) +4. Data wrapper for IDC and OHIF plugin development will contnue after Slicer week +5. 
Data share agreements will be signed to improve vertebra and muscle segmentation after Slicer week + +# Illustrations + + + +![MONAI Label App for vertebra segmentation](deepedit_vertebra.png) + +![MONAI Label App for brain left ventricle segmentation](deepedit_left_ventricle.png) + +# Background and References + + +[MONAI Label](https://github.com/Project-MONAI/MONAILabel) + +[MONAI Label Apps](https://github.com/Project-MONAI/MONAILabel/tree/main/sample-apps) + +[MONAI Label Apps Zoo](https://github.com/diazandr3s/MONAILabel-Apps) + +[Imaging Data Commons](https://imaging.datacommons.cancer.gov/) + +[OHIF](https://ohif-platform-docs.netlify.app/) diff --git a/PW35_2021_Virtual/Projects/MONAILabel/Slicer_demo_MONAILabel_29June2021.pdf b/PW35_2021_Virtual/Projects/MONAILabel/Slicer_demo_MONAILabel_29June2021.pdf new file mode 100644 index 000000000..49508773d Binary files /dev/null and b/PW35_2021_Virtual/Projects/MONAILabel/Slicer_demo_MONAILabel_29June2021.pdf differ diff --git a/PW35_2021_Virtual/Projects/MONAILabel/deepedit_left_ventricle.png b/PW35_2021_Virtual/Projects/MONAILabel/deepedit_left_ventricle.png new file mode 100644 index 000000000..7fc509fbd Binary files /dev/null and b/PW35_2021_Virtual/Projects/MONAILabel/deepedit_left_ventricle.png differ diff --git a/PW35_2021_Virtual/Projects/MONAILabel/deepedit_vertebra.png b/PW35_2021_Virtual/Projects/MONAILabel/deepedit_vertebra.png new file mode 100644 index 000000000..178ed98fb Binary files /dev/null and b/PW35_2021_Virtual/Projects/MONAILabel/deepedit_vertebra.png differ diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/README.md b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/README.md new file mode 100644 index 000000000..dc11d86cc --- /dev/null +++ b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/README.md @@ -0,0 +1,98 @@ + +Back 
to [Projects List](../../README.md#ProjectsList) + +# Markerless tracking with RGBD cameras for low cost neuronavigation + +## Key Investigators + +- Julie Alvarez (Neurotrauma Center) +- Gabriel Vargas Grau (Universidad de Santander) +- Juan Camilo Gamboa (McGill University) +- Andrés Gamboa (Neurotrauma/Universidad Politécnica de Valencia/) +- Rebecca Hisey (Perk Lab/ Queen's University) +- Jhaczon Meza (Optilab/ Technological University of Cartagena) + +# Project Description + +Neuronavigation guided TMS (nTMS) has become an increasingly used tool in neurosurgical clinical practice and has proven to be especially useful in preoperative brain mapping for surgical planning in brain tumor surgery. +However there are not many commercial neuronavigation systems available that meet all the needs for TMS applications, like estimating the electrical field over the cortex. Some other technical issues related to tracking functions are found in these systems. Because of their high cost (near $US 100.000) and technical requirements they remain out of reach for many institutions in low/middle income countries. +There are low-cost tools with which it is possible to implement a system with almost any function provided by commercial systems available today, and with the possibility of continuous development and customization. --> + +## Objective + +Develop and validate a workflow for the implementation of a prototype markerless neuronavigation system for non-invasive functional mapping with nTMS, by combining low-cost optical sensors (INTEL REALSENSE) and open source software 3D SLICER + +1. Objective A. Validating a workflow for markerless nTMS motor mapping for neurosurgical preoperative planning +1. Objective B. Comparing preoperative non-invasive TMS brain mapping VS direct intraoperative cortical stimulation mapping + +## Approach and Plan + +1. Defining workflow for nTMS motor mapping in 3D slicer with low cost RGB-D cameras +2. 
Validating preoperative markerless tracking nTMS motor mapping results with intraoperative direct cortical stimulation mapping + +## Progress and Next Steps + +1. Proof of concept: conventional TMS cortical mapping and validating result with intraoperative direct cortical stimulation motor mapping (expert analysis) +2. Identified tools (Modules/extensions) in 3D slicer and designed offline approach for hand knob hotspot by markerless nTMS motor mapping +3. Created module for Intel Realsense SR300 RGB and DEPTH streaming in slicer and TMS data recording +4. Created module for obtaining point cloud from depth streaming + +# Illustrations +# Workflow for preoperative conventional (not navigated) TMS cortical mapping and Hand Knob estimation + +![image](https://user-images.githubusercontent.com/16233997/122260473-f07ce580-ce98-11eb-9e63-291c925b7761.png) +![image](https://user-images.githubusercontent.com/16233997/122260723-3639ae00-ce99-11eb-82c0-39fff6c6d88c.png) +![image](https://user-images.githubusercontent.com/16233997/122260754-3e91e900-ce99-11eb-8998-3f8fc8bc00ee.png) +![image](https://user-images.githubusercontent.com/16233997/122260792-49e51480-ce99-11eb-991a-3be64ac0a659.png) +![image](https://user-images.githubusercontent.com/16233997/122260835-56696d00-ce99-11eb-865e-2386a943ff87.png) +![image](https://user-images.githubusercontent.com/16233997/122260870-61bc9880-ce99-11eb-9310-d1fc46f77b9b.png) +![image](https://user-images.githubusercontent.com/16233997/122262646-3b97f800-ce9b-11eb-863e-8d83538097d4.png) +![image](https://user-images.githubusercontent.com/16233997/122262675-45216000-ce9b-11eb-9dfc-95906d70d363.png) + +https://user-images.githubusercontent.com/16233997/122260947-7862ef80-ce99-11eb-8663-673c1054ffc8.mp4 + +https://user-images.githubusercontent.com/16233997/122261001-8a449280-ce99-11eb-8d87-6e3e497b1f62.mp4 + +# Record TMS Data Module +Demonstrated with Central Line Data + 
+![image](https://user-images.githubusercontent.com/22460517/124212725-7619ab80-dabd-11eb-98a5-607b40df7d3f.png) +![image](https://user-images.githubusercontent.com/16233997/124270117-61f79d80-db01-11eb-9fde-3252b5647b82.png) + +# Frames to Points Module +Demonstrated with Central Line Data + +https://user-images.githubusercontent.com/22460517/124278099-d50a0f80-db13-11eb-989b-326994265966.mp4 + +![image](https://user-images.githubusercontent.com/22460517/124212338-cd6b4c00-dabc-11eb-93fc-41eac703377f.png) +![image](https://user-images.githubusercontent.com/22460517/124212411-ed9b0b00-dabc-11eb-8eb2-eee89f4a1058.png) + +# Background and References +https://projectweek.na-mic.org/PW31_2019_Boston/Projects/RealSensePatientRegistration/ + +https://github.com/pieper/facenav + +Barker, A. T., Jalinous, R., & Freeston, I. L. (1985). NON-INVASIVE MAGNETIC STIMULATION OF HUMAN MOTOR CORTEX. The Lancet, 325(8437), 1106– 1107. https://doi.org/https://doi.org/10.1016/S0140-6736(85)92413-4 +Butenschön, V. M., Ille, S., Sollmann, N., Meyer, B., & Krieg, S. M. (2018). Cost effectiveness of preoperative motor mapping with navigated transcranial magnetic brain stimulation in patients with high-grade glioma. Neurosurgical Focus, 44(6), E18. https://doi.org/10.3171/2018.3.FOCUS1830 +Coburger, J., Musahl, C., Henkes, H., Horvath-Rizea, D., Bittl, M., Weissbach, C., & Hopf, N. (2013). Comparison of navigated transcranial magnetic stimulation and functional magnetic resonance imaging for preoperative mapping in rolandic tumor surgery. Neurosurgical Review, 36(1), 65–66. +https://doi.org/10.1007/s10143-012-0413-2 +Conti, A., Raffa, G., Granata, F., Rizzo, V., Germanò, A., & Tomasello, F. (2014). Navigated transcranial magnetic stimulation for “somatotopic” tractography of the corticospinal tract. Neurosurgery, 10 Suppl 4, 542–554; discussion 554. https://doi.org/10.1227/NEU.0000000000000502 +Diehl, C. D., Schwendner, M. J., Sollmann, N., Oechsner, M., Meyer, B., Combs, S. 
E., & Krieg, S. M. (2019). Application of presurgical navigated transcranial magnetic stimulation motor mapping for adjuvant radiotherapy planning in patients with high-grade gliomas. Radiotherapy and Oncology : Journal of the European Society for Therapeutic Radiology and Oncology, 138, 30–37. https://doi.org/10.1016/j.radonc.2019.04.029 +Duffau, H. (2020a). Can Non-invasive Brain Stimulation Be Considered to Facilitate Reoperation for Low-Grade Glioma Relapse by Eliciting Neuroplasticity? Frontiers in Neurology, 11, 582489. https://doi.org/10.3389/fneur.2020.582489 +Duffau, H. (2020b). Functional mapping before and after low-grade glioma surgery: A new way to decipher various spatiotemporal patterns of individual neuroplastic potential in brain tumor patients. In Cancers (Vol. 12, Issue 9, pp. 1–21). MDPI AG. https://doi.org/10.3390/cancers12092611 +Flouty, O., Reddy, C., Holland, M., Kovach, C., Kawasaki, H., Oya, H., Greenlee, J., Hitchon, P., & Howard, M. (2017). Precision surgery of rolandic glioma and insights from extended functional mapping. Clinical Neurology and Neurosurgery, 163, 60–66. +https://doi.org/https://doi.org/10.1016/j.clineuro.2017.10.008 +Frey, D., Schilt, S., Strack, V., Zdunczyk, A., Rösler, J., Niraula, B., Vajkoczy, P., & Picht, T. (2014). Navigated transcranial magnetic stimulation improves the treatment outcome in patients with brain tumors in motor eloquent locations. Neuro-Oncology, 16(10), 1365–1372. https://doi.org/10.1093/neuonc/nou110 +Jensen, R. L. (2014). Navigated transcranial magnetic stimulation: another tool for preoperative planning for patients with motor-eloquent brain tumors. In Neuro oncology (Vol. 16, Issue 10, pp. 1299–1300). https://doi.org/10.1093/neuonc/nou213 +Koh, T. H., & Eyre, J. A. (1988). Maturation of corticospinal tracts assessed by electromagnetic stimulation of the motor cortex. Archives of Disease in Childhood, 63(11), 1347–1352. https://doi.org/10.1136/adc.63.11.1347 +Lefaucheur, J.-P., & Picht, T. 
(2016). The value of preoperative functional cortical mapping using navigated TMS. Neurophysiologie Clinique = Clinical Neurophysiology, 46(2), 125–133. https://doi.org/10.1016/j.neucli.2016.05.001 +Raffa, G., Picht, T., Scibilia, A., Rösler, J., Rein, J., Conti, A., Ricciardo, G., Cardali, S. M., Vajkoczy, P., & Germanò, A. (2019). Surgical treatment of meningiomas located in the rolandic area: the role of navigated transcranial magnetic stimulation for preoperative planning, surgical strategy, and prediction of arachnoidal cleavage and motor outcome. Journal of Neurosurgery, 1–12. https://doi.org/10.3171/2019.3.JNS183411 +Raffa, G., Scibilia, A., Conti, A., Cardali, S. M., Rizzo, V., Terranova, C., Quattropani, M. C., Marzano, G., Ricciardo, G., Vinci, S. L., & Germanò, A. (2019). Multimodal Surgical Treatment of High-Grade Gliomas in the Motor Area: The Impact of the Combination of Navigated Transcranial Magnetic Stimulation and Fluorescein-Guided Resection. World Neurosurgery, 128, e378–e390. https://doi.org/https://doi.org/10.1016/j.wneu.2019.04.158 +Seidel, K., Häni, L., Lutz, K., Zbinden, C., Redmann, A., Consuegra, A., Raabe, A., & Schucht, P. (2019). Postoperative navigated transcranial magnetic stimulation to predict motor recovery after surgery of tumors in motor eloquent areas. Clinical Neurophysiology : Official Journal of the International Federation of Clinical Neurophysiology, 130(6), 952–959. https://doi.org/10.1016/j.clinph.2019.03.015 +Sollmann, N., Fratini, A., Zhang, H., Zimmer, C., Meyer, B., & Krieg, S. M. (2019). Associations between clinical outcome and tractography based on navigated transcranial magnetic stimulation in patients with language-eloquent brain lesions. Journal of Neurosurgery, 132(4), 1033–1042. +https://doi.org/10.3171/2018.12.JNS182988 +Takahashi, S., Vajkoczy, P., & Picht, T. (2013). Navigated transcranial magnetic stimulation for mapping the motor cortex in patients with rolandic brain tumors. 
Neurosurgical Focus, 34(4), E3. +https://doi.org/10.3171/2013.1.FOCUS133 +Tarapore, P. E., Findlay, A. M., Honma, S. M., Mizuiri, D., Houde, J. F., Berger, M. S., & Nagarajan, S. S. (2013). Language mapping with navigated repetitive TMS: proof of technique and validation. NeuroImage, 82, 260–272. https://doi.org/10.1016/j.neuroimage.2013.05.018 + +Source code for frame to point cloud module can be found here: https://github.com/RebeccaHisey/RGBD_Tracking diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/CMakeLists.txt b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/CMakeLists.txt new file mode 100644 index 000000000..07639cc32 --- /dev/null +++ b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/CMakeLists.txt @@ -0,0 +1,27 @@ +cmake_minimum_required(VERSION 3.13.4) + +project(RGBD_Tracking) + +#----------------------------------------------------------------------------- +# Extension meta-information +set(EXTENSION_HOMEPAGE "https://www.slicer.org/wiki/Documentation/Nightly/Extensions/RGBD_Tracking") +set(EXTENSION_CATEGORY "Navigation") +set(EXTENSION_CONTRIBUTORS "John Doe (AnyWare Corp.)") +set(EXTENSION_DESCRIPTION "This is an example of a simple extension") +set(EXTENSION_ICONURL "http://www.example.com/Slicer/Extensions/RGBD_Tracking.png") +set(EXTENSION_SCREENSHOTURLS "http://www.example.com/Slicer/Extensions/RGBD_Tracking/Screenshots/1.png") +set(EXTENSION_DEPENDS "NA") # Specified as a list or "NA" if no dependencies + +#----------------------------------------------------------------------------- +# Extension dependencies +find_package(Slicer REQUIRED) +include(${Slicer_USE_FILE}) + +#----------------------------------------------------------------------------- +# Extension modules +add_subdirectory(ExportFrameToPoints) +## NEXT_MODULE + 
+#----------------------------------------------------------------------------- +include(${Slicer_EXTENSION_GENERATE_CONFIG}) +include(${Slicer_EXTENSION_CPACK}) diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/CMakeLists.txt b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/CMakeLists.txt new file mode 100644 index 000000000..b32d46412 --- /dev/null +++ b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/CMakeLists.txt @@ -0,0 +1,31 @@ +#----------------------------------------------------------------------------- +set(MODULE_NAME ExportFrameToPoints) + +#----------------------------------------------------------------------------- +set(MODULE_PYTHON_SCRIPTS + ${MODULE_NAME}.py + ) + +set(MODULE_PYTHON_RESOURCES + Resources/Icons/${MODULE_NAME}.png + Resources/UI/${MODULE_NAME}.ui + ) + +#----------------------------------------------------------------------------- +slicerMacroBuildScriptedModule( + NAME ${MODULE_NAME} + SCRIPTS ${MODULE_PYTHON_SCRIPTS} + RESOURCES ${MODULE_PYTHON_RESOURCES} + WITH_GENERIC_TESTS + ) + +#----------------------------------------------------------------------------- +if(BUILD_TESTING) + + # Register the unittest subclass in the main script as a ctest. + # Note that the test will also be available at runtime. 
+ slicer_add_python_unittest(SCRIPT ${MODULE_NAME}.py) + + # Additional build-time testing + add_subdirectory(Testing) +endif() diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/ExportFrameToPoints.py b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/ExportFrameToPoints.py new file mode 100644 index 000000000..4a6e5d709 --- /dev/null +++ b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/ExportFrameToPoints.py @@ -0,0 +1,343 @@ +import os +import numpy +import unittest +import logging +import vtk, qt, ctk, slicer +from slicer.ScriptedLoadableModule import * +from slicer.util import VTKObservationMixin + +# +# ExportFrameToPoints +# + +class ExportFrameToPoints(ScriptedLoadableModule): + """Uses ScriptedLoadableModule base class, available at: + https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py + """ + + def __init__(self, parent): + ScriptedLoadableModule.__init__(self, parent) + self.parent.title = "Export Frames To Points" # TODO: make this more human readable by adding spaces + self.parent.categories = ["RGB-D Tracking"] # TODO: set categories (folders where the module shows up in the module selector) + self.parent.dependencies = [] # TODO: add here list of module names that this module requires + self.parent.contributors = ["Rebecca Hisey (Queen's University)"] # TODO: replace with "Firstname Lastname (Organization)" + # TODO: update with short description of the module and a link to online module documentation + self.parent.helpText = """ +This module allows for the conversion of depth images to point clouds +""" + # TODO: replace with organization, grant and thanks + self.parent.acknowledgementText = """ +This file was originally developed by Jean-Christophe Fillion-Robin, Kitware 
Inc., Andras Lasso, PerkLab, +and Steve Pieper, Isomics, Inc. and was partially funded by NIH grant 3P41RR013218-12S1. +""" + +# +# ExportFrameToPointsWidget +# + +class ExportFrameToPointsWidget(ScriptedLoadableModuleWidget, VTKObservationMixin): + """Uses ScriptedLoadableModuleWidget base class, available at: + https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py + """ + + def __init__(self, parent=None): + """ + Called when the user opens the module the first time and the widget is initialized. + """ + ScriptedLoadableModuleWidget.__init__(self, parent) + VTKObservationMixin.__init__(self) # needed for parameter node observation + self.logic = None + self._parameterNode = None + self._updatingGUIFromParameterNode = False + + def setup(self): + """ + Called when the user opens the module the first time and the widget is initialized. + """ + ScriptedLoadableModuleWidget.setup(self) + + # Load widget from .ui file (created by Qt Designer). + # Additional widgets can be instantiated manually and added to self.layout. + uiWidget = slicer.util.loadUI(self.resourcePath('UI/ExportFrameToPoints.ui')) + self.layout.addWidget(uiWidget) + self.ui = slicer.util.childWidgetVariables(uiWidget) + + # Set scene in MRML widgets. Make sure that in Qt designer the top-level qMRMLWidget's + # "mrmlSceneChanged(vtkMRMLScene*)" signal in is connected to each MRML widget's. + # "setMRMLScene(vtkMRMLScene*)" slot. + uiWidget.setMRMLScene(slicer.mrmlScene) + + # Create logic class. Logic implements all computations that should be possible to run + # in batch mode, without a graphical user interface. 
+ self.logic = ExportFrameToPointsLogic() + + # Connections + + # These connections ensure that we update parameter node when scene is closed + self.addObserver(slicer.mrmlScene, slicer.mrmlScene.StartCloseEvent, self.onSceneStartClose) + self.addObserver(slicer.mrmlScene, slicer.mrmlScene.EndCloseEvent, self.onSceneEndClose) + + self.ui.depthNodeSelector.setMRMLScene(slicer.mrmlScene) + + # Buttons + self.ui.thresholdSlider.connect('valueChanged(int)',self.onThresholdModified) + self.ui.exportToPointsButton.connect('clicked(bool)', self.onApplyButton) + + # Make sure parameter node is initialized (needed for module reload) + self.initializeParameterNode() + + def cleanup(self): + """ + Called when the application closes and the module widget is destroyed. + """ + self.removeObservers() + + def enter(self): + """ + Called each time the user opens this module. + """ + # Make sure parameter node exists and observed + self.initializeParameterNode() + + def exit(self): + """ + Called each time the user opens a different module. + """ + # Do not react to parameter node changes (GUI wlil be updated when the user enters into the module) + self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode) + + def onSceneStartClose(self, caller, event): + """ + Called just before the scene is closed. + """ + # Parameter node will be reset, do not use it anymore + self.setParameterNode(None) + + def onSceneEndClose(self, caller, event): + """ + Called just after the scene is closed. + """ + # If this module is shown while the scene is closed then recreate a new parameter node immediately + if self.parent.isEntered: + self.initializeParameterNode() + + def initializeParameterNode(self): + """ + Ensure parameter node exists and observed. + """ + # Parameter node stores all user choices in parameter values, node selections, etc. + # so that when the scene is saved and reloaded, these settings are restored. 
+ + self.setParameterNode(self.logic.getParameterNode()) + + # Select default input nodes if nothing is selected yet to save a few clicks for the user + if not self._parameterNode.GetNodeReference("DepthImageNode"): + firstVolumeNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode") + if firstVolumeNode: + self._parameterNode.SetNodeReferenceID("DepthImageNode", firstVolumeNode.GetID()) + + def setParameterNode(self, inputParameterNode): + """ + Set and observe parameter node. + Observation is needed because when the parameter node is changed then the GUI must be updated immediately. + """ + + if inputParameterNode: + self.logic.setDefaultParameters(inputParameterNode) + + # Unobserve previously selected parameter node and add an observer to the newly selected. + # Changes of parameter node are observed so that whenever parameters are changed by a script or any other module + # those are reflected immediately in the GUI. + if self._parameterNode is not None: + self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode) + self._parameterNode = inputParameterNode + if self._parameterNode is not None: + self.addObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode) + + # Initial GUI update + self.updateGUIFromParameterNode() + + def updateGUIFromParameterNode(self, caller=None, event=None): + """ + This method is called whenever parameter node is changed. + The module GUI is updated to show the current state of the parameter node. 
+ """ + + if self._parameterNode is None or self._updatingGUIFromParameterNode: + return + + # Make sure GUI changes do not call updateParameterNodeFromGUI (it could cause infinite loop) + self._updatingGUIFromParameterNode = True + + # All the GUI updates are done + self._updatingGUIFromParameterNode = False + + def updateParameterNodeFromGUI(self, caller=None, event=None): + """ + This method is called when the user makes any change in the GUI. + The changes are saved into the parameter node (so that they are restored when the scene is saved and loaded). + """ + + if self._parameterNode is None or self._updatingGUIFromParameterNode: + return + + wasModified = self._parameterNode.StartModify() # Modify all properties in a single batch + + self._parameterNode.EndModify(wasModified) + + def onThresholdModified(self): + self.logic.setThresholdValue(self.ui.thresholdSlider.value) + + def onApplyButton(self): + self.logic.exportDepthToPoints(self.ui.depthNodeSelector.currentNode().GetName(),self.ui.thresholdSlider.value) + + +# +# ExportFrameToPointsLogic +# + +class ExportFrameToPointsLogic(ScriptedLoadableModuleLogic): + """This class should implement all the actual + computation done by your module. The interface + should be such that other python code can import + this class and make use of the functionality without + requiring an instance of the Widget. + Uses ScriptedLoadableModuleLogic base class, available at: + https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py + """ + + def __init__(self): + """ + Called when the logic class is instantiated. Can be used for initializing member variables. + """ + ScriptedLoadableModuleLogic.__init__(self) + self.threshold = 25.0 + + def setDefaultParameters(self, parameterNode): + """ + Initialize parameter node with default settings. 
+ """ + pass + + def setThresholdValue(self,value): + self.threshold = value + + def exportDepthToPoints(self,depthImageNode,threshold): + self.threshold = threshold + try: + self.depthImageNode = slicer.mrmlScene.GetFirstNodeByName(depthImageNode) + self.getDepthImageData() + self.convertDepthToPoints() + except slicer.util.MRMLNodeNotFoundException: + logging.info("No depth image node selected") + + def removeColorizing(self): + imdata = self.getVtkImageDataAsOpenCVMat() + shape = imdata.shape + self.depthImage = numpy.array([[self.convertRGBtoD(j) for j in imdata[i]] for i in range(shape[0])]) + + def convertRGBtoD(self,pixel1): + is_disparity = False + min_depth = 0.16 + max_depth = 300.0 + min_disparity = 1.0 / max_depth + max_disparity = 1.0 / min_depth + r_value = float(pixel1[0]) + g_value = float(pixel1[1]) + b_value = float(pixel1[2]) + depthValue = 0 + if (b_value + g_value + r_value) < 255: + hue_value = 0 + elif (r_value >= g_value and r_value >= b_value): + if (g_value >= b_value): + hue_value = g_value - b_value + else: + hue_value = (g_value - b_value) + 1529 + elif (g_value >= r_value and g_value >= b_value): + hue_value = b_value - r_value + 510 + + elif (b_value >= g_value and b_value >= r_value): + hue_value = r_value - g_value + 1020 + + if (hue_value > 0): + if not is_disparity: + z_value = ((min_depth + (max_depth - min_depth) * hue_value / 1529.0) + 0.5); + depthValue = z_value + else: + disp_value = min_disparity + (max_disparity - min_disparity) * hue_value / 1529.0 + depthValue = ((1.0 / disp_value) / 1000 + 0.5) + else: + depthValue = 0 + return depthValue + + + def getVtkImageDataAsOpenCVMat(self): + cameraVolume = self.depthImageNode + '''if cameraVolume.GetClassName() == "vtkMRMLStreamingVolumeNode": + image = cameraVolume.GetFrameData()''' + + image = cameraVolume.GetImageData() + shape = list(cameraVolume.GetImageData().GetDimensions()) + shape.reverse() + components = image.GetNumberOfScalarComponents() + if components > 1: + 
shape.append(components) + shape.remove(1) + imageMat = vtk.util.numpy_support.vtk_to_numpy(image.GetPointData().GetScalars()).reshape(shape) + return imageMat + + def getDepthImageData(self): + imdata = self.getVtkImageDataAsOpenCVMat() + shape = imdata.shape + if len(shape) > 2: + self.removeColorizing() + else: + self.depthImage = numpy.array([[j for j in imdata[i]] for i in range(shape[0])]) + + + def convertDepthToPoints(self): + try: + self.fiducialNode = slicer.util.getNode("depthFiducials") + numFiducials = self.fiducialNode.GetNumberOfFiducials() + for i in range(numFiducials,0,-1): + self.fiducialNode.RemoveAllMarkups() + except slicer.util.MRMLNodeNotFoundException: + self.fiducialNode = slicer.vtkMRMLMarkupsFiducialNode() + self.fiducialNode.SetName("depthFiducials") + slicer.mrmlScene.AddNode(self.fiducialNode) + max_depth = self.depthImage.max() + imageShape = self.depthImage.shape + fidAddedCount = 0 + for x in range(0,imageShape[0],10): + for y in range(0,imageShape[1],10): + depthValue = self.depthImage[x][y] + if depthValue >= max_depth*(self.threshold/100.0): + self.fiducialNode.AddFiducialFromArray(numpy.array([x,y,depthValue])) + fidAddedCount += 1 + slicer.mrmlScene.Modified() + + + + + + +# +# ExportFrameToPointsTest +# + +class ExportFrameToPointsTest(ScriptedLoadableModuleTest): + """ + This is the test case for your scripted module. + Uses ScriptedLoadableModuleTest base class, available at: + https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py + """ + + def setUp(self): + """ Do whatever is needed to reset the state - typically a scene clear will be enough. + """ + slicer.mrmlScene.Clear() + + def runTest(self): + """Run as few or as many tests as needed here. 
+ """ + self.setUp() diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/Resources/Icons/ExportFrameToPoints.png b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/Resources/Icons/ExportFrameToPoints.png new file mode 100644 index 000000000..5d83ab4f0 Binary files /dev/null and b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/Resources/Icons/ExportFrameToPoints.png differ diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/Resources/UI/ExportFrameToPoints.ui b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/Resources/UI/ExportFrameToPoints.ui new file mode 100644 index 000000000..41ab374af --- /dev/null +++ b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/Resources/UI/ExportFrameToPoints.ui @@ -0,0 +1,145 @@ + + + ExportFrameToPoints + + + + 0 + 0 + 279 + 286 + + + + + 0 + 0 + + + + + + + + + Depth Image Node: + + + + + + + true + + + + vtkMRMLScalarVolumeNode + vtkMRMLVectorVolumeNode + + + + true + + + false + + + false + + + + + + + Threshold: + + + + + + + 0 + + + 100.000000000000000 + + + 50.000000000000000 + + + + + + + 0 + + + + + + + + + Advanced + + + true + + + + + + + + true + + + Run the algorithm. + + + Export Frame to Points + + + + + + + Qt::Vertical + + + + 20 + 40 + + + + + + + + + ctkCollapsibleButton + QWidget +
ctkCollapsibleButton.h
+ 1 +
+ + ctkSliderWidget + QWidget +
ctkSliderWidget.h
+
+ + qMRMLNodeComboBox + QWidget +
qMRMLNodeComboBox.h
+
+ + qMRMLWidget + QWidget +
qMRMLWidget.h
+ 1 +
+
+ + +
diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/Testing/CMakeLists.txt b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/Testing/CMakeLists.txt new file mode 100644 index 000000000..9b0730480 --- /dev/null +++ b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/Testing/CMakeLists.txt @@ -0,0 +1 @@ +add_subdirectory(Python) diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/Testing/Python/CMakeLists.txt b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/Testing/Python/CMakeLists.txt new file mode 100644 index 000000000..c5f7bcff8 --- /dev/null +++ b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/Testing/Python/CMakeLists.txt @@ -0,0 +1,2 @@ + +#slicer_add_python_unittest(SCRIPT ${MODULE_NAME}ModuleTest.py) diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/__pycache__/ExportFrameToPoints.cpython-36.pyc b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/__pycache__/ExportFrameToPoints.cpython-36.pyc new file mode 100644 index 000000000..1838e6ec8 Binary files /dev/null and b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/ExportFrameToPoints/__pycache__/ExportFrameToPoints.cpython-36.pyc differ diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/RGBD_Tracking.png 
b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/RGBD_Tracking.png new file mode 100644 index 000000000..6aae6ab70 Binary files /dev/null and b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/RGBD_Tracking/RGBD_Tracking.png differ diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/CMakeLists.txt b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/CMakeLists.txt new file mode 100644 index 000000000..d651e6c36 --- /dev/null +++ b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/CMakeLists.txt @@ -0,0 +1,31 @@ +#----------------------------------------------------------------------------- +set(MODULE_NAME RecordHerniaData) + +#----------------------------------------------------------------------------- +set(MODULE_PYTHON_SCRIPTS + ${MODULE_NAME}.py + ) + +set(MODULE_PYTHON_RESOURCES + Resources/Icons/${MODULE_NAME}.png + Resources/UI/${MODULE_NAME}.ui + ) + +#----------------------------------------------------------------------------- +slicerMacroBuildScriptedModule( + NAME ${MODULE_NAME} + SCRIPTS ${MODULE_PYTHON_SCRIPTS} + RESOURCES ${MODULE_PYTHON_RESOURCES} + WITH_GENERIC_TESTS + ) + +#----------------------------------------------------------------------------- +if(BUILD_TESTING) + + # Register the unittest subclass in the main script as a ctest. + # Note that the test will also be available at runtime. 
+ slicer_add_python_unittest(SCRIPT ${MODULE_NAME}.py) + + # Additional build-time testing + add_subdirectory(Testing) +endif() diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Config/PlusDeviceSet_Server_IntelRealSenseVideo.xml b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Config/PlusDeviceSet_Server_IntelRealSenseVideo.xml new file mode 100644 index 000000000..2c841160a --- /dev/null +++ b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Config/PlusDeviceSet_Server_IntelRealSenseVideo.xml @@ -0,0 +1,83 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Resources/Icons/TMSRecordDataModule.png b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Resources/Icons/TMSRecordDataModule.png new file mode 100644 index 000000000..5d83ab4f0 Binary files /dev/null and b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Resources/Icons/TMSRecordDataModule.png differ diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Resources/UI/RecordHerniaData.ui b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Resources/UI/RecordHerniaData.ui new file mode 100644 index 000000000..24511f595 --- /dev/null +++ b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Resources/UI/RecordHerniaData.ui @@ -0,0 +1,147 @@ + + + RecordHerniaData + + + + 0 + 
0 + 297 + 356 + + + + + + + Advanced + + + true + + + + + + <html><head/><body><p>1st Camera RGB Port: </p></body></html> + + + + + + + 18944 + + + + + + + <html><head/><body><p>2nd Camera RGB Port: </p></body></html> + + + + + + + 18946 + + + + + + + <html><head/><body><p>2nd Camera Depth Port: </p></body></html> + + + + + + + 18947 + + + + + + + <html><head/><body><p>1st Camera Depth Port: </p></body></html> + + + + + + + 18945 + + + + + + + + + + <html><head/><body><p>User ID: </p></body></html> + + + + + + + GS01 + + + + + + + Start Cameras + + + + + + + true + + + Start Recording Data. + + + Start Recording + + + + + + + Qt::Vertical + + + + 20 + 40 + + + + + + + + + ctkCollapsibleButton + QWidget +
ctkCollapsibleButton.h
+ 1 +
+ + qMRMLWidget + QWidget +
qMRMLWidget.h
+ 1 +
+
+ + +
diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Scripts/RecordHerniaData.bat b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Scripts/RecordHerniaData.bat new file mode 100644 index 000000000..3d73cdab5 --- /dev/null +++ b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Scripts/RecordHerniaData.bat @@ -0,0 +1,4 @@ +title Record Hernia Data + +cd "C:\Users\perklab\AppData\Local\NA-MIC\Slicer 4.13.0-2021-06-16\" +START Slicer.exe --python-code "slicer.util.mainWindow().moduleSelector().selectModule('RecordHerniaData')" diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Scripts/StartPlusServer.bat b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Scripts/StartPlusServer.bat new file mode 100644 index 000000000..6579fc4c9 --- /dev/null +++ b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Scripts/StartPlusServer.bat @@ -0,0 +1 @@ +START /MIN C:\Users\hisey\PlusApp-2.7.0.20190123-Telemed-Win32\bin\PlusServerLauncher.exe --connect --device-set-configuration-dir=d:\TMSRecording\TMSRecordDataModule\Config --config-file=PlusDeviceSet_Server_IntelRealSenseVideo.xml diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Scripts/StopPlus.bat b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Scripts/StopPlus.bat new file mode 100644 index 000000000..20be845e1 --- /dev/null +++ 
b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Scripts/StopPlus.bat @@ -0,0 +1,3 @@ +cd "C:\" +Taskkill /IM PlusServer.exe /F +Taskkill /IM PlusServerLauncher.exe diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/TMSRecordDataModule.py b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/TMSRecordDataModule.py new file mode 100644 index 000000000..6717bb961 --- /dev/null +++ b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/TMSRecordDataModule.py @@ -0,0 +1,301 @@ +import os +import unittest +import logging +import time +import subprocess +import vtk, qt, ctk, slicer +from slicer.ScriptedLoadableModule import * +from slicer.util import VTKObservationMixin + +# +# RecordHerniaData +# + +class TMSRecordDataModule(ScriptedLoadableModule): + """Uses ScriptedLoadableModule base class, available at: + https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py + """ + + def __init__(self, parent): + ScriptedLoadableModule.__init__(self, parent) + self.parent.title = "Record TMS Data" + self.parent.categories = ["Training"] + self.parent.dependencies = [] + self.parent.contributors = ["Rebecca Hisey (Queen's University)"] + self.parent.helpText = """ +This module is used to record RGB and depth video for Inguinal Hernia Repair using 2 Intel Realsense cameras. +""" + # TODO: replace with organization, grant and thanks + self.parent.acknowledgementText = """ +This file was originally developed by Jean-Christophe Fillion-Robin, Kitware Inc., Andras Lasso, PerkLab, +and Steve Pieper, Isomics, Inc. and was partially funded by NIH grant 3P41RR013218-12S1. 
+""" + + # Additional initialization step after application startup is complete + #slicer.app.connect("startupCompleted()", registerSampleData) + +# +# RecordHerniaDataWidget +# + +class TMSRecordDataModuleWidget(ScriptedLoadableModuleWidget, VTKObservationMixin): + """Uses ScriptedLoadableModuleWidget base class, available at: + https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py + """ + ''' + def __init__(self, parent=None): + """ + Called when the user opens the module the first time and the widget is initialized. + """ + ScriptedLoadableModuleWidget.__init__(self, parent) + VTKObservationMixin.__init__(self) # needed for parameter node observation + self.logic = RecordHerniaDataLogic() + self._parameterNode = None + self._updatingGUIFromParameterNode = False''' + + def setup(self): + """ + Called when the user opens the module the first time and the widget is initialized. + """ + ScriptedLoadableModuleWidget.setup(self) + + # Load widget from .ui file (created by Qt Designer). + # Additional widgets can be instantiated manually and added to self.layout. + uiWidget = slicer.util.loadUI(self.resourcePath('UI/RecordHerniaData.ui')) + self.layout.addWidget(uiWidget) + self.ui = slicer.util.childWidgetVariables(uiWidget) + + # Set scene in MRML widgets. Make sure that in Qt designer the top-level qMRMLWidget's + # "mrmlSceneChanged(vtkMRMLScene*)" signal in is connected to each MRML widget's. + # "setMRMLScene(vtkMRMLScene*)" slot. + uiWidget.setMRMLScene(slicer.mrmlScene) + + # Create logic class. Logic implements all computations that should be possible to run + # in batch mode, without a graphical user interface. 
+ self.logic = TMSRecordDataModuleLogic() + self.recordingStarted = False + self.camerasStarted = False + self.moduleDir = os.path.dirname(slicer.modules.tmsrecorddatamodule.path) + self.logic.setupScene() + + # Buttons + self.ui.StartStopRecordingButton.connect('clicked(bool)', self.onStartStopRecordingClicked) + self.ui.startCamerasButton.connect('clicked(bool)',self.onStartStopCamerasClicked) + + def cleanup(self): + """ + Called when the application closes and the module widget is destroyed. + """ + #self.removeObservers() + pass + + def onStartStopRecordingClicked(self): + """ + Run processing when user clicks "Apply" button. + """ + try: + + if not self.recordingStarted: + self.ui.StartStopRecordingButton.setText("Stop Recording") + self.logic.StartRecording(self.ui.userIDLineEdit.text) + self.recordingStarted = True + else: + self.logic.StopRecording() + self.recordingStarted = False + self.ui.StartStopRecordingButton.setText("Start Recording") + + except ValueError: + logging.info("Ports must have numeric values") + + def onStartStopCamerasClicked(self): + if not self.camerasStarted: + rgbPort = int(self.ui.rGBPortLineEdit.text) + depthPort = int(self.ui.depthPortLineEdit.text) + self.logic.setupOpenIGTLinkConnectors(rgbPort,depthPort) + cmd = str(self.moduleDir+"\Scripts\StartPlusServer.bat") + print(cmd) + startupEnv = slicer.util.startupEnvironment() + p = subprocess.Popen(cmd, env=startupEnv) + self.ui.startCamerasButton.text = "Stop Cameras" + self.camerasStarted = True + else: + cmd = str(self.moduleDir + "\Scripts\StopPlus.bat") + startupEnv = slicer.util.startupEnvironment() + p = subprocess.Popen(cmd, env=startupEnv) + self.ui.startCamerasButton.text="Start Cameras" + self.camerasStarted = False + + + +# +# RecordHerniaDataLogic +# + +class TMSRecordDataModuleLogic(ScriptedLoadableModuleLogic): + """This class should implement all the actual + computation done by your module. 
The interface + should be such that other python code can import + this class and make use of the functionality without + requiring an instance of the Widget. + Uses ScriptedLoadableModuleLogic base class, available at: + https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py + """ + + def __init__(self): + """ + Called when the logic class is instantiated. Can be used for initializing member variables. + """ + ScriptedLoadableModuleLogic.__init__(self) + self.rgbport = 18944 + self.depthPort = 18945 + + + + def setupOpenIGTLinkConnectors(self, rgbPort,depthPort): + try: + self.rgbConnectorNode = slicer.util.getNode('RGBConnector') + self.rgbConnectorNode.SetTypeClient('localhost',int(rgbPort)) + except slicer.util.MRMLNodeNotFoundException: + self.rgbConnectorNode = slicer.vtkMRMLIGTLConnectorNode() + self.rgbConnectorNode.SetName('RGBConnector') + slicer.mrmlScene.AddNode(self.rgbConnectorNode) + self.rgbConnectorNode.SetTypeClient('localhost',int(rgbPort)) + logging.debug('RGB Connector Created') + self.rgbConnectorNode.Start() + + try: + self.depthConnectorNode = slicer.util.getNode('DepthConnector') + self.depthConnectorNode.SetTypeClient('localhost',int(depthPort)) + except slicer.util.MRMLNodeNotFoundException: + self.depthConnectorNode = slicer.vtkMRMLIGTLConnectorNode() + self.depthConnectorNode.SetName('DepthConnector') + slicer.mrmlScene.AddNode(self.depthConnectorNode) + self.depthConnectorNode.SetTypeClient('localhost',int(depthPort)) + logging.debug('Depth Connector Created') + self.depthConnectorNode.Start() + + def setupScene(self): + self.saveScenesDirectory = os.path.join(os.path.dirname(slicer.modules.tmsrecorddatamodule.path), "Resources\SavedScenes") + self.setupOpenIGTLinkConnectors(18944,18945) + + + try: + self.rgbCamera1 = slicer.util.getNode("ImageRGB") + except slicer.util.MRMLNodeNotFoundException: + self.rgbCamera1 = slicer.vtkMRMLStreamingVolumeNode() + 
self.rgbCamera1.SetName("ImageRGB_ImageRGB") + slicer.mrmlScene.AddNode(self.rgbCamera1) + self.setupVolumeResliceDriver(self.rgbCamera1,"Red") + + try: + self.depthCamera1 = slicer.util.getNode("ImageDEPTH") + except slicer.util.MRMLNodeNotFoundException: + self.depthCamera1 = slicer.vtkMRMLStreamingVolumeNode() + self.depthCamera1.SetName("ImageDEPTH_ImageDEPT") + slicer.mrmlScene.AddNode(self.depthCamera1) + self.setupVolumeResliceDriver(self.depthCamera1, "Yellow") + + def setupVolumeResliceDriver(self,cameraNode,sliceColor): + + layoutManager = slicer.app.layoutManager() + slice = layoutManager.sliceWidget(sliceColor) + sliceLogic = slice.sliceLogic() + sliceLogic.GetSliceCompositeNode().SetBackgroundVolumeID(cameraNode.GetID()) + + resliceLogic = slicer.modules.volumereslicedriver.logic() + if resliceLogic: + sliceNode = slicer.util.getNode('vtkMRMLSliceNode'+sliceColor) + sliceNode.SetSliceResolutionMode(slicer.vtkMRMLSliceNode.SliceResolutionMatchVolumes) + resliceLogic.SetDriverForSlice(cameraNode.GetID(), sliceNode) + resliceLogic.SetModeForSlice(6, sliceNode) + resliceLogic.SetFlipForSlice(False, sliceNode) + # resliceLogic.SetRotationForSlice(180, yellowNode) + #sliceLogic.FitSliceToAll() + + def StartRecording(self,fileName): + self.fileName = fileName + "-" + time.strftime("%Y%m%d-%H%M%S") + self.recordingStartTime = vtk.vtkTimerLog.GetUniversalTime() + self.herniaSequenceBrowserNode = slicer.vtkMRMLSequenceBrowserNode() + self.startSequenceBrowserRecording(self.herniaSequenceBrowserNode) + + def StopRecording(self): + self.stopSequenceBrowserRecording(self.herniaSequenceBrowserNode) + self.saveRecording() + #self.removeRecordingFromScene() + + def startSequenceBrowserRecording(self, browserNode): + if (browserNode is None): + return + + # Indicate that this node was recorded, not loaded from file + browserNode.SetName(slicer.mrmlScene.GetUniqueNameByString("Recording")) + browserNode.SetAttribute("Recorded", "True") + # Create and populate a sequence 
browser node if the recording started + browserNode.SetScene(slicer.mrmlScene) + slicer.mrmlScene.AddNode(browserNode) + sequenceBrowserLogic = slicer.modules.sequences.logic() + + + modifiedFlag = browserNode.StartModify() + sequenceBrowserLogic.AddSynchronizedNode(None, self.rgbCamera1, browserNode) + sequenceBrowserLogic.AddSynchronizedNode(None, self.depthCamera1, browserNode) + + # Stop overwriting and saving changes to all nodes + browserNode.SetRecording(None, True) + browserNode.SetOverwriteProxyName(None, False) + browserNode.SetSaveChanges(None, False) + browserNode.EndModify(modifiedFlag) + + browserNode.SetRecordingActive(True) + + #self.StartRecordingSeekWidget.setMRMLSequenceBrowserNode(browserNode) + def stopSequenceBrowserRecording(self, browserNode): + if (browserNode is None): + return + browserNode.SetRecordingActive(False) + browserNode.SetRecording( None, False ) + + def saveRecording(self): + savedScenesDirectory = self.saveScenesDirectory + + + recordingCollection = slicer.mrmlScene.GetNodesByClass( "vtkMRMLSequenceBrowserNode" ) + for nodeNumber in range( recordingCollection.GetNumberOfItems() ): + browserNode = recordingCollection.GetItemAsObject( nodeNumber ) + dataNodeNames = ["ImageRGB_ImageRGB","ImageDEPTH_ImageDEPT"] + for dataNode in dataNodeNames: + proxyNode = slicer.util.getNode(dataNode) + sequenceNode = browserNode.GetSequenceNode(proxyNode) + if not sequenceNode.GetStorageNode() or not sequenceNode.GetStorageNode().IsA("vtkMRMLStreamingVolumeSequenceStorageNode"): + sequenceStorageNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLStreamingVolumeSequenceStorageNode") + sequenceNode.SetAndObserveStorageNodeID(sequenceStorageNode.GetID()) + filename = self.fileName + os.extsep + "sqbr" + filename = os.path.join( savedScenesDirectory, filename ) + slicer.util.saveNode(browserNode, filename) + + + def removeRecordingFromScene(self): + slicer.mrmlScene.Clear() + + + +# +# RecordHerniaDataTest +# + +class 
TMSRecordDataModuleTest(ScriptedLoadableModuleTest): + """ + This is the test case for your scripted module. + Uses ScriptedLoadableModuleTest base class, available at: + https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py + """ + + def setUp(self): + """ Do whatever is needed to reset the state - typically a scene clear will be enough. + """ + slicer.mrmlScene.Clear() + + def runTest(self): + """Run as few or as many tests as needed here. + """ + self.setUp() diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Testing/CMakeLists.txt b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Testing/CMakeLists.txt new file mode 100644 index 000000000..9b0730480 --- /dev/null +++ b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Testing/CMakeLists.txt @@ -0,0 +1 @@ +add_subdirectory(Python) diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Testing/Python/CMakeLists.txt b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Testing/Python/CMakeLists.txt new file mode 100644 index 000000000..c5f7bcff8 --- /dev/null +++ b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/Testing/Python/CMakeLists.txt @@ -0,0 +1,2 @@ + +#slicer_add_python_unittest(SCRIPT ${MODULE_NAME}ModuleTest.py) diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/__pycache__/RecordHerniaData.cpython-36.pyc 
b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/__pycache__/RecordHerniaData.cpython-36.pyc new file mode 100644 index 000000000..ba4633496 Binary files /dev/null and b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/__pycache__/RecordHerniaData.cpython-36.pyc differ diff --git a/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/__pycache__/TMSRecordDataModule.cpython-36.pyc b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/__pycache__/TMSRecordDataModule.cpython-36.pyc new file mode 100644 index 000000000..d1962630f Binary files /dev/null and b/PW35_2021_Virtual/Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/TMSRecording/TMSRecordDataModule/__pycache__/TMSRecordDataModule.cpython-36.pyc differ diff --git a/PW35_2021_Virtual/Projects/NCIImagingDataCommons/README.md b/PW35_2021_Virtual/Projects/NCIImagingDataCommons/README.md new file mode 100644 index 000000000..c6e0972f5 --- /dev/null +++ b/PW35_2021_Virtual/Projects/NCIImagingDataCommons/README.md @@ -0,0 +1,184 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# NCI Imaging Data Commons + +## Key Investigators (subject to change) + +- Andrey Fedorov (Brigham and Women's Hospital, Boston) +- Markus Herrmann (Mass General Hospital, Boston) +- Theodore Aptekarev (Independent) +- Steve Pieper (Isomics Inc) +- Ron Kikinis (Brigham and Women's Hospital, Boston) + +Special thanks to Fernando Pérez-García (UCL/KCL) for explaining PyTorch conventions and tensor permutations. 
+ +# Project Description + +**WE ARE HIRING - see [job opportunities here](https://spl.harvard.edu/join-us) if interested!** + +### National Cancer Institute (NCI) Imaging Data Commons + +NCI IDC is a new component of the Cancer Research Data Commons (CRDC). The goal of IDC is to enable a broad spectrum of cancer researchers, with and without imaging expertise, to easily access and explore the value of de-identified imaging data and to support integrated analyses with non-imaging data. IDC maintains cancer imaging data collections in Google Cloud Platform, and is developing tools and examples to support cloud-based analysis of imaging data. + +Some examples of what you can do with IDC: + +* quickly explore the available public cancer imaging datasets using rich metadata, visualize images and annotations, build cohorts of relevant subsets of the data +* retrieve DICOM files corresponding to the selected cohort to a cloud-based Virtual Machine (VM) + +In this project we would like to interact with the project week participants to answer their questions about IDC and understand their needs, collect feedback and suggestions for the functionality users would like to see in IDC, and help users get started with the platform. + +Free cloud credits are available to support the use of IDC for cancer imaging research. + +### GBM series tagging Project Week experiment + +Broad motivation for the experiment is to enrich IDC data offering by improving the richness of metadata accompanying IDC content. + +An experiment that can be completed within the Project Week can implement tool for tagging of the individual series within an MRI exam with the series type. The experiment will follow the catigorization of individual series that was proposed in [Advancing The Cancer Genome Atlas glioma MRI collections with expert segmentation labels and radiomic features](https://www.nature.com/articles/sdata2017117). 
+ +It is a valuable capability currently missing to allow for automatic tagging of individual series within a DICOM study, which is important for feeding data into the subsequent analysis steps. + +The idea for the experiment is to develop a tool allowing to tag individual series, using, as needed, DICOM metadata and content of the image, utilizing the metadata table of the mentioned paper as a source of inspiration if not training/testing. + +An additional and probably key feature of this experiment is that it's cloud native. This means that all resources and data does not leave the cloud datacenter. This is expected to bring insights on efficient working setups that utilize the cloud infrastructure and provide an update on what's the barrier for entry to perform research on cloud resources. + + +## Objective + + + +### NCI IDC + +1. Provide attendees with the opportunity to interact with the platform developers to answer questions. +2. Collect use cases and suggestions + +### GBM series tagging experiment + +1. Create a cloud native workflow for training ML models on IDC data +2. Produce a trained model for tagging of the individual series within an MRI exam with the series type. + +## Approach and Plan + + + +### NCI IDC +1. Work on more examples how to work with IDC. +2. Work on tools to streamline preparation of data for analysis. + +### GBM series tagging experiment + +1. Explore the data overlap between the TCIA-GBM data used in the paper and the data in IDC +2. Produce a training dataset to be used with a 2d classifier +3. Try out MONAI to train a 2d classifier + +## Progress and Next Steps + + + +### NCI IDC + +Visit "IDC-Bot" stream set up by Theodore under the discord project channel to watch short demo videos about IDC. + +1. Discussed IDC with Curt, Nadya, Andres, Fernando; presented at the DICOM breakout session. +2. 
Based on the feedback, [summarized steps](https://docs.google.com/document/d/1NkAHCS07y8wuvkNUYwSWKM_6qjhWFE-Es2ahzBsYj2Y/edit#) how to launch a COS VM with Slicer - which on the same day were utterly superseded by the [SlicerOnDemand](https://github.com/pieper/SlicerOnDemand) module by Steve! +3. [Summarized steps](https://docs.google.com/document/d/1nuv4qsiDflGYO4EKOK8-y5SWryZrjiprQ_lxVSffXCg/edit#) how to work with a GCP DICOM store to visualize analysis results - this currently relies on a non-production OHIF Viewer test deployment which may not be around for too long, need to find a more stable solution. +4. As an exercise, and to test the instructions, converted cortical segmentation result for a case from IDC done by Fernando and confirmed visualization in the viewer (also see https://github.com/OHIF/Viewers/issues/2462). +5. IDC-MONAILabel coordination meeting is today after the closing remarks at [this link](https://www.google.com/url?q=https://harvard.zoom.us/j/99711834613?pwd%3DajZ1alluQWtya3pUY3p6T1hsUDR6dz09&sa=D&source=calendar&usd=2&usg=AOvVaw1HLyL8Q4F8Lcl8E4UnZZ83). +6. 
Tutorial videos from [IDC paper](https://doi.org/10.1158/0008-5472.CAN-21-0950) have been published on NCI YouTube channel: + * Introduction to the Portal - https://youtu.be/uTljK2QehS0 + * Introduction to Case Cohorts - https://youtu.be/hGse2jpsb-c + * Custom Dashboards with Google D​ata Studio* - https://youtu.be/kEYcE-mFlzA + * A Case Study Integrating Image Analysis - https://youtu.be/ISJ5z1zLLjg + +![image](https://user-images.githubusercontent.com/313942/124285843-55347300-db1c-11eb-831a-4b3675c74ed2.png) + +![image](https://user-images.githubusercontent.com/313942/124284148-b22f2980-db1a-11eb-9095-10fc01b7e067.png) + +![image](https://user-images.githubusercontent.com/313942/124284165-b5c2b080-db1a-11eb-93e9-91a23cb879b0.png) + + +### GBM series tagging experiment + +#### Setting up + +The only setup requirement for utilizing the power of IDC is a Google Cloud account. This account has to be setup only once and if the user already uses or in the past used Google Cloud products - everything is in place. + +Keep in mind that Google provides [free credits to new users](https://console.cloud.google.com/freetrial) and IDC does the same for existing users ([fill in the form here](https://learn.canceridc.dev/introduction/requesting-gcp-cloud-credits)). + +![CloudAPIs](images/CloudAPIs.jpg) + +This experiment utilized the following APIs: + +- ***Big Query.*** IDC stores the metadata extracted from all DICOM images in Big Query Tables. The [cloud console](https://console.cloud.google.com/bigquery?) provides an interface to develop queries that provides a very pleasant user experience. +- ***Cloud Storage.*** IDC stores DICOM files in a Cloud Storage Bucket. Additionally a bucket is used to store intermediate results. +- ***Colab Notebooks.*** The most basic free version of the Colab Notebooks is really sufficient to run this experiment. The free GPUs that you can attach to the notebook is enough to drastically speed up the training process. 
+ +In real life you would probably want to add the following APIs to the mentioned ones: + +- ***AI Notebooks.*** Managed virtual machines with full Jupyter Lab environment. +- ***Compute Engine.*** Virtual Machines infrastructure for any purpose. With GCE you can create custom VMs that will run cloud instances of many of the popular applications, including Slicer, Jupyter Lab, MONAI Label, OHIF-Viewer etc. + +### The Experiment + +![Workflow](images/Workflow.png) + +The experiment utilized the free tools provided by Google to all it's users to see if such research can be contucted without the cloud infrastructure "heavy-lifting". The main computation platform was the free version of the Colab Notebooks that were stored in a Google Drive folder. + +All the notebooks created for this experiment are available in the [Github repository](https://github.com/piiq/pw35-gbm-tagging). Run them in Google Colab now: + +
Open In Colab 001\_IDC\_&\_ReferenceData\_Exploration.ipynb + +Open In Colab 002\_Data\_Sampling.ipynb + +Open In Colab 003\_Pre\_process\_Data.ipynb + +Open In Colab 004\_Classifier\_Training.ipynb + + +By default Colab provides instances with 2 cores and 12 GB of RAM. With an additional GPU that you can attach to the notebook this is enough for most of the tasks. For comparison analysis the preprocessing was also done on a 12 core 32 GB RAM instance to see if additional multiprocessing can boost performance. + +The use of a dedicated VM can boost performance if the scripts enable multiprocessing for computation. Additionally firing up multiple instances of the `gsutil` commands can speed up data transfer. For example, during the experiment the command + +```bash +cat "$TARGET_CLASS"_gcs_paths.txt | gsutil -u "$MY_PROJECT_ID" -m cp -I ./data/"$TARGET_CLASS" +``` + +was executed in 4 different screen sessions simultaneously to test the download speed. The results were 16 MBps when there is only one `gsutil` command running and 8 MBps if there are 4 `gsutil` commands running. + +### Results and conclusions + +1. As expected the DenseNet showed good results in training with zero configuration. + +![training](images/training.png) + +2. The barier for entering cloud computing lowered significantly with wider adoption of the GPU-enabled Colab notebooks. 
+ +# Illustrations + +* [IDC Portal](https://imaging.datacommons.cancer.gov/) can be used to explore the data available in IDC and buid and save cohorts ![IDC Portal](https://user-images.githubusercontent.com/313942/123643716-ada10300-d7f2-11eb-8500-2232618ab751.png) +* See any of the studies from IDC collections in IDC Viewer, build Viewer URL by referencing DICOM UIDs, e.g., https://viewer.imaging.datacommons.cancer.gov/viewer/1.3.6.1.4.1.32722.99.99.239341353911714368772597187099978969331 ![IDC Viewer](https://user-images.githubusercontent.com/313942/123644439-61a28e00-d7f3-11eb-8da3-68f939fe0de6.png) +* Search all of the DICOM metadata from IDC collections using SQL or DataStudio (as in [this template](https://datastudio.google.com/reporting/ab96379c-e134-414f-8996-188e678f1b70/page/KHtxB/preview)) ![DataStudio](https://user-images.githubusercontent.com/313942/123644907-d37ad780-d7f3-11eb-9654-fed2b13366da.png) +* Access DICOM files defined as IDC cohort or as an SQL query from IDC collections from Google Colab notebook or VM with the following steps (you can get free GCP credits from IDC, which will give you the GCP project ID to use in the commands below) - from example Colab notebook [here](https://github.com/ImagingDataCommons/IDC-Examples/blob/master/notebooks/Cohort_download.ipynb): +``` +# authenticate with Google +from google.colab import auth +auth.authenticate_user() + +# retrieve the cohort content run a direct SQL query against IDC DICOM metadata table +%%bigquery --project=$ cohort_df + +SELECT * +FROM `` + +# save the manifest as text file on the VM: +cohort_df = cohort_df.join(cohort_df["gcs_url"].str.split('#', 1, expand=True).rename(columns={0:'gcs_url_no_revision', 1:'gcs_revision'})) +cohort_df["gcs_url_no_revision"].to_csv("gcs_paths.txt", header=False, index=False) + +# retrieve the DICOM files corresponding to the cohort manifest +!mkdir downloaded_cohort +!cat gcs_paths.txt | gsutil -u -m cp -I ./downloaded_cohort +``` + +# 
Background and References + +* [IDC Portal](https://imaging.datacommons.cancer.gov/) +* [short paper](https://cancerres.aacrjournals.org/content/early/2021/06/15/0008-5472.CAN-21-0950) accompanied by [videos](https://cancerres.aacrjournals.org/content/early/2021/06/21/0008-5472.CAN-21-0950.figures-only) with the summary of what IDC aspires to accomplish diff --git a/PW35_2021_Virtual/Projects/NCIImagingDataCommons/images/CloudAPIs.jpg b/PW35_2021_Virtual/Projects/NCIImagingDataCommons/images/CloudAPIs.jpg new file mode 100644 index 000000000..529e7f4d3 Binary files /dev/null and b/PW35_2021_Virtual/Projects/NCIImagingDataCommons/images/CloudAPIs.jpg differ diff --git a/PW35_2021_Virtual/Projects/NCIImagingDataCommons/images/Workflow.png b/PW35_2021_Virtual/Projects/NCIImagingDataCommons/images/Workflow.png new file mode 100644 index 000000000..95b8bc4f4 Binary files /dev/null and b/PW35_2021_Virtual/Projects/NCIImagingDataCommons/images/Workflow.png differ diff --git a/PW35_2021_Virtual/Projects/NCIImagingDataCommons/images/training.png b/PW35_2021_Virtual/Projects/NCIImagingDataCommons/images/training.png new file mode 100644 index 000000000..1885915fd Binary files /dev/null and b/PW35_2021_Virtual/Projects/NCIImagingDataCommons/images/training.png differ diff --git a/PW35_2021_Virtual/Projects/NousNav/2021.07.01_NousNav-Demo-Video.png b/PW35_2021_Virtual/Projects/NousNav/2021.07.01_NousNav-Demo-Video.png new file mode 100644 index 000000000..57f8b7c1c Binary files /dev/null and b/PW35_2021_Virtual/Projects/NousNav/2021.07.01_NousNav-Demo-Video.png differ diff --git a/PW35_2021_Virtual/Projects/NousNav/LandmarkSelection.png b/PW35_2021_Virtual/Projects/NousNav/LandmarkSelection.png new file mode 100644 index 000000000..84aae4a0d Binary files /dev/null and b/PW35_2021_Virtual/Projects/NousNav/LandmarkSelection.png differ diff --git a/PW35_2021_Virtual/Projects/NousNav/README.md b/PW35_2021_Virtual/Projects/NousNav/README.md new file mode 100644 index 
000000000..ce86b7f88 --- /dev/null +++ b/PW35_2021_Virtual/Projects/NousNav/README.md @@ -0,0 +1,68 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# NousNav: Low-cost neuronavigation system + +## Key Investigators + +- Alexandra Golby (BWH) +- Sam Horvath (Kitware) +- Sarah Frisken (BWH) +- David Allemang (Kitware) +- Tina Kapur (BWH) +- Steve Pieper (Isomics) +- Jean-Christophe Fillion-Robin (Kitware) +- Sonia Pujol (BWH) + +# Project Description + +The NousNav project is an initiative led by Dr Alex Golby to develop a low-cost neuronavigation system designed for use in low- and middle-income countries. We are developing a 3D Slicer based application focused on supporting segmentation, registration and navigation tasks. + +The project will also include the development of open source hardware designs for these applications. + +## Objective + +During the project week, we aim to present the project and identify a specific neuronavigation task that we can team up with another group to address. + +1. Put together system demo. +1. Connect with other developers and researchers who are interested in contributing to the project + +## Approach and Plan + + + +1. Complete registration implementation +1. Demo / discuss the software at the IGT breakout + +## Progress and Next Steps + + + +1. :heavy_check_mark: Improved application based on testing of Planning/Registration/Navigation workflow + 1. Integrated UI for placing fiducials during planning. See [PR #98](https://github.com/NousNav/NousNav/pull/98) + 2. Fix minimum threshold value in skin segmentation. See [PR #99](https://github.com/NousNav/NousNav/pull/99) + 3. Created [demo-version](https://github.com/NousNav/NousNav/commits/demo-version) branch with tweaks and improvements + 1. Update Motive profile + 2. Setup registration verification page + 3. Add initial layout for navigation section +1. 
:heavy_check_mark: Recorded and edited a ~2-min video showcasing the complete Planning, Registration and Navigation workflow. See [video](https://drive.google.com/file/d/1cfWCd2-31rGxKGYWsacnr5M6aInSOAty/view?usp=sharing) +1. ... + + +| Click on the image below to see the video | +|----| +|[![](./2021.07.01_NousNav-Demo-Video.png)](https://drive.google.com/file/d/1cfWCd2-31rGxKGYWsacnr5M6aInSOAty/view?usp=sharing) | + + +# Illustrations + + +![Landmark selection interface](LandmarkSelection.png) +![Segmentation interface](SegmentationInterface.png) +![Trajectory planning](TrajectoryPlanning.png) + +# Background and References + + diff --git a/PW35_2021_Virtual/Projects/NousNav/SegmentationInterface.png b/PW35_2021_Virtual/Projects/NousNav/SegmentationInterface.png new file mode 100644 index 000000000..24eea2d50 Binary files /dev/null and b/PW35_2021_Virtual/Projects/NousNav/SegmentationInterface.png differ diff --git a/PW35_2021_Virtual/Projects/NousNav/TrajectoryPlanning.png b/PW35_2021_Virtual/Projects/NousNav/TrajectoryPlanning.png new file mode 100644 index 000000000..42d7461ca Binary files /dev/null and b/PW35_2021_Virtual/Projects/NousNav/TrajectoryPlanning.png differ diff --git a/PW35_2021_Virtual/Projects/OpenIGTLink/README.md b/PW35_2021_Virtual/Projects/OpenIGTLink/README.md new file mode 100644 index 000000000..adefb1daf --- /dev/null +++ b/PW35_2021_Virtual/Projects/OpenIGTLink/README.md @@ -0,0 +1,51 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# OpenIGTLink + +## Key Investigators + +- Junichi Tokuda + +# Project Description + +OpenIGTLink has been used to share data between software platforms (e.g., 3D Slicer, PLUS, ROS, etc.) in real-time for IGT applications. In this project, we will discuss new features that could potentially be incorporated in future versions of OpenIGTLink. + +## Objective + + + +1. Objective A. Identify missing features that are required in ongoing research projects. + +## Approach and Plan + + + +1. 
We will have a meeting at 1pm on Tuesday on the 'openigtlink' channel on Discord. (Confirmed attendees: Sarah, Sam, Tina, Junichi) + +## Progress and Next Steps + + + +1. Discuss current status of OpenIGTLink and existing issues + - We have been trying to minimize changes to the protocol to maintain compatibility, and keep the protocol simple. There have been few protocol changes in the past few years. Changes are mostly on the interface implementation. + - OpenIGTLink is a 'low-level' messaging protocol. Applications need to define how the subsystems exchange messages (=messaging scheme). + - Junichi has received new funding to use OpenIGTLink for medical robotics projects. OpenIGTLink is used to bridge Robot Operating System (ROS) and 3D Slicer. ROS uses its own messaging system (Data Distribution Service: DDS). + - Issues in Brainlab connectivity + - Brainlab's IGTLink interface was designed before Protocol version 2. Some of the messages are not fully compliant with the current protocol. + - 3D Slicer's OpenIGTLink interface was overhauled a few years ago. The old version had some workaround to handle non-compliant messages from Brainlab. + - The new version relies on an external library (IGSIO) to handle the high-level messaging scheme, which makes it harder to implement the workaround. Also, the new version of the interface does not accept some of the message types that used to be supported by the old versions. +2. Zoom conference with Brainlab + - BWH team (Sarah and Paxy) described the issues they are facing during clinical cases + - Brainlab acknowledged one of the issues and could fix it. Another issue is related to the non-compliant message format, and could be fixed without significant effort. + - Potential new features e.g., DTI support etc. DTI support will require testing on the OpenIGTLink side, as it has never been used for such a purpose. 
+ - +# Illustrations + + + +# Background and References + + diff --git a/PW35_2021_Virtual/Projects/PRISM_volume_rendering/README.md b/PW35_2021_Virtual/Projects/PRISM_volume_rendering/README.md new file mode 100644 index 000000000..d0b256b77 --- /dev/null +++ b/PW35_2021_Virtual/Projects/PRISM_volume_rendering/README.md @@ -0,0 +1,53 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# PRISM Volume Rendering + +## Key Investigators + +- Simon Drouin, ETS Montreal +- Steve Pieper, Isomics, Cambridge MA, USA +- Kyle Sunderland, PerkLab, Queen’s University, Canada +- Andrey Titov, ETS Montreal +- Rafael Palomar, Oslo University Hospital / NTNU, Norway + + +# Project Description + +The goal of this project is to enable the development of advanced 3D rendering techniques in Slicer. Specifically, we aim to facilitate access to GPU shaders and enable GPU-based filtering in Slicer by improving shader access and multipass rendering in VTK and Slicer. The [PRISM Module](https://github.com/ETS-vis-interactive/SlicerPRISMRendering) in Slicer will serve as a test environment for the new capabilities. + +## Objective + +1. Facilitate the development and debugging of GPU shaders for Slicer +2. Extend the principles introduced in the PRISM module to surface rendering and other types of rendering +3. Integrate work by Kyle Sunderland on VTK GPU image filters (see branch [here](https://github.com/Sunderlandkyl/VTK/commits/vtkGPUImageFilter3)) so that the filters are usable in Slicer +4. Integrate GPU filters with volume rendering in such a way that filtered volumes do not have to be transferred back to CPU memory before rendering + +## Progress and Next Steps + +1. All parties met on Monday to discuss the required functionality in VTK and Slicer to enable more advanced rendering effects: + 1. The current VTK mechanism to modify shaders is limited + 2. It makes it difficult to combine different effects + 3. 
Development is difficult because the complete shader is not accessible + 4. Rafael: In Slicer, there is a need for a system that arbitrates the modification of shaders by different modules + 5. The Slicer shader property node is available only for volume rendering. The feature could easily be ported to surface rendering to facilitate the editing of surface shaders +1. Previous efforts by Simon Drouin were made to facilitate shader debugging. Code is available in [this branch](https://gitlab.kitware.com/drouin-simon/vtk/-/tree/volume-shader-readability). The code still needs work to enable a debug mode where shader code can be kept in memory with additional tags to facilitate development. + +### Next steps +* Move vtkShaderProperties to the vtkMRMLDisplayNode level +* Explore custom rendering to simplify integration with the vtk render process. Prior work includes: + * Python scripted Actor/Mappers: https://www.slicer.org/wiki/Slicer3:Python:ScriptedActor + * SimpleMapper: https://github.com/IbisNeuronav/Ibis/tree/master/IbisVTK/vtkExtensions + +# Illustrations + +![Opacity Peeling](opacity-peeling.gif) + +# Background and References + + +- PRISM Module [GitHub repository](https://github.com/ETS-vis-interactive/SlicerPRISMRendering). 
+- [Original article](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0193636) about the PRISM framework that served as a basis to develop the PRISM module in Slicer +- Previous project weeks + - https://projectweek.na-mic.org/PW30_2019_GranCanaria/Projects/GLSLShaders/ + - https://projectweek.na-mic.org/PW28_2018_GranCanaria/Projects/MultiVolumeRendering/ + - diff --git a/PW35_2021_Virtual/Projects/PRISM_volume_rendering/opacity-peeling.gif b/PW35_2021_Virtual/Projects/PRISM_volume_rendering/opacity-peeling.gif new file mode 100644 index 000000000..96a719a5e Binary files /dev/null and b/PW35_2021_Virtual/Projects/PRISM_volume_rendering/opacity-peeling.gif differ diff --git a/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/README.md b/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/README.md new file mode 100644 index 000000000..170fae74b --- /dev/null +++ b/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/README.md @@ -0,0 +1,101 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Planar Osteotomies Virtual Surgical Planning And Patient-Specific Surgical Guides + +## Key Investigators + +- MSc. Mauro I. Dominguez +- Dr. Manjula Herath +- PhD. Andras Lasso + +# Project Description + +Slicer infrastructure is ready to plan any kind of planar cuts if involved tissues are segmentable and rigid (e.g. bones) and it is possible to simulate the corresponding reconstructions. + +With the addition of vtkbool to Slicer's Sandbox extension it is now possible to do boolean operations that allow to create patient-specific surgical guides. + +Having in mind the previous comments two Slicer modules were created: +- BoneReconstructionPlanner for virtual surgical planning of mandibular reconstruction with vascularized fibula free flap and generation of patient-specific surgical guides. 
+- DeformityCorrectionOsteotomyPlanner for virtual surgical planning of long-bone deformity correction by closing-wedge osteotomies and generation of patient-specific guides. + +BoneReconstructionPlanner has been already used in 3 surgeries. + +DeformityCorrectionOsteotomyPlanner has achieved virtual surgical planning and patient-specific guides feature is on development. This module will be added to BoneReconstructionPlanner extension when it's ready. + +## Objective + + + +1. Objective A. Help new developers that want to make available a planning module for a non-covered kind of surgery. +1. Objective B. Add dental implants planning to BoneReconstructionPlanner and drill guides to the fibula-surgical-guide. +1. Objective C. Add kirschner wires planning and positioning to the surgical-guide on DeformityCorrectionOsteotomyPlanner + +## Approach and Plan + + + +For dental implant planning on BoneReconstructionPlanner: +1. Make GUI to position/orient the dental implants. +1. Define transforms from reconstructed-mandible to fibula +1. Create drill guides on the fibula-surgical-guide using the previously created transforms for position/orientation. + +For kirschner wires planning on DeformityCorrectionOsteotomyPlanner: +1. Make a fixation plate 3D model and corresponding well-placed kirschner wires (cylinders). +1. Make a registration of fixation plate algorithm +1. Define transforms from corrected-bone to deformed-bone. +1. Create kirchner-wire-guides on the deformed-bone-surgical-guide using the previously created transforms for position/orientation. + +## Progress and Next Steps + + + +1. New code available at [DentalImplantsBranch](https://github.com/SlicerIGT/SlicerBoneReconstructionPlanner/tree/DentalImplantsBranch) of BoneReconstructionPlanner project. +1. Dental Implants Surgical Planning. +1. Corresponding Drill Guides on Fibula Surgical Guides. 
+ +# Illustrations + + + +## Achieved till the start of Project Week + +- Mandibular Reconstruction Virtual Surgical Planning: + +![](screenshotPlanningMandibularReconstruction.png) + +- Mandibular Reconstruction Patient-specific Surgical Guides (mandible and fibula guides): + +![](screenshotPatientSpecificSurgicalGuidesMandibularReconstruction.png) + +- Deformity Correction Virtual Surgical Planning: + +![](screenshotPlanningDeformityCorrection.png) + +## Desired work to be finished on project week: + +- Dental Implants Virtual Surgical Planning on Mandibular Reconstruction: + +![](dentalImplantsPlanningOnMandibularReconstruction.png) + +- Dental Implants guides on Fibula Surgical Guide + +![](fibulaSurgicalGuideWithDrillGuides0.png) + +![](fibulaSurgicalGuideWithDrillGuides1.png) + +## Achieved on Project Week + +- Dental Implant Planning for reconstructed mandible and Drill Guides on the Fibula Surgical Guide + +![](dentalImplantsPlanningOnBoneReconstructionPlanner.png) + +# Background and References + + + +[BoneReconstructionPlanner project](https://github.com/SlicerIGT/SlicerBoneReconstructionPlanner). + +[DeformityCorrectionOsteotomyPlanner project](https://github.com/mauigna06/SlicerDeformityCorrectionOsteotomyPlanner). 
diff --git a/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/dentalImplantsPlanningOnBoneReconstructionPlanner.png b/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/dentalImplantsPlanningOnBoneReconstructionPlanner.png new file mode 100644 index 000000000..ead9a6a68 Binary files /dev/null and b/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/dentalImplantsPlanningOnBoneReconstructionPlanner.png differ diff --git a/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/dentalImplantsPlanningOnMandibularReconstruction.png b/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/dentalImplantsPlanningOnMandibularReconstruction.png new file mode 100644 index 000000000..a6a259d00 Binary files /dev/null and b/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/dentalImplantsPlanningOnMandibularReconstruction.png differ diff --git a/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/fibulaSurgicalGuideWithDrillGuides0.png b/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/fibulaSurgicalGuideWithDrillGuides0.png new file mode 100644 index 000000000..8984f9cdc Binary files /dev/null and b/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/fibulaSurgicalGuideWithDrillGuides0.png differ diff --git a/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/fibulaSurgicalGuideWithDrillGuides1.png b/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/fibulaSurgicalGuideWithDrillGuides1.png new file mode 100644 index 000000000..730fe0d1c Binary files /dev/null and b/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/fibulaSurgicalGuideWithDrillGuides1.png differ diff --git a/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/screenshotPatientSpecificSurgicalGuidesMandibularReconstruction.png 
b/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/screenshotPatientSpecificSurgicalGuidesMandibularReconstruction.png new file mode 100644 index 000000000..d41c27f20 Binary files /dev/null and b/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/screenshotPatientSpecificSurgicalGuidesMandibularReconstruction.png differ diff --git a/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/screenshotPlanningDeformityCorrection.png b/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/screenshotPlanningDeformityCorrection.png new file mode 100644 index 000000000..3f64592b1 Binary files /dev/null and b/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/screenshotPlanningDeformityCorrection.png differ diff --git a/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/screenshotPlanningMandibularReconstruction.png b/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/screenshotPlanningMandibularReconstruction.png new file mode 100644 index 000000000..6e9a51e3e Binary files /dev/null and b/PW35_2021_Virtual/Projects/PlanarOsteotomiesVSPAndSurgicalGuides/screenshotPlanningMandibularReconstruction.png differ diff --git a/PW35_2021_Virtual/Projects/ProstateCryoablationPlanning/README.md b/PW35_2021_Virtual/Projects/ProstateCryoablationPlanning/README.md new file mode 100644 index 000000000..85a31ef40 --- /dev/null +++ b/PW35_2021_Virtual/Projects/ProstateCryoablationPlanning/README.md @@ -0,0 +1,70 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Slicer module for planning MR-guided focal cryoablation of prostate cancer + +## Key Investigators + +- Pedro Moreira (BWH) +- Nicholas Fordham (BWH) + +# Project Description +Prostate cancer (PCa) recurrence after radiotherapy may affect 10 to 60% of patients within 5-10 years after treatment. 
+Salvage prostatectomy of post-radiation recurrent PCa is challenging because of radiation-induced fibrosis and shrinkage of the prostate gland. +Minimally invasive focal cryoablation has been selected as an alternative salvage treatment for PCa post-radiation recurrence. Safe and effective focal cryoablation requires the deployment of the cryo-needles at optimal locations so that the created lethal ablation zone fully encompasses the tumor while preserving surrounding healthy tissues. +Currently, physicians rely on the pre- and intraoperative images and their own experience to define the best cryo-needle locations and manually insert them using a grid template. However, predicting the final shape of the lethal ablation zone is challenging as it will depend on several patient-specific factors such as proximity to heat sources and thermal properties of the prostatic tissue. The primary objective of this project is to develop a planner to maximize the amount of the target volume encompassed by the defined isotherm while sparing critical structures. The planner will use a data-driven approach to estimate the visible iceball using a logistic regression model and consider a safety margin to define the best cryo-needle placement. Our ultimate goal is to develop a 3D Slicer module for MR-guided focal cryoablation ready to be used in clinical procedures. +## Objective + + + +1. Objective A. Modify the current module for Python 3 (Slicer 4.11) +2. Objective B. Implement an iceball prediction algorithm +3. Objective C. Integrate the prediction and the current Module used for MRI-guided cryoablation +4. Objective D. Implement an optimal planning algorithm in 3D Slicer + +## Approach and Plan + + + +1. Change the current module to work with Slicer 4.11 +2. Create a module with the logistic regression model +3. Test the algorithm with retrospective data + +## Progress and Next Steps +### Update to python 3 +Pedro did the modifications to adapt the current module to Python 3. 
Most of the errors were related to dict_keys and managing the markups. We also had a problem loading the DICOM files, but it was fixed by replacing "slicer.util.loadVolume" with "self.scalarVolumePlugin.load". There are still some minor errors that have to be addressed within the next week. + +Figure 1: ProstateAblation module +![Prediction module](ScreenShot2.png) + + +### Statistical model to predict the iceball +We've developed a module this week that captures the desired probe location defined by the physician on the ProstateAblation module, a few points along the urethra, and uses the logistic regression presented at CARS2020 [1] to estimate the final iceball. The user can also define the threshold to select the sensitivity of the logistic regression. The code is still quite slow as it goes through the entire image; future implementations should use an ROI around the probe location. We are also working on displaying the total volume of the iceball and the minimum ablation margin. According to the literature, an ablation margin around 5mm is desirable. + +Figure 2: The blue segmentation is the estimated iceball given the probe location and the segmented urethra. +![Prediction module](ScreenShot1.png) + +Figure 3: Integration of the iceball estimation and the ProstateAblation module. The ablation target is marked in green, while the estimated iceball is in blue. +![Integration](ScreenShot3.png) + +One of the advantages of the current approach is the prediction and the visualization of the ablation margins in 3D. However, we still need to discuss the best way to visualize the prediction and planning results. We should meet with Dr. Tuncali soon to get his input. + +### Data curation +Nick is working on the cryoablation database to segment all intraprocedure images. We currently have data of 44 cases, but it is still not publicly available. + +## Next Steps + +1. Implement the search/optimization algorithm to suggest the optimal number of probes and their locations to the physician. 
+2. CLEAN THE CODE and fix the current issues. +3. Test the planner and predictor using retrospective data + +# Illustrations + +![Iceball prediction](ScreenShot.png) + + +# Background and References + +[1] Moreira P, Tuncali K, Tempany C and Tokuda J, A data-driven approach to predicting lethal temperature isotherm in MRI-guided focal cryoablation, in CARS 2020 Computer Assisted Radiology and Surgery, Munich, Germany, June 2020. +[2] ProstateAblations module https://github.com/pedrolfm/SlicerProstateAblation +[3] Iceball estimation https://github.com/pedrolfm/IceballEstimation + diff --git a/PW35_2021_Virtual/Projects/ProstateCryoablationPlanning/ScreenShot.png b/PW35_2021_Virtual/Projects/ProstateCryoablationPlanning/ScreenShot.png new file mode 100644 index 000000000..58c5d8462 Binary files /dev/null and b/PW35_2021_Virtual/Projects/ProstateCryoablationPlanning/ScreenShot.png differ diff --git a/PW35_2021_Virtual/Projects/ProstateCryoablationPlanning/ScreenShot1.png b/PW35_2021_Virtual/Projects/ProstateCryoablationPlanning/ScreenShot1.png new file mode 100644 index 000000000..3b850629d Binary files /dev/null and b/PW35_2021_Virtual/Projects/ProstateCryoablationPlanning/ScreenShot1.png differ diff --git a/PW35_2021_Virtual/Projects/ProstateCryoablationPlanning/ScreenShot2.png b/PW35_2021_Virtual/Projects/ProstateCryoablationPlanning/ScreenShot2.png new file mode 100644 index 000000000..27211c058 Binary files /dev/null and b/PW35_2021_Virtual/Projects/ProstateCryoablationPlanning/ScreenShot2.png differ diff --git a/PW35_2021_Virtual/Projects/ProstateCryoablationPlanning/ScreenShot3.png b/PW35_2021_Virtual/Projects/ProstateCryoablationPlanning/ScreenShot3.png new file mode 100644 index 000000000..62b072b84 Binary files /dev/null and b/PW35_2021_Virtual/Projects/ProstateCryoablationPlanning/ScreenShot3.png differ diff --git a/PW35_2021_Virtual/Projects/PyTorchIntegration/README.md b/PW35_2021_Virtual/Projects/PyTorchIntegration/README.md 
new file mode 100644 index 000000000..4d5402c4d --- /dev/null +++ b/PW35_2021_Virtual/Projects/PyTorchIntegration/README.md @@ -0,0 +1,191 @@ +Back to [Projects List](../../README.md#ProjectsList) + +Go to [Progress and Next Steps](#progress-and-next-steps) +# Integration of PyTorch and Slicer + +## Key Investigators + +- Fernando Pérez-García (University College London & King's College London, UK) +- Andrés Díaz-Pinto (King's College London, UK) +- Andras Lasso (Queen's University, Canada) +- Curtis Lisle (KnowledgeVis, USA) +- Rebecca Hisey (Queen's University, Canada) +- Steve Pieper (Isomics) +- Tamas Ungi (Queen's University, Canada) + +## Project Description + + + +Investigate the potential issues faced by users who would like to use a trained +deep learning model (e.g., a convolutional neural network) inside Slicer, +using PyTorch. + +## Objective + + + +Issues that will be addressed: + +1. How to install PyTorch within Slicer. The main question is whether to install a version with GPU support and, if it does, which version of the CUDA toolkit to install. +1. How to handle the necessary conversion of Slicer nodes (e.g., `vtkMRMLScalarVolumeNode`) to PyTorch objects (e.g., `torch.Tensor`) and vice versa. Look into adding tools to `slicer.util`. +1. Write a tutorial with a toy example using a publicly available dataset. + +## Approach and Plan + + + +1. Investigate issues related to [CUDA versions and GPU drivers](https://docs.nvidia.com/deploy/cuda-compatibility/index.html), and which installation method to use depending on the platform. Maybe, write a GUI to guide the user into choosing an appropriate installation type. +1. Once PyTorch has been installed, look into the best ways to prepare slicer nodes for inference and visualize the results in Slicer. +1. 
If necessary, write a tutorial (potentially a Jupyter Notebook using [SlicerJupyter](https://github.com/Slicer/SlicerJupyter)) + +## Progress and Next Steps + + + +### PyTorch in Slicer + +#### Optimized installation using [`light-the-torch`](https://github.com/pmeier/light-the-torch) + +1. Fixed `light-the-torch` to detect the best PyTorch version from NVIDIA drivers ([link to PR](https://github.com/pmeier/light-the-torch/pull/31)) +1. Fixed PythonQt so `light-the-torch` can be used within Slicer ([link to PR](https://github.com/MeVisLab/pythonqt/pull/49), to be updated in Slicer fork) + +#### PyTorch extension + +The `PyTorch` extension has been added to the Extensions Index. + +[Link to pull request](https://github.com/Slicer/ExtensionsIndex/pull/1775) – [Link to code](https://github.com/fepegar/SlicerPytorch) + +![PyTorch extension in Extensions Manager](./extensions.png) + +![PyTorchUtils module](./pytorch_extension.png) + +### Demo modules + +The code for these modules can be found at [SlicerParcellation](https://github.com/fepegar/SlicerParcellation). + +#### Brain Resection Cavity Segmentation + +Based on [Pérez-García et al., 2021, *A self-supervised learning strategy for postoperative brain cavity segmentation simulating resections*](https://link.springer.com/article/10.1007/s11548-021-02420-2). More info at the [`resseg-ijcars`](https://github.com/fepegar/resseg-ijcars) repository. + +[![Brain Resection Cavity Segmentation](./cavity.gif)](https://link.springer.com/article/10.1007/s11548-021-02420-2 "Brain Resection Cavity Segmentation") + +#### Brain Parcellation + +Based on [Li et al., 2017, *On the Compactness, Efficiency, and Representation of 3D Convolutional Networks: Brain Parcellation as a Pretext Task*](https://link.springer.com/chapter/10.1007/978-3-319-59050-9_28). 
More info at the [`highresnet`](https://github.com/fepegar/highresnet) repository. + +[![Brain Parcellation on 3D Slicer](./parcellation_mrhead_frame.png)](https://youtu.be/kKXCv-JPikw "Brain Parcellation on 3D Slicer") + +Parcellation run by @pieper on a synthetic 1 mm isotropic T1 MPRAGE generated from a 6.5 mm anisotropic T2 (using model from [Iglesias et al. 2021](https://www.sciencedirect.com/science/article/pii/S1053811921004833)): + +![Parcellation on T2 and synthetic T1](./parcellation_t2_from_syntht1.png) + +![Parcellation on T2](./parcellation_t2_vs_syntht1.png) + +This is a parcellation run through the [Imaging Data Commons](https://imaging.datacommons.cancer.gov/) framework, visualized online using [OHIF](https://ohif.org/): + +![Parcellation on OHIF](./parc_ohif.png) + +## Illustrations + + + +This is a diagram of the typical usage of Python within 3D Slicer. + +![Example of inference using PyTorch inside Slicer](diagram.svg) + +## Background and References + + + +### Post on Discourse + +The first discussion about this project appeared on the [Slicer forum (PW35) Projects List](https://discourse.slicer.org/t/pw35-projects-list/17905/4). + +### Discussion on GitHub + +Some issues about installing PyTorch in Slicer were discussed in the [pull request](https://github.com/Slicer/ExtensionsIndex/pull/1710) to add [SlicerTorchIO](https://github.com/fepegar/SlicerTorchIO) to the Extensions Index. + +### `light-the-torch` + +This seems to be a Python package designed to help installing PyTorch easily, auto-detecting the computation backend. Probably worth looking into it: [`light-the-torch`](https://github.com/pmeier/light-the-torch). + +The maintainer is [Philip Meier](https://github.com/pmeier), a very active contributor to `torchvision`. + +Also related and worth investigating, from the same author, is [`pytorch-pip-shim`](https://github.com/pmeier/pytorch-pip-shim). 
+ +### Example of a naive `pip` installation + +Tried on Linux, driver 430.50 (`nvidia-smi --query-gpu=driver_version --format=csv`). + +```python +>>> pip_install('torch') +Collecting torch + Downloading torch-1.9.0-cp36-cp36m-manylinux1_x86_64.whl (831.4 MB) +Collecting dataclasses + Downloading dataclasses-0.8-py3-none-any.whl (19 kB) +Requirement already satisfied: typing-extensions in ./opt/Slicer/Nightly/lib/Python/lib/python3.6/site-packages (from torch) (3.10.0.0) +Installing collected packages: dataclasses, torch + WARNING: The scripts convert-caffe2-to-onnx and convert-onnx-to-caffe2 are installed in '/home/fernando/opt/Slicer/Nightly/lib/Python/bin' which is not on PATH. + Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location. +Successfully installed dataclasses-0.8 torch-1.9.0 +WARNING: Running pip as root will break packages and permissions. You should install packages reliably by using venv: https://pip.pypa.io/warnings/venv +``` + +```python +>>> import torch +>>> torch.cuda.is_available() +/home/fernando/opt/Slicer/Nightly/lib/Python/lib/python3.6/site-packages/torch/cuda/__init__.py:52: UserWarning: CUDA initialization: The NVIDIA driver on your system is too old (found version 10010). Please update your GPU driver by downloading and installing a new version from the URL: http://www.nvidia.com/Download/index.aspx Alternatively, go to: https://pytorch.org to install a PyTorch version that has been compiled with your version of the CUDA driver. (Triggered internally at /pytorch/c10/cuda/CUDAFunctions.cpp:115.) 
+ return torch._C._cuda_getDeviceCount() > 0 +False +>>> torch._C._cuda_getCompiledVersion() +10020 +``` + +```shell +$ nvidia-smi +Tue Jun 22 17:12:44 2021 ++-----------------------------------------------------------------------------+ +| NVIDIA-SMI 430.50 Driver Version: 430.50 CUDA Version: 10.1 | +|-------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | +|===============================+======================+======================| +| 0 GeForce GTX 1060 Off | 00000000:01:00.0 On | N/A | +| N/A 67C P0 33W / N/A | 1694MiB / 6078MiB | 12% Default | ++-------------------------------+----------------------+----------------------+ + ++-----------------------------------------------------------------------------+ +| Processes: GPU Memory | +| GPU PID Type Process name Usage | +|=============================================================================| +| 0 3928 G /usr/lib/xorg/Xorg 576MiB | +| 0 4129 G /usr/bin/gnome-shell 385MiB | +| 0 4615 G ...AAAAAAAAAAAAAAgAAAAAAAAA --shared-files 46MiB | +| 0 5099 G ...AAAAAAAAAAAACAAAAAAAAAA= --shared-files 49MiB | +| 0 6955 G ...AAAAAAAAAAAIAAAAAAAAAA== --shared-files 366MiB | +| 0 8016 G ...AAgAAAAAAAAACAAAAAAAAAA= --shared-files 102MiB | +| 0 8039 G ...o/opt/Slicer/Nightly/bin/SlicerApp-real 112MiB | +| 0 22437 G ...AAAAAAAAAAAIAAAAAAAAAA== --shared-files 30MiB | ++-----------------------------------------------------------------------------+ +``` + +```shell +$ nvcc --version +nvcc: NVIDIA (R) Cuda compiler driver +Copyright (c) 2005-2018 NVIDIA Corporation +Built on Sat_Aug_25_21:08:01_CDT_2018 +Cuda compilation tools, release 10.0, V10.0.130 +``` + +## Glossary + + +| Abbreviation | Meaning | +| -------------- | ----------------------------------- | +| **GPU** | Graphics Processing Unit | +| **CUDA** | Compute Unified Device Architecture | +| **NVCC** | NVIDIA 
CUDA Compiler | +| **NVIDIA-SMI** | NVIDIA System Management Interface | diff --git a/PW35_2021_Virtual/Projects/PyTorchIntegration/cavity.gif b/PW35_2021_Virtual/Projects/PyTorchIntegration/cavity.gif new file mode 100644 index 000000000..3a641dafe Binary files /dev/null and b/PW35_2021_Virtual/Projects/PyTorchIntegration/cavity.gif differ diff --git a/PW35_2021_Virtual/Projects/PyTorchIntegration/diagram.svg b/PW35_2021_Virtual/Projects/PyTorchIntegration/diagram.svg new file mode 100644 index 000000000..3201b8905 --- /dev/null +++ b/PW35_2021_Virtual/Projects/PyTorchIntegration/diagram.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/PW35_2021_Virtual/Projects/PyTorchIntegration/extensions.png b/PW35_2021_Virtual/Projects/PyTorchIntegration/extensions.png new file mode 100644 index 000000000..f5b892623 Binary files /dev/null and b/PW35_2021_Virtual/Projects/PyTorchIntegration/extensions.png differ diff --git a/PW35_2021_Virtual/Projects/PyTorchIntegration/parc_ohif.png b/PW35_2021_Virtual/Projects/PyTorchIntegration/parc_ohif.png new file mode 100644 index 000000000..1fb74c0e7 Binary files /dev/null and b/PW35_2021_Virtual/Projects/PyTorchIntegration/parc_ohif.png differ diff --git a/PW35_2021_Virtual/Projects/PyTorchIntegration/parcellation_mrhead.png b/PW35_2021_Virtual/Projects/PyTorchIntegration/parcellation_mrhead.png new file mode 100644 index 000000000..4668c610c Binary files /dev/null and b/PW35_2021_Virtual/Projects/PyTorchIntegration/parcellation_mrhead.png differ diff --git a/PW35_2021_Virtual/Projects/PyTorchIntegration/parcellation_mrhead_frame.png b/PW35_2021_Virtual/Projects/PyTorchIntegration/parcellation_mrhead_frame.png new file mode 100644 index 000000000..6c7a56f8a Binary files /dev/null and b/PW35_2021_Virtual/Projects/PyTorchIntegration/parcellation_mrhead_frame.png differ diff --git a/PW35_2021_Virtual/Projects/PyTorchIntegration/parcellation_t2_from_syntht1.png 
b/PW35_2021_Virtual/Projects/PyTorchIntegration/parcellation_t2_from_syntht1.png new file mode 100644 index 000000000..7cbf437dc Binary files /dev/null and b/PW35_2021_Virtual/Projects/PyTorchIntegration/parcellation_t2_from_syntht1.png differ diff --git a/PW35_2021_Virtual/Projects/PyTorchIntegration/parcellation_t2_vs_syntht1.png b/PW35_2021_Virtual/Projects/PyTorchIntegration/parcellation_t2_vs_syntht1.png new file mode 100644 index 000000000..2ea6506af Binary files /dev/null and b/PW35_2021_Virtual/Projects/PyTorchIntegration/parcellation_t2_vs_syntht1.png differ diff --git a/PW35_2021_Virtual/Projects/PyTorchIntegration/pytorch_extension.png b/PW35_2021_Virtual/Projects/PyTorchIntegration/pytorch_extension.png new file mode 100644 index 000000000..dfdd23cec Binary files /dev/null and b/PW35_2021_Virtual/Projects/PyTorchIntegration/pytorch_extension.png differ diff --git a/PW35_2021_Virtual/Projects/README.md b/PW35_2021_Virtual/Projects/README.md new file mode 100644 index 000000000..b58adac99 --- /dev/null +++ b/PW35_2021_Virtual/Projects/README.md @@ -0,0 +1,18 @@ +# How to create a new project + + +- Post questions about the project idea and team on the [Project Week forum][forum], our communication mechanism as of PW28. +- When you are ready, add a new entry in the list of **Projects** by creating a new `README.md` file in subfolder in `Projects` folder, and copying contents of [project description template][project-description-template] file into it. Step-by-step instructions for this are: + +1. Open [project description template][project-description-template] and copy its full content to the clipboard +1. Go back to [Projects](https://github.com/NA-MIC/ProjectWeek/tree/master/PW35_2021_Virtual/Projects) folder on GitHub +1. Click on "Create new file" button +1. Type `YourProjectName/README.md` +1. Paste the previously copied content of project template page into your new `README.md` +1. 
Update at least your project's __title, key investigators, project description sections__ +1. Add a link to your project to the [main project list](..#projects-how-to-add-a-new-project) + +Note: some steps above may require creating a [pull request](https://help.github.com/articles/creating-a-pull-request/) until your account is given write access. + +[forum]: https://discourse.slicer.org/c/community/project-week +[project-description-template]: https://raw.githubusercontent.com/NA-MIC/ProjectWeek/master/PW30_2019_GranCanaria/Projects/Template/README.md diff --git a/PW35_2021_Virtual/Projects/ROSMED/README.md b/PW35_2021_Virtual/Projects/ROSMED/README.md new file mode 100644 index 000000000..160696bc7 --- /dev/null +++ b/PW35_2021_Virtual/Projects/ROSMED/README.md @@ -0,0 +1,108 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# ROS-MED: Integration of 3D Slicer and ROS2 for Image-Guided Robot-Assisted Interventions + +## Key Investigators + +- Junichi Tokuda (Brigham and Women's Hospital) +- Tamas Ungi (Queen’s University) +- Axel Krieger (Johns Hopkins University) +- Simon Leonard (Johns Hopkins University) +- Mark Fuge (University of Maryland) +- Lydia Al-Zogbi (Johns Hopkins University) +- Milad Habibi (University of Maryland) +- Pedro Moreira (Brigham and Women's Hospital) + +# Project Description + +The ultimate goal of this project is to provide a software platform to integrate medical image computing +software (3D Slicer) into a system for image-guided robot-assisted interventions, in which 2D/3D medical +images are used for planning, navigation, monitoring, and validation. +Examples of such robot-assisted systems include image-guided robotic needle-guide systems and surgical +CAD/CAM systems. 
Those systems often require a wide range of image computing capabilities such as +segmentation of anatomical structures, registration of multiple images, 2D/3D image visualization, +image-based planning, and data sharing with the robot controller and the hospital’s picture archiving +and communication systems (PACS). Integration of a solid medical image computing platform into a robotic +system is becoming more important than ever with the growing interest in AI-based treatment planning and guidance. + +However, the engineering effort to implement those features is often underestimated in academic research +due to limited engineering resources or the scope of the project. Fortunately, many of those features have +already been implemented and validated in the research community and often distributed as open-source software. +Therefore it has become essential for academic researchers to take advantage of those existing tools and +incorporate them into their own research instead of reinventing the wheel. + +Our team has been integrating 3D Slicer and Robot Operating System using OpenIGTLink to achieve the above goal +following [our first project](https://www.na-mic.org/wiki/2016_Winter_Project_Week/Projects/SlicerROSIntegration), +and recently received a grant from National Institutes of Health (2R01EB020667). In this year, we will focus on +transition to the new ROS platform (ROS2). + + + +## Objective + + + +1. Objective A. Explore ROS2 as a potential platform for our study on image-guided model-driven needle placement robot. + +## Approach and Plan + + + +1. Run ROS2 on a universal robot arm (UR-10) at JHU. +1. Prototype a new version of ROS-IGTL-Bridge +1. Display a 3D model of the UR-10 on 3D Slicer, and synchronize its posture with the robot by sending the transform of each link. 
+ +## Progress and Next Steps + + + +### First meeting (1:00pm on Discord) +- Participants: Tamas, Junichi, Pedro + +### Kick-off meeting (2:30pm on Discord) +- Participants: Tamas, Lydia, Junichi, Simon +- Confirm the goal of the project +- Breakdown the tasks + - Prototype ROS-IGTL-Bridge (Junichi) + - Setup a remote environment for the UR-10 computer at Dr. Krieger's lab at JHU (Lydia) + - Install ROS2 on the UR-10 computer remotely (Simon) + - Test UR-10 + ROS2 + 3D Slicer (Simon) + - Test ROS2 on non-Linux environment (Tamas) +- New workshop tutorial + - Add AI-based segmentation? + - Recycle a surgical plan generated in Tamas' AI segmentation tutorial + - The new workshop tutorial will focus on ROS, but could direct audience to Tamas' AI segmentation tutorial, if they are interested. + +### ROS 2 testing on other environment +Tamas tried installing ROS2 on Windows, but it was not straightforward. It might not be a viable solution for the workshop tutorial. + +### First implementation of ROS2-OpenIGTLink bridge +The original interface for ROS1 (ROS_IGTL_Bridge) has been modified for ROS2. The modified interface is now working with ROS 2 with limited capability (only supports text, transform, and point) +- [Repository](https://github.com/openigtlink/ros2_igtl_bridge) +- [Demo video](https://www.dropbox.com/s/sq5amxkrfjvmvaz/ros2_igtl_bridge_July_1_2021.mov?dl=0) + +### Setup ROS 2 and 3D Slicer on universal robot (UR-10e) +Approach: +- Preload visual models of the links of the universal robot. +- Define a linear transform for each link of the universal robot in the Slicer scene. +- Place each link under the corresponding transform. + +![Slicer Scene](Screenshot%20from%202021-07-01%2014-18-58.png) + +- The controller on ROS 2 exports the transform of each link to OpenIGTLink + +Results: +- ROS2 was installed successfully on the computer connected to UR10e +- Visual models of UR-10e were successfully imported to 3D Slicer.
However, there seemed to be an issue with the coordinate frame. (Later, Andras pointed out it was related to a RAS/LPS issue with the STL files.) +- Robot was successfully controlled from ROS, and 3D Slicer could visualize the posture of the robot in real-time. +- [Video](https://photos.app.goo.gl/ihZ2mtWYTMQx3wmx6) + + +# Background and References +- Frank T, Krieger A, Leonard S, Patel NA, Tokuda J. ROS-IGTL-Bridge: an open network interface for image-guided therapy using the ROS environment. Int J Comput Assist Radiol Surg. 2017 Aug;12(8):1451-1460. doi: 10.1007/s11548-017-1618-1. Epub 2017 May 31. PMID: 28567563; [PMCID: PMC5543207](https://www-ncbi-nlm-nih-gov.ezp-prod1.hul.harvard.edu/pmc/articles/PMC5543207/). +- [ROS-IGTL-Bridge (for ROS 1)](https://github.com/openigtlink/ROS-IGTL-Bridge) +- [ros2_igtl_bridge (for ROS 2)](https://github.com/openigtlink/ros2_igtl_bridge) + + +# Acknowledgement +This work is supported by NIH R01EB020667 (MPI: Tokuda, Krieger, Fuge, Leonard).
diff --git a/PW35_2021_Virtual/Projects/ROSMED/Screenshot from 2021-07-01 14-18-58.png b/PW35_2021_Virtual/Projects/ROSMED/Screenshot from 2021-07-01 14-18-58.png new file mode 100644 index 000000000..4bc196d7e Binary files /dev/null and b/PW35_2021_Virtual/Projects/ROSMED/Screenshot from 2021-07-01 14-18-58.png differ diff --git a/PW35_2021_Virtual/Projects/Slicer-Liver/README.md b/PW35_2021_Virtual/Projects/Slicer-Liver/README.md new file mode 100644 index 000000000..d1a45f5c1 --- /dev/null +++ b/PW35_2021_Virtual/Projects/Slicer-Liver/README.md @@ -0,0 +1,66 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Slicer-Liver: liver resection planning in 3D Slicer + +## Key Investigators + +- Rafael Palomar (Oslo University Hospital and NTNU) +- Gabriella d'Albenzio (Oslo University Hospital) +- Ole Vegard Solberg (SINTEF) +- Geir Arne Tangen (SINTEF) + +# Project Description + + + +This project will kick-start the development of the *Slicer-Liver* extension +that will be developed through the [ALive project](https://alive-research.no). +The objective of the Slicer-Liver extension is to provide researchers +with tools to perform liver analytics towards planning of liver interventions +(resections, ablations). At this point in the project we need to port early +prototypes of our resection planning algorithms into 3D Slicer. + +![3D Bezier Surface Markup](screenshot.png) + +[Early prototype of the resection planning module](https://youtu.be/7M3DULQp81k) + +## Objective + + + +1. Integrate liver resection planning tools in a 3D Slicer extension. + +## Approach and Plan + + + +1. Development of a resection initialization widget using markups and shaders. +1. Development of a deformable surface using markups. +1. Development of interaction between the initialization markups and the deformable surface. +1. Development of distance measurements visualized in the resections using shaders. +1. Add a GUI to manage resections.
+ +## Progress and Next Steps + +The core components of the planning platform have been developed but not +integrated together. Shaders and pluggable markups infrastructure have been used +for the development of the resection initialization, but are not yet integrated +for visualization of other measurements (e.g., safety margins). + +![3D Bezier Surface Markup](bezier_surface_markup.png) + +![Resection initialization](resection_initialization.png) + +![Resection planning](resection_planning.png) + +Our next steps are: +1. Integrating all the resection components together +2. Develop a Qt UI to drive the process +3. Improve stability -- software testing +4. Improve performance -- wider use of shaders + +# Background and References +1. [NorMIT-Plan at NA-MIC project week](https://projectweek.na-mic.org/PW34_2020_Virtual/Projects/SlicerLiverAnalysis/) (December 2020) +1. [NorMIT-Plan at NA-MIC project week](https://projectweek.na-mic.org/PW33_2020_GranCanaria/Projects/NorMIT-Plan/) (January 2020) +1. Palomar, Rafael, et al. "A novel method for planning liver resections using deformable Bézier surfaces and distance maps." Computer Methods and Programs in Biomedicine 144 (2017): 135-45. +1. Palomar, Rafael, et al. "Surface reconstruction for planning and navigation of liver resections." Computerized Medical Imaging and Graphics 53 (2016): 30-42.
diff --git a/PW35_2021_Virtual/Projects/Slicer-Liver/bezier_surface_markup.png b/PW35_2021_Virtual/Projects/Slicer-Liver/bezier_surface_markup.png new file mode 100644 index 000000000..9d1fdb191 Binary files /dev/null and b/PW35_2021_Virtual/Projects/Slicer-Liver/bezier_surface_markup.png differ diff --git a/PW35_2021_Virtual/Projects/Slicer-Liver/resection_initialization.png b/PW35_2021_Virtual/Projects/Slicer-Liver/resection_initialization.png new file mode 100644 index 000000000..dcd1f430f Binary files /dev/null and b/PW35_2021_Virtual/Projects/Slicer-Liver/resection_initialization.png differ diff --git a/PW35_2021_Virtual/Projects/Slicer-Liver/resection_planning.png b/PW35_2021_Virtual/Projects/Slicer-Liver/resection_planning.png new file mode 100644 index 000000000..987f1d8f0 Binary files /dev/null and b/PW35_2021_Virtual/Projects/Slicer-Liver/resection_planning.png differ diff --git a/PW35_2021_Virtual/Projects/Slicer-Liver/screenshot.png b/PW35_2021_Virtual/Projects/Slicer-Liver/screenshot.png new file mode 100644 index 000000000..5310aa739 Binary files /dev/null and b/PW35_2021_Virtual/Projects/Slicer-Liver/screenshot.png differ diff --git a/PW35_2021_Virtual/Projects/SlicerForMicroscopyData/README.md b/PW35_2021_Virtual/Projects/SlicerForMicroscopyData/README.md new file mode 100644 index 000000000..cbc7afa35 --- /dev/null +++ b/PW35_2021_Virtual/Projects/SlicerForMicroscopyData/README.md @@ -0,0 +1,54 @@ +Back to [Projects List](../../README.md#ProjectsList) + +Loading Large Microscopy Data in Slicer + +## Key Investigators + +- Sindhura Thirumal (Queen's University) +- Steve Pieper (Isomics) +- Tina Kapur (BWH) + +# Project Description + +We've developed a module in Slicer - TITAN - that is meant to be an end-to-end pipeline of processing and analyzing imaging mass cytometry data. TITAN allows the user to +visualize the different protein channels in the tissue, segment the individual cells in the tissue, and create some simple analysis plots of the data. 
Currently, user has to export +the raw file from the cytometer into TIFF files using an external software in order to use TITAN. However, we would like to use the raw text files directly with Slicer instead, +in order to eliminate the need for any external software. + +## Objective + + + +1. Be able to import large text files into Slicer without causing Slicer to become unresponsive +2. Update functions in TITAN to work with the text file rather than TIFF images + +## Approach and Plan + + + +1. Implement a custom reader to deal with these text files + + +## Progress and Next Steps + + + +1. Added button to module that opens the user's file explorer for them to select the text file(s) to be imported +1. Doing this just points to the file's location on the computer rather than opening the file in Slicer (which is what slows it down) +1. Data from the files are obtained by parsing through each line and generating the corresponding image arrays, which are then used to create new volume nodes + +# Illustrations + + +Loading text files +![load text files box](https://user-images.githubusercontent.com/21988487/124183446-3daaab00-da86-11eb-9de0-05010474ebe1.PNG) + +Result of generated image from text file +![load text files p2](https://user-images.githubusercontent.com/21988487/124183494-4ef3b780-da86-11eb-81ea-cef6eb9ae278.PNG) + +# Background and References + + diff --git a/PW35_2021_Virtual/Projects/SlicerForMicroscopyData/load text files box.PNG b/PW35_2021_Virtual/Projects/SlicerForMicroscopyData/load text files box.PNG new file mode 100644 index 000000000..514e82228 Binary files /dev/null and b/PW35_2021_Virtual/Projects/SlicerForMicroscopyData/load text files box.PNG differ diff --git a/PW35_2021_Virtual/Projects/SlicerForMicroscopyData/load text files p2.PNG b/PW35_2021_Virtual/Projects/SlicerForMicroscopyData/load text files p2.PNG new file mode 100644 index 000000000..aaf8bff26 Binary files /dev/null and b/PW35_2021_Virtual/Projects/SlicerForMicroscopyData/load 
text files p2.PNG differ diff --git a/PW35_2021_Virtual/Projects/SlicerOnDemand/README.md b/PW35_2021_Virtual/Projects/SlicerOnDemand/README.md new file mode 100644 index 000000000..88504633a --- /dev/null +++ b/PW35_2021_Virtual/Projects/SlicerOnDemand/README.md @@ -0,0 +1,57 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Slicer On Demand + +## Key Investigators + +- Steve Pieper (Isomics, Inc. Cambridge MA, USA) +- Curt Lisle (Knowledgevis, Maitland, Florida, USA) +- Andrey Fedorov (BWH, Boston, MA, USA) +- Theodore Aptekarev (Independent, Moscow, Russia) + +# Project Description + +The goal is to allow people to quickly transition from viewing images to doing more complex tasks such as segmentation or registration. + +## Objective + + + +1. A quick and easy way to get a functioning Slicer environment +2. Ability to browse data, e.g. in IDC, and load same data in cloud-hosted Slicer +3. Pass login credentials from web to Slicer to allow load/save of confidential data + +## Approach and Plan + + + +1. Build on existing [SlicerMachines GCP boot images](https://github.com/pieper/SlicerMachines) +1. Prototype using [the IDC sandbox](https://idc-sandbox-000.web.app/) that already supports login +1. Use [GCP JavaScript API](https://cloud.google.com/compute/docs/tutorials/javascript-guide) to launch and monitor jobs + +## Progress and Next Steps + + + +1. Steve [implemented one-click creation](https://github.com/pieper/SlicerOnDemand) of a VM using the Google API to launch a GPU-enabled VM, ready to use within about 90 seconds. +2. Theodore created workflow icons to provide visual feedback during the launch process. +3. We met and discussed methods for encrypting traffic to the "pop up" Slicer-in-the-cloud using Google Cloud infrastructure options. +4. A use case was identified where an IDC cohort manifest could be passed to the Slicer VM and the cohort could be automatically loaded for the user. +5. 
We had a discussion with Kitware regarding the composition of the Slicer Docker containers: It would be nice to consolidate dockerfile of general use into https://github.com/Slicer/SlicerDocker +6. Next Steps: + * Evaluate tradeoffs between simplicity of interface and exposing options + * Test robustness, add more feedback about things like how much money you are spending + * Configure the VM instance with tools and ML models + * Improve the desktop/window management setup to be more modern + +# Illustrations + +| Click on the image below to see the video | +|----| +|[![](./2021.07.01_NousNav-Demo-Video.png)](https://drive.google.com/file/d/1cfWCd2-31rGxKGYWsacnr5M6aInSOAty/view?usp=sharing) | +|[![SlicerOnDemand full demo (2 minutes)](https://img.youtube.com/vi/ERm2lPzWH0E/0.jpg)](https://youtu.be/ERm2lPzWH0E "SlicerOnDemand")| + + +# Background and References +* [Review of Cloud efforts from last virtual Project Week](https://projectweek.na-mic.org/PW34_2020_Virtual/Projects/Slicer_in_Cloud_Environments/). +* [Preliminary work from last in-person Project Week](https://projectweek.na-mic.org/PW33_2020_GranCanaria/Projects/OHIFSlicerBridge/) diff --git a/PW35_2021_Virtual/Projects/SlicerVR/README.md b/PW35_2021_Virtual/Projects/SlicerVR/README.md new file mode 100644 index 000000000..038bbf969 --- /dev/null +++ b/PW35_2021_Virtual/Projects/SlicerVR/README.md @@ -0,0 +1,73 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# SlicerVR: interactive UI panel in the VR environment + +## Key Investigators + +- Csaba Pinter (Ebatinca, Pixel Medical) +- Adam Rankin (Robarts Research, Canada) +- Jean-Christophe Fillion-Robin (Kitware) +- Simon Drouin (ETS) + +# Project Description + + + +A key infrastructural element that is still missing from SlicerVR is the ability to show and interact with arbitrary Qt widgets in the VR scene.
Up until recently, a blocking issue was an incomplete implementation of the vtkQWidgetWidget class in the VTK version that Slicer was using. Now that Slicer has been migrated to VTK9, the implementation of this in-VR widget continued. + +A blocking issue for this to happen is the ability to build the SlicerVR extension against Slicer built with VTK9. Currently, build fails because the build system does not support building VTK remote modules externally (this issue breaks the build of other important extensions as well, such as SlicerVMTK). + +Once build is fixed and the in-VR widget is added, arbitrary UI elements of Slicer can be used from within VR, thus basically exposing the entire Slicer functionality within the VR environment. + +## Objective + + + +1. Build SlicerVR against VTK9 +1. Add interactive Qt panel to VR scene [SlicerVR#43](https://github.com/KitwareMedical/SlicerVirtualReality/issues/43) +1. Make use of the in-VR widget via laser pointer and VR-optimized widgets + +## Approach and Plan + + + +1. Update SlicerVR CMake files to build with VTK9 + 1. Utilize new VTKExternalModule infrastructure to build vtk openvr rendering + 3. Extract the said module from VTK proper with history (JC) + 4. Change main CMake file to use this instead of the VTK remote module approach (Adam?) + 5. Fix build issues arising from the switch to VTK9 in SlicerVR (Csaba?) +1. Try [vtkQWidgetWidget](https://vtk.org/doc/nightly/html/classvtkQWidgetWidget.html) in SlicerVR, confirm that it now works (Csaba) +1. Explore existing possibilities for using a laser pointer emanating from the controllers to control the Qt-based widget (press, click, drag&drop, etc) (?) +1. Add the already implemented but dormant VR-optimized widgets in the SlicerVR user interface (Csaba) + +## Progress and Next Steps + + + +1. Build SlicerVR against VTK9 :heavy_check_mark: + 1. 
[KitwareMedical/SlicerVirtualReality#84](https://github.com/KitwareMedical/SlicerVirtualReality/pull/84): Update build system to support building against VTK9 + 1. Created [KitwareMedical/VTKExternalModule](https://github.com/KitwareMedical/VTKExternalModule) for externally building any built-in or remote VTK module outside of the VTK source tree. + 1. [vtk/vtk#8123](https://gitlab.kitware.com/vtk/vtk/-/merge_requests/8123): vtkModule: Do not generate files in source tree when building module externally +1. In-VR UI widgets + 1. Rebased VR widgets branch to the latest master to a [new branch](https://github.com/cpinter/SlicerVirtualReality/tree/virtual-widget-2) + 1. Built the SlicerVR branch successfully with VTK9 + 1. The vtkQWidgetWidget test still crashes unfortunately +1. Proposed some hooks to enable customization of VR interaction from python code in Slicer. [Pull request here](https://github.com/KitwareMedical/SlicerVirtualReality/pull/83) for reference. + +# Illustrations + +![In-VR user interface](https://spie.org/Images/Graphics/Newsroom/2019articles/Crime-920.jpg) + + + +# Background and References + +1. Commit that broke 3rd party module build [here](https://gitlab.kitware.com/vtk/vtk/-/commit/140f8d8bcd85cedd7ac996c806f984add70bb11d) +2. Discussion on remote modules vs. external modules [here](https://discourse.vtk.org/t/remote-modules-vs-external-modules/2003) +3. How to make downstream VTK modules [here](https://discourse.vtk.org/t/example-of-vtk-module-built-after-vtk-is-built/6099) + 1. Links to example [here](https://gitlab.kitware.com/vtk/vtk/-/tree/master/Examples/Build/vtkMy) +4. [https://github.com/KitwareMedical/VTKExternalModule](https://github.com/KitwareMedical/VTKExternalModule) +5.
VR virtual widget branch [here](https://github.com/cpinter/SlicerVirtualReality/tree/virtual-widget) diff --git a/PW35_2021_Virtual/Projects/SlimViewer/README.md b/PW35_2021_Virtual/Projects/SlimViewer/README.md new file mode 100644 index 000000000..ba57652e2 --- /dev/null +++ b/PW35_2021_Virtual/Projects/SlimViewer/README.md @@ -0,0 +1,46 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Integration of Slim Pathology Viewer in federated learning platform + +## Key Investigators + +- Maximilian Fischer (German Cancer Research Center) + + +# Project Description + +Integration of the Pathology Viewer to enable support for DICOM microscopy images in federated learning platform + +## Objective + + + +1. enable support of dicom annotations and labelling for microscopy images + + +## Approach and Plan + + + +1. ... +1. ... +1. ... + +## Progress and Next Steps + + + +1. ... +1. ... +1. ... + +# Illustrations + + + +# Background and References + + diff --git a/PW35_2021_Virtual/Projects/SpineSegmentation/README.md b/PW35_2021_Virtual/Projects/SpineSegmentation/README.md new file mode 100644 index 000000000..9dd61e102 --- /dev/null +++ b/PW35_2021_Virtual/Projects/SpineSegmentation/README.md @@ -0,0 +1,94 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Development of Deep Learning Segmentation for Spines with Metastatic Disease + +## Key Investigators + +- Ron Alkalay (Beth Israel Deaconess Medical Center) +- Curtis Lisle (KnowledgeVis,LLC) +- Andres Diaz-Pinto (Kings College London) +- Andras Lasso (Queens University) + +## Project Description + +We have labeled CT data sets for lumbar and/or thoracic as well as full spine columns for +patients at baseline. For a good # of patients, we also have 3 and 6m follow-up CT, but these are +not yet labeled. It would be great to get some help/advice regarding how to +speed up the segmentation for the labeling and extraction of volume information from the masks.
+The segmented volumes are needed for the analytical and computational modeling pipeline +as part of a collaboration with MIT. Lytic spines present a special segmentation challenge because the interior of +vertebrae have degraded and appear like surrounding tissue in CT scans. + +## Objective + + + +1. Objective A. Setup MONAI-based neural network training on the labeled scans. +1. Objective B. Investigate using MONAILabel as a possibly better way to annotate new scans. +1. Objective C. Train a DNN on the labeled data. Can this model be used in MONAILabel? +1. Objective D. Find a path forward to better annotation using DL-assisted segmentation. + +## Approach and Plan + + + +1. Start with publicly available VerSe spine dataset. +1. Evaluate MONAILabel from Andres using model trained on VerSe as a segmentation tool for these lytic spines. How good is segmentation? +1. Consider fine-tuning model in MONAILabel with lytic spines from our dataset +1. Train model on only our lytic spines for comparison +1. Install MONAILabel on GPU hardware at BI to setup for better annotation after project week + +## Progress and Next Steps + + + +1. Received permission to share a few cases with collaborators +1. Acquired GPU hardware. Evaluated installing Linux, WSL, and Conda-on-windows. Settled on Windows +2. Andres Diaz-Pinto used the VerSe spine dataset to create a MONAILabel vertebrae segmentation app +3. Curt was able to install and run MONAILabel locally +4. Andras Lasso helped with a script to fill internal holes in the vertebrae +5. Segmentation Editor performance is improved with the VTK9 version of Slicer and Andras engineering fixes +6. Curt started preparing for ML training of a network using Ron's improved annotations, but this is still in progress. +7. **Next Steps:** Improve segmentations for the remaining annotated spines and use in MONAILabel. +8.
**Next Steps:** identify any other collaborators interested to continue working on this project; eventually expanding to include nearby muscle and disks + +### Spine segmentation protocol for training data generation + +This protocol can be used to quickly and accurately segment the spine (each vertebra in a separate segment; no internal holes), which can be used as training data for deep learning based segmentation: +- Define a bone threshold using Threshold effect and use that as a mask (do not Apply the threshold). Choose a threshold value that only selects bones, not soft tissues. Cancellous bone inside the vertebrae will be missed, but those holes will be filled in a later step. +- Paint a seed in each vertebra with a different color in a sagittal slice. Paint seed in an axial slice for one of the vertebrae. +- Use "Grow from seeds effect" to segment the entire spine automatically. Adjust seeds as needed, then finalize the segmentation by clicking Apply. +- Fill holes inside the vertebrae by copy-pasting [this script](https://gist.github.com/lassoan/0f45db8bae792ea19ccad36ceefbf52d) into the Python console. + +## Illustrations + + +Here is an example of how the degraded vertebrae look in CT scans and the challenges of segmentation in Slicer. +Note the interior holes because of the heterogeneous nature of the lytic bone. + +![Sample lytic vertebrae](lytic-vertebrae-example.png) + +Because the bone in the spines is degraded from cancer, it is difficult to get a good segmentation. Here is the result of several hours of manual segmentation: + +![Segmentation in Slicer](lytic-vertebrae-in-slicer.png) + +During the project week, we received consulting assistance on segmentation techniques from Andras Lasso and Rudolf Bumm. After Andras wrote us a post-processing script, the final result of registration is much better.
These annotations can be used for neural net training: + +![improved spine annotation](https://data.kitware.com/api/v1/item/60de5f0c2fa25629b9c6ee0c/download?contentDisposition=inline) + +Here is the MONAILabel server and Slicer Module running on Curt's workstation. The app created annotations of vertebrae automatically created by a neural network trained using the VerSe spine dataset. This app works for interactively improving the segmentation through editing (using the deepedit algorithm): + +![MONAILabel vertebrae app](https://data.kitware.com/api/v1/item/60de5f0d2fa25629b9c6ee14/download?contentDisposition=inline) + +Development was started for training a standalone neural network model, also using the MONAI framework. Here is a sample training image showing a portion of the spine vertebrae and the corresponding label going into the neural network for training. This shows why it was critical to achieve solid annotations for the interior portions of the vertebrae: + +![Monai spine training image](https://data.kitware.com/api/v1/item/60de5f0d2fa25629b9c6ee1c/download?contentDisposition=inline) + +## Background and References + + + +[VerSe: A Vertebra Labelling and Segmentation Benchmark](https://www.researchgate.net/publication/338853005_VerSe_A_Vertebrae_Labelling_and_Segmentation_Benchmark).
diff --git a/PW35_2021_Virtual/Projects/SpineSegmentation/lytic-vertebrae-example.png b/PW35_2021_Virtual/Projects/SpineSegmentation/lytic-vertebrae-example.png new file mode 100644 index 000000000..8e2d69f3a Binary files /dev/null and b/PW35_2021_Virtual/Projects/SpineSegmentation/lytic-vertebrae-example.png differ diff --git a/PW35_2021_Virtual/Projects/SpineSegmentation/lytic-vertebrae-in-slicer.png b/PW35_2021_Virtual/Projects/SpineSegmentation/lytic-vertebrae-in-slicer.png new file mode 100644 index 000000000..8cd216a4e Binary files /dev/null and b/PW35_2021_Virtual/Projects/SpineSegmentation/lytic-vertebrae-in-slicer.png differ diff --git a/PW35_2021_Virtual/Projects/TMS_Slicer_Module/README.md b/PW35_2021_Virtual/Projects/TMS_Slicer_Module/README.md new file mode 100644 index 000000000..e25a2981e --- /dev/null +++ b/PW35_2021_Virtual/Projects/TMS_Slicer_Module/README.md @@ -0,0 +1,74 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Real-time visualization in transcranial magnetic stimulation (TMS) + +## Key Investigators + +- Loraine Franke (University of Massachusetts Boston) +- Lipeng Ning (BWH & Harvard Medical School) +- Yogesh Rathi (BWH & Harvard Medical School) +- Steve Pieper (Isomics, Inc.) +- Daniel Haehn (University of Massachusetts Boston) + +# Project Description + +Transcranial magnetic stimulation is a noninvasive procedure used for treating depression with magnetic and electric fields to stimulate nerve cells. +A TMS coil is slowly moved over the subject's head surface to target certain areas in the brain. +Our project aims to develop a deep-learning powered software for real-time E-Field prediction and a visualization of TMS within 3D Slicer. + +## Objective + +Real-time visualization of an electric field (E-field) for transcranial magnetic stimulation (TMS) on the brain surface as well as visualizing the E-field along fiber bundles (DTI), and later the possibility to optimize the exact coil position or to target certain fibers. 
+ +## Approach and Plan + +Integrate the visualization process as a new module within the MRML scene architecture: + +- Evaluate different methods to visualize the E-field on the surface mesh (find fastest method which is appropriate for a real-time visualization) +- Rendering of a virtual TMS coil in the 3D Slicer module +- Develop an approach to move/rotate the coil model over the brain surface in 3D Slicer (similar to markups/fiducials) + +## Progress and Next Steps + + +1. Visualize Efield (volume) on brain surface (polydata/mesh) by adjusting the model's orientation and applying the vtkProbeFilter within a new Slicer Module. +2. Tested the rendering time of our visualization approach with renderer.GetLastRenderTimeInSeconds(), which resulted in an average of 0.8 milliseconds. +3. Added a functionality to create fiducials and to move them along the brain model's surface. + +Next steps: +- Replace the fiducial with a TMS coil model. +- Compare with further methods for rendering time (vtkpointlocator, cppyy, manual with optimized storage) +- Apply vector field visualization on tractography data / pick fibers (similar to [DBS Navigation](../DBSNavigation/README.md) ). + +## Illustrations + +Fiducial (yellow sphere) moving along the brain surface with mapped vector field: +![Fiducial (Sphere) moving along brain surface](./fiducial_on_brain_surface.png) +The fiducial in this screenshot will later be replaced by a TMS coil model. 
+ + +The vector field volume and the brain surface mesh overlapping after applying the module's functionalities: +![Moving vector field](./moving_evec.gif) + + + +Visualization goal from another software we want to implement as Module in 3D Slicer: +![Brain surface and DT](./tmsonbrain.png) + + + + +Visualization process also on tractography for fiber bundle targeting: +![Visualization Process](./visualization_process.png) + +# Background and References + +vtkProbeFilter: https://vtk.org/doc/nightly/html/classvtkProbeFilter.html +Moving fiducials with cppyy: https://gist.github.com/pieper/f9da3e0a73c70981b48d0747132526d5 + +Measure rendering time in 3D Slicer: +1. Getting renderer: https://slicer.readthedocs.io/en/latest/developer_guide/script_repository.html#access-vtk-views-renderers-and-cameras +2. Then applying renderer.GetLastRenderTimeInSeconds() + +- Random note following the project presentations: https://cyclotronresearchcentre.github.io/forward/ might be useful for using diffusion-weighted imaging to compute conductivity tensors in the white matter to make the electromagnetic simulations more accurate. 
diff --git a/PW35_2021_Virtual/Projects/TMS_Slicer_Module/fiducial_on_brain_surface.png b/PW35_2021_Virtual/Projects/TMS_Slicer_Module/fiducial_on_brain_surface.png new file mode 100644 index 000000000..db0d75a51 Binary files /dev/null and b/PW35_2021_Virtual/Projects/TMS_Slicer_Module/fiducial_on_brain_surface.png differ diff --git a/PW35_2021_Virtual/Projects/TMS_Slicer_Module/moving_evec.gif b/PW35_2021_Virtual/Projects/TMS_Slicer_Module/moving_evec.gif new file mode 100644 index 000000000..b60bdc6dc Binary files /dev/null and b/PW35_2021_Virtual/Projects/TMS_Slicer_Module/moving_evec.gif differ diff --git a/PW35_2021_Virtual/Projects/TMS_Slicer_Module/tmsonbrain.png b/PW35_2021_Virtual/Projects/TMS_Slicer_Module/tmsonbrain.png new file mode 100644 index 000000000..b81e904b5 Binary files /dev/null and b/PW35_2021_Virtual/Projects/TMS_Slicer_Module/tmsonbrain.png differ diff --git a/PW35_2021_Virtual/Projects/TMS_Slicer_Module/visualization_process.png b/PW35_2021_Virtual/Projects/TMS_Slicer_Module/visualization_process.png new file mode 100644 index 000000000..86685022b Binary files /dev/null and b/PW35_2021_Virtual/Projects/TMS_Slicer_Module/visualization_process.png differ diff --git a/PW35_2021_Virtual/Projects/Time-Series Segmentation Module/README.md b/PW35_2021_Virtual/Projects/Time-Series Segmentation Module/README.md new file mode 100644 index 000000000..298d65fc6 --- /dev/null +++ b/PW35_2021_Virtual/Projects/Time-Series Segmentation Module/README.md @@ -0,0 +1,68 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Time-Series Segmentation / Annotation Module + +## Key Investigators + +- Rebecca Hisey (Queen's University) +- Tamas Ungi (Queen's University) +- Andras Lasso (Queen's University) +- Andres Diaz-Pinto +- Tina Kapur + +# Project Description + + +Labelling data for training deep neural networks is a tedious and time-consuming task, especially for segmentation problems. 
Many of the existing tools +available within Slicer are optimized for working with static 3D volumes such as MRI and CT. Annotation of video-based sequences such as ultrasound and RGB video +have unique challenges that are not seen when working with static volumes. The primary goal of this project is to assess the requirements of researchers working with +time-series or video data to design a Slicer module that will be convenient to use for a wide variety of users. We will mostly focus on the task of creating segmentation +labels, but are open to the discussion of annotation for classification and detection problems as well. + +## Objective + + + +1. Objective A. Establish the requirements, challenges and current workflow of researchers working with time-series or sequence data +2. Objective B. Design a module to be implemented in Slicer that will provide the most benefit and convenience for satisfying the requirements above +4. Objective C. If we have time we would like to begin the implementation of the module + +## Approach and Plan + + + +1. We would like to have discussions with as many people who work with video and time-series data to assess their requirements + and their current workflow for labelling data, along with any specific challenges relating to using currently available tools. +2. Compile a list of features that would be most useful and convenient to satisfy the requirements from objective A. +3. If there is time we will begin to design a user-interface for the module and potentially begin some preliminary implementation. + +## Progress and Next Steps + + +Have had many great discussions this week to assess the needs of various different projects. It seems that we can separate the task of segmenting time-series data into +2 distinct categories: +1. 
Segmentation of regions within the video frames + - The task that we typically think of when we hear the word segmentation in reference to medical imaging + - Requires tools for defining relatively small regions of interest + - There is potential here for integration of online learning, but should treat the entire time-series as one object, rather than splitting into individual frames +2. Segmentation of the time-series as a whole into separate phases + - More closely resembles a classification problem + - Must be able to accommodate time-series with data obtained from multiple modalities + - Potential need for automatic segmentation of time-series into phases + - Many helpful suggestions given by Steve Pieper about how to improve my current module for labelling this type of data (e.g. linking the range sliders to the sequence slider, automatic navigation to a phase within the series, use of a single button to add a phase to the segmentation at the current time in the series) + +For both of these tasks I will work to update them so that they can accommodate the use of AI to assist in the annotation process, whether that involves running the network locally on the computer, integrated into Slicer, or on a server such as used by MONAI projects. 
+ +# Illustrations + +Current module that I've developed for labelling time-series data for classification purposes: + + +https://user-images.githubusercontent.com/22460517/124283562-0be32400-db1a-11eb-9046-36f435ba8cac.mp4 + + + +# Background and References + + +The source code for this module can be found here: https://github.com/SlicerIGT/aigt/tree/master/DeepLearnLive diff --git a/PW35_2021_Virtual/Projects/TimeSequenceRegistration/README.md b/PW35_2021_Virtual/Projects/TimeSequenceRegistration/README.md new file mode 100644 index 000000000..357cffc1b --- /dev/null +++ b/PW35_2021_Virtual/Projects/TimeSequenceRegistration/README.md @@ -0,0 +1,85 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Time Sequence Registration for Deep Learning + +## Key Investigators + +- Curtis Lisle, Ph.D. (KnowledgeVis,LLC) +- Neha Goyal, (U Mass, Boston) +- Greg Sharp, Ph.D. (MGH) + +## Project Description + +We plan to prepare a Lung RadioTherapy patient cohort for deep learning segmentation and annotation. We will first use Slicer and other NA-MIC +tools to register patients' follow-up scans to their original planning CT scans as a preparatory step for deep learning. if time allows, we will train a deep neural network to predict patient outcomes from the time series data. + +## Objective + + + + +1. Objective A. Align follow-up scans to planning CTs using 3D Slicer, Plastimatch, or other algorithms. +2. Objective B. Gain insight on traditional registration methods vs. emerging deep learning methods +3. Objective C. Develop scripts to run on multiple patients and inspect the results using 3D Slicer. + +## Approach and Plan + + + +1. Select candidate patient scans and registration tools in Slicer +2. Compare registration results from a few algorithms in Slicer (Elastix, Plastimatch, etc.) +4. Investigate Deep Learning based methods for image registration (e.g. VoxelMorph, DeepReg) +6. 
Determine which approaches are suitable for automated use on a large cohort + +## Progress and Next Steps + + + +1. Acquired anonymous patient dataset with preplanning CT & PET, planning CT, follow-up CT and PET +1. We solicited recommendations: Elastix and Plastimatch both offer a command line option as well as Slicer integration +1. **Plastimatch:** An interactive session with Greg showed us how to register scans (see below example) +1. **MONAI:** - Thanks to Neha for adapting a MONAI / DeepReg example and training a DNN for registration between patient CTs. This is still a work-in-progress, but it shows promise. Currently, this approach requires more computation time during training than traditional registration methods. However, in certain cases, inferencing on a pretrained registration network is reported to be faster than traditional methods, such as B-spline deformable registration. We didn't test the claim this week. +1. Automation: Curt began developing scripts for automatic registration between the planning CT and the follow-ups for each patient in a cohort +1. We didn't get to training a network on the registered cohort this week, but we have tested all the steps individually. +4. **Next Steps:** - Tweak registration parameters to improve results; Run on 100+ patient cohort; train deep learning network; celebrate with clinicians. + +## Illustrations + + +The Planning CTs have excellent annotations: + +![Planning CT has excellent structure segmentation](https://data.kitware.com/api/v1/item/60d92be32fa25629b980f149/download?contentDisposition=inline) + +But since follow-up scans are from different times, there is no registration between them. The CTs are "miles apart". 
Shown below are the annotations from the planning CT superimposed over a follow-up CT, showing the shift between the different patient scans: + +![No registration between successive scans](https://data.kitware.com/api/v1/item/60d92be52fa25629b980f151/download?contentDisposition=inline) + +After a preliminary registration in Plastimatch, the anatomy annotations are much closer, as shown below. The image shows the original segmentation objects superimposed over a registered follow up scan taken 3 months later. Because of the time between scans, there were actual morphology changes to the anatomy as well. This result was encouraging after trying only a few parameter exploration attempts. Since Plastimatch operations can be scripted, this approach can automate registration for multiple patient scans in a cohort: + +![Images after a preliminary registration](https://data.kitware.com/api/v1/item/60df063c2fa25629b9d1ae28/download?contentDisposition=inline) + +Below is a snapshot of how the segmentation mask for the moving image is growing to match the anatomy and mask in the fixed image. Our deep learning registration results this week don't match as well as using traditional methods, but this is an emerging application area for deep learning that will continue to improve. Thanks, Neha! + +![deep learning registration changes](https://data.kitware.com/api/v1/item/60df14472fa25629b9d34d65/download?contentDisposition=inline) + +We also learned that giving a registration system incorrect parameters can warp a moving image too much. After generating a strangely warped image by mistake, we just gave it some coloring to create art. Here are our project team's two submissions to the "Project Week 35 3D-Slicer Art Competition". Vote for your favorite. Vote by editing this page or vote on Curt's facebook page... 
+ +![slicer art](https://data.kitware.com/api/v1/item/60df033a2fa25629b9d17345/download?contentDisposition=inline) + +Votes for #1: 0 + +Votes for #2: 0 + +## Background and References + +The **Image Data Commons** has datasets with annotations across multiple time points, so this is an available dataset to practice registration techniques. Free Google Cloud credentials are available for experimenting without having to download data for processing. Simply select the cohort through IDC for analysis: +https://imaging.datacommons.cancer.gov/explore/?filters_for_load=%5B%7B%22filters%22:%5B%7B%22id%22:%22120%22,%22values%22:%5B%22qin_prostate_repeatability%22%5D%7D%5D%7D%5D + + +Registration tools have been added to Project-MONAI in the 0.5 release. The MONAI tutorials include a registration example now, which we used as a basis for our experimentation: + +https://github.com/Project-MONAI/tutorials/blob/master/3d_registration/paired_lung_ct.ipynb diff --git a/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/README.md b/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/README.md new file mode 100644 index 000000000..a40c51c90 --- /dev/null +++ b/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/README.md @@ -0,0 +1,70 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# US-CT Vertebra Registration + +## Key Investigators + +- Houssem Gueziri (Montreal Neurological Institute, Montreal, Canada) +- Tamas Ungi (Queen's University, Kingston, Canada) + +# Project Description + +This project aims at evaluating the feasibility of percutaneous US to CT image registration, on a porcine dataset, for minimally invasive spine surgery. +The goal is to combine the registration method for _open_ surgery implemented in IBIS with the segmentation/bone enhancement method in AIGT. + +## Objective + + + +1. Read/Write US data acquired with IBIS into Slicer. +3. Segment the vertebral surface of US data obtained from porcine cadavers +4. 
Register segmented images with CT images + +## Approach and Plan + + + +1. Convert the data from IBIS acquisitions to ultrasound sequences +2. Generate ground truth segmentation from CT images +3. Use AIGT to train model for axial image segmentation - [Video tutorial](https://youtu.be/l0BcW8c9CnI) +4. Use segmented data with IBIS registration and evaluate registration + +## Progress and Next Steps + + + +- Align US images with CT using ground truth transform and export data as Sequences readable in Slicer :heavy_check_mark: +- Segment data using pre-trained model:heavy_check_mark: +- Generate ground truth segmentation and train model [TODO] +- Segment data with fine-tuned model [TODO] + +# Illustrations + + + +**Data processing workflow:** + +![Workflow](workflow.png) + +**Generate aligned CT-US data:** + +![US-CT Data](https://github.com/NA-MIC/ProjectWeek/releases/download/project-week-resources/PW35__US_CT_VertebraRegistration__US-CTAlignment.gif) + + + +**Segmentation with pre-trained model:** + +![Segmentation](Segmentation.png) + + + + + + + +# Background and References + + diff --git a/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/Segmentation.png b/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/Segmentation.png new file mode 100644 index 000000000..b5b5a39a8 Binary files /dev/null and b/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/Segmentation.png differ diff --git a/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/navigation.jpg b/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/navigation.jpg new file mode 100644 index 000000000..9b430d242 Binary files /dev/null and b/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/navigation.jpg differ diff --git a/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/open.png b/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/open.png new file mode 100644 index 000000000..feac28305 Binary files /dev/null and 
b/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/open.png differ diff --git a/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/percutaneous.png b/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/percutaneous.png new file mode 100644 index 000000000..110fddbc5 Binary files /dev/null and b/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/percutaneous.png differ diff --git a/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/registration.png b/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/registration.png new file mode 100644 index 000000000..d9d12d12b Binary files /dev/null and b/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/registration.png differ diff --git a/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/workflow.png b/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/workflow.png new file mode 100644 index 000000000..3868083e1 Binary files /dev/null and b/PW35_2021_Virtual/Projects/US_CT_VertebraRegistration/workflow.png differ diff --git a/PW35_2021_Virtual/Projects/VRBirthDeliveryTraining/README.md b/PW35_2021_Virtual/Projects/VRBirthDeliveryTraining/README.md new file mode 100644 index 000000000..f52e62dfd --- /dev/null +++ b/PW35_2021_Virtual/Projects/VRBirthDeliveryTraining/README.md @@ -0,0 +1,95 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# VR for Birth Delivery Training + +## Key Investigators + +- Mónica García-Sevilla (Universidad Las Palmas de Gran Canaria, Las Palmas de Gran Canaria, Spain) +- Abián Hernández-Guedes (Universidad Las Palmas de Gran Canaria, Las Palmas de Gran Canaria, Spain) +- Nayra Pumar (Ebatinca S.L., Las Palmas de Gran Canaria, Spain) +- David García-Mato (Ebatinca S.L., Las Palmas de Gran Canaria, Spain) +- Juan Ruiz Alzola (Universidad Las Palmas de Gran Canaria, Las Palmas de Gran Canaria, Spain) +- Javier Pascau (Universidad Carlos III de Madrid, Madrid, Spain) +- Csaba Pinter (Ebatinca S.L., Las Palmas de Gran Canaria, Spain) + +# Project Description + +The 
World Health Organization recommends a rate of cesareans lower than 15%. +However, the actual rates in the US double this value, while the use of obstetrical instruments, +a recommended alternative to cesareans but which requires high skill and experience, has significantly decreased in recent years. +In this context there is a clear demand for simulators, with special interest in learning the correct use of Kielland’s forceps. + +In 2018, we developed training software in 3D Slicer for the correct use of forceps. +We used anatomical simulators of the mother and fetus, a forceps 3D printed in non-ferromagnetic material, and an electromagnetic tracking system to track the movements of the forceps relative to the simulators. +Further details can be found [here](https://link.springer.com/chapter/10.1007%2F978-3-030-01201-4_9). + +The goal of this project is to translate this software into a Virtual Reality (VR) application using the SlicerVR extension. This way, only the VR device is required for training. + +## Objective + + +1. Visualize the simulators and forceps models in the VR scene. +2. Interact with the models using the controllers. +3. Select the step of the procedure. +4. Check whether the maneuver for the step is correct or not. +5. Enable a collaborative mode. + + +## Approach and Plan + + + +1. Visualize the simulators and forceps models in the VR scene. :heavy_check_mark: +2. Define a correct starting viewpoint. :heavy_check_mark: +3. Decide how to move the forceps with the VR controllers. :heavy_check_mark: +4. Learn how to access buttons from the controllers. (Already tested although with Simon's version) ❗ +5. Define a way of selecting the step for the procedure (assembly, presentation, initial placement, final placement). A panel could be a good idea. +6. For each step, check whether the placement was correct or not. :heavy_check_mark: (2/6) +7. Connect to the same scene from another device. + +## Progress and Next Steps + + + +1. 
3D models of the mother and baby are displayed in the VR glasses with an adequate size. +1. The 3D models are displayed in front of the user when starting the application. If the user changes position, the view can be reset to show the model in front of the user again. +2. Controller models are hidden and substituted by the forceps. The position of the forceps is configured as if the user was grabbing them. +3. The first two steps of the procedure (arrangement and presentation) have been added to the module. Forceps are displayed in green when correct and in red when incorrect. +4. The evaluation of each step is performed in real time. It has to be selected by the user in the module. Buttons for all the steps have been added. +5. When a step is selected, the name of the step is displayed on the scene. 3D models of the text have been created to show the message. + +## To Do: +1. Access controller buttons so the user can change step without removing the headset. A panel widget could also be a good solution. +2. Add the remaining steps. +3. Add the collaborative option. 
+ +# Illustrations + + +## Previous setup (non-VR): +![module_scene_and_panel](https://user-images.githubusercontent.com/17642986/123103298-c34aad00-d42d-11eb-925a-15dd4b7bc4f0.png) +![experts_training](https://user-images.githubusercontent.com/17642986/123102863-6222d980-d42d-11eb-9292-e8731f1d4271.jpg) +![novices_training](https://user-images.githubusercontent.com/17642986/123102867-62bb7000-d42d-11eb-9f8b-f53d87b1000f.jpg) + +## VR solution: +![setup](https://user-images.githubusercontent.com/17642986/124196551-9436da00-dac4-11eb-8441-e573675dc887.png) +### View +![setup](https://user-images.githubusercontent.com/17642986/124282685-04844080-db43-11eb-9f43-bdf9785fdf33.gif) +### Arrangement +![arrangement](https://user-images.githubusercontent.com/17642986/124282699-0948f480-db43-11eb-8386-422fc634504e.gif) +### Presentation +![presentation](https://user-images.githubusercontent.com/17642986/124284397-ddc70980-db44-11eb-861a-e394383d6173.gif) +### Controller buttons interaction +![controllerButtons](https://user-images.githubusercontent.com/17642986/124281394-9428ef80-db41-11eb-969f-c3a82a0d5f54.gif) + +VR video: [https://youtu.be/Q8b7IehEQhE](https://youtu.be/Q8b7IehEQhE) + + +# Background and References + + +* [Publication of non-VR training system](https://link.springer.com/chapter/10.1007%2F978-3-030-01201-4_9) +* [Video of non-VR training system](https://www.youtube.com/watch?v=EEasWbH1jZI) diff --git a/PW35_2021_Virtual/Projects/VRDisplayPluginForPyDBSUsingZspace/README.md b/PW35_2021_Virtual/Projects/VRDisplayPluginForPyDBSUsingZspace/README.md new file mode 100644 index 000000000..c5b6bcabc --- /dev/null +++ b/PW35_2021_Virtual/Projects/VRDisplayPluginForPyDBSUsingZspace/README.md @@ -0,0 +1,114 @@ +Back to [Projects List](../../README.md#ProjectsList) + + +# VR display plugin for PyDBS using a zSpace device + + +## Key Investigators + +- Marine CAMBA (STIM team, CENIR - ICM, France) + +- Sara FERNANDEZ VIDAL (STIM team, CENIR - ICM, France) + +- Sinan 
HALIYO (Multi Scale Interaction team, ISIR, France) + +# Project Description + + +This project aims at developing a visualization tool that could help neurosurgeons who practice Deep Brain Stimulation. This tool will allow users to navigate in the brain of patients (eg parkinsonian patients), in complex scenes that comprise MR images, anatomical atlases, stimulation electrodes, functional maps, etc… It will be used to plan trajectories for surgery, and to train future surgeons, as it is preferred to navigate in a 3D space in order to get a better understanding of the complexity of the human brain, and therefore be more precise in the OR. + +zSpace technology is at the interface of VR and 3D holographic-like environments. Thus, it can make navigating into Slicer's 3D scenes easier, and closer to reality. A module had already been made in Slicer 4.3. Unfortunately, after some changes in the zSpace API, it was no longer running nor maintained. Earlier this year, Paraview, another VTK based software, launched a new plugin to allow a user to get access to the full potential of the zSpace device. + +Thanks to those achievements, we thought it would be interesting to create a new plugin for Slicer to work on this particular device, and help neurosurgical planification and postoperative navigation. The plugin would include head tracking and a stereoscopic view, to display in 3D, and a ray to interact with the objects using the given stylus. A good start was to try if Slicer could do QuadBuffer stereo by itself, prior to connecting it to our zSpace, as QuadBuffer is the technology used by this device to display in 3D. + +We would use VTK 9 as Slicer is planning on using it for its future launches, and Slicer 4.13, the latest version compatible with this VTK. Indeed, using Slicer's current VR extension is not feasible as the technologies are not close enough. + + + +## Objective + + + + + + + +1. Code a plugin to display PyDBS surgery planification module in QuadBuffer mode. + +2. 
Same but using zSpace API and stylus. + + + +## Approach and Plan + + + + + + + +1. Create a Qt/VTK based plugin to know how to render in Quadbuffer mode. + +2. Make it work on Slicer 4.13. + +3. Upgrading this plugin so it uses zSpace API rather than Qt/VTK functions (can then do head tracking...). + +4. Adding the stylus following Paraview example. + +5. See how PyDBS surgery planification is working with it. + + + +## Progress and Next Steps + + + + + + + +1. A small VTK widget which displays a sphere in QuadBuffer mode using Qt/VTK functions. + +2. 2 widgets, one displayed in 3D QuadBuffer mode, the other in classic 2D, to see where I need to make the changes and how. + +3. Creating a scripted module that displays a 3D view outside the Slicer layout, it displays the scene in stereo (red/blue etc) but not in quadbuffer mode for now. + +4. Next step : change the default type widget of Slicer 4.13 so I can have my module working in quadbuffer mode. + + + +# Illustrations + +Already existing QuadBuffer stereo mode in 3DSlicer 4.8, photo taken by phone so quadbuffered image can be seen as it is supposed to without glasses: +IMG_20210628_112539 + + +Module created and tested before implementing it in Slicer 4.13 using VTK 9 and QT 4.15, still taken by phone: +IMG_20210628_112539 + + +zSpace device: +Automotive_Student + + +Pre-op module for trajectory planning, using PyDBS: +image + + +Scripted module to display a new 3D view outside the actual layout [1] and in different stereo modes [2]. 
For now even if the button is named QuadBuffer this mode is not working: + +[1] Capture d’écran du 2021-06-30 15-41-07 +[2] Capture d’écran du 2021-06-30 16-01-33 + + + + + +# Background and References + + + + +* [Paraview support for ZSpace](https://blog.kitware.com/zspace-device-support-coming-to-paraview/) +* [Paraview plugin's repository](https://gitlab.kitware.com/paraview/paraview/-/tree/master/Plugins/ZSpace) +* [Slicer/zSpace implementation 2013](https://fr.slideshare.net/zSpace/pieper-slicer-clinicalzspace20131021) diff --git a/PW35_2021_Virtual/Projects/VirtualCameras/README.md b/PW35_2021_Virtual/Projects/VirtualCameras/README.md new file mode 100644 index 000000000..f0d03cb3a --- /dev/null +++ b/PW35_2021_Virtual/Projects/VirtualCameras/README.md @@ -0,0 +1,51 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Virtual Cameras in Slicer (Viewpoints module) + +## Key Investigators + +- Étienne Léger (Concordia University) +- Tamas Ungi (Queen's University) +- Andras Lasso (Queen's University) + +# Project Description + + + +Extend functionalities of the current Viewpoints module. + +## Objective + + + +1. Add possibility to dissociate virtual camera position from 3D view rendering +1. Add model in 3D view to visualize position of camera relative to the scene + +## Approach and Plan + + + +1. Add camera representation in 3D view (wireframe) +1. Generate rendering from tracked camera and store as image +1. Add option to load camera parameters from file + +## Progress and Next Steps + + + +1. Camera wireframe added +1. ... +1. ... + +# Illustrations + + + +1. 
Add screenshot + +# Background and References + + diff --git a/PW35_2021_Virtual/Projects/mpReview/README.md b/PW35_2021_Virtual/Projects/mpReview/README.md new file mode 100644 index 000000000..400c27625 --- /dev/null +++ b/PW35_2021_Virtual/Projects/mpReview/README.md @@ -0,0 +1,62 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# mpReview: Development of a streamlined Slicer module for (manual) image annotation + +## Key Investigators + +- Andrey Fedorov (BWH) +- Dora Szasz (U.Chicago) +- Masoom Haider (U.Toronto) +- Aytek Oto (U.Chicago) +- Andras Lasso (Queen's) +- Fiona Fennessy (BWH) +- Christian Herz (CHOP) +- Steve Pieper (Isomics) + +## Project Description + +**WE ARE HIRING - see [job opportunities here](https://spl.harvard.edu/join-us) if interested!** + +In the past we have developed mpReview extension to streamline the manual annotation workflow of multiparametric MRI studies, designed specifically for prostate MRI annotation initially. The extension proved useful over time, and was utilized to support annotation of prostate MRI at BWH and U.Chicago. Over the recent years we were not able to maintain this extension to keep up to date with 3D Slicer upgrades. However, currently there is renewed interest in reviving and perhaps rewriting this extension, as it serves a need not addressed by any other capability in Slicer, or in commercial tools. The goal of this project is to evaluate the current status of the extension, collect the requirement and identify next steps for its development. + +## Objective + + + +1. Document the current capabilities of the extension. +2. Document the annotation workflow requirements and the desired capabilities of the extension. +3. Identify relevant components of 3D Slicer that can be used to improve current implementation. +4. Define the next steps and the effort needed to implement them and interest from various groups of potential users to contribute to the development. + +## Approach and Plan + + + +1. 
Evaluate current status of the module wrt the preview release of Slicer. +2. Document the workflow and desired features of the annotation module. +3. Identify next steps. +4. Revisit the [Slicer PI-RADS module](https://github.com/SlicerProstate/SlicerPIRADS) (WIP years ago) + +## Progress and Next Steps + + +1. Revised documentation for the current version of the module: https://github.com/SlicerProstate/mpReview/wiki/Documentation +2. Updated test dataset +3. Discussed the current implementation, discussion notes [here](https://docs.google.com/document/d/1f6gXrl-u1mkMPVfLLT4oLHwPS8sZp48ent-qyWPzDMk/edit) +4. Tested with the current version of Slicer - some legacy Editor effects no longer work. +5. Based on feedback from Masoom, there is not much interest in patching existing mpReview, would need to rewrite it from scratch to work with the current Slicer infrastructure. + * Instead of a custom module, maybe improve Slicer itself to improve effiency, like [this DICOM thumbnail experiemnt](https://github.com/commontk/CTK/pull/979) + + +## Illustrations + + + +![mpReview UI](mpReview_screenshot.jpg) + +## Background and References + + diff --git a/PW35_2021_Virtual/Projects/mpReview/mpReview_screenshot.jpg b/PW35_2021_Virtual/Projects/mpReview/mpReview_screenshot.jpg new file mode 100644 index 000000000..354c5a1fa Binary files /dev/null and b/PW35_2021_Virtual/Projects/mpReview/mpReview_screenshot.jpg differ diff --git a/PW35_2021_Virtual/README.md b/PW35_2021_Virtual/README.md new file mode 100644 index 000000000..00ad918e0 --- /dev/null +++ b/PW35_2021_Virtual/README.md @@ -0,0 +1,265 @@ + + +## Welcome to the web page for the 35th Project Week! + +This event was held virtually June 28-July 2, 2021. + +## Numbers and Introduction +- **140** Registered Attendees. **44%** First Timers! +- **23** countries. + - Bimodal Time Zone Distribution (UTC-4 and UTC+1). +- **29** Projects. 
+
- All sessions except for work in project teams will happen on Zoom. Please see google calendar entries for links for both zoom and discord.
+- Begin with the good old recipe of in-person Project Weeks.
+  - Each team delegates a member to present the project in a maximum of 90 seconds using the project page.
+  - Hope your project page is ready and any image you want to show is up there. There will not be enough time to share your screen, we will simply share your project page on the zoom session.
+- Presenters - please stick with the allocated time, if your internet connection has problems during your presentation, we will move to the next speaker and we can accommodate your talk at the end, time permitting.
+- Audience - please ask the speaker questions using zoom chat or the discord server chat.
+- Work in project teams will happen throughout the week.
+  - A Discord server has been set up with a voice/video channel and a text channel for every project on the main webpage.
+  - We will have a walkthrough of the discord server and project channels as the last project presentation today.
+  - You can use the channels to coordinate the work of your team.
+  - It is recommended to hold a first meeting of the team on the voice channel of the project after project presentations on Monday.
+  - Please do everything you can to accommodate all team members in terms of schedule, especially the ones with family constraints living in time zones that are less favored by the schedule of Project Week.
+- The program includes optional introductory lectures (8-9:30 Tue-Wed-Thu) and breakout sessions (10-12 Tue-Wed-Thu), all happening on Zoom.
+  - Tuesday: AI-assisted annotations (lectures), What’s new in Slicer and a QnA session that was very popular last year
+  - Wednesday: Image Guided Surgery lectures and breakout session on the same topic
+  - Thursday: Web-based tools lecture, and breakout session on Human Brain Atlases
+- We will end the week with project results presentation.
A member from each team will present their results in a maximum of 90 seconds using the project page as a visual support for the presentation, so please make sure it is up to date with your latest results by Friday morning.
+
+
+### Attendees
+- **Step 0**: **REGISTER** [here](https://forms.gle/evnWqMu4dnsx3Mei9)
+- **Step 1**: Sign-up on the **[discourse forum](https://discourse.slicer.org/c/community/project-week)** to get updates and ask questions.
+- **Step 2** (optional): Join the **Discord** server to help prepare and work on your project: [Invite Link](https://discord.gg/5TC5H2g63e)
+
+
+## Agenda
+
+
+ + + + + + + +[How to add this calendar to your own?](../common/Calendar.md) + + + +## Breakout sessions complementary material +* [What's new in Slicer](https://docs.google.com/presentation/d/19LIMkLDmTmuJoA-5CFNjWNosoAYB4OoyvyUNryurMSE/edit?usp=sharing)(Andras Lasso, Jean-Christophe Fillion-Robin, Sam Horvath, Steve Pieper) +* [MONAI introductory presentation](https://drive.google.com/file/d/1haqTUNLGVeE0V-qBVuuFqIhKXZFpkBAf/view) (Wenqi Li, NVIDIA) +* [MONAI Label introductory presentation](https://docs.google.com/presentation/d/1FYv23AbFPloTKcsTrP75bKI1XStULVlUSI6ZF3VCp-Y/edit?usp=sharing) (Andres Diaz-Pinto, King's College London) +* [DICOM Overview](supplementary-material/DICOM-Overview.pdf) (Steve Pieper) +* [OHIF v3 and a glance into Cornerstone 3D](https://docs.google.com/presentation/d/1KYNjuiI8lT1foQ4P9TGNV0lBhM6H-5KBs0wkYj4JJbk/edit?usp=sharing) (Erik Ziegler, Alireza Sedghi) +* [NCI Imaging Data Commons](https://docs.google.com/presentation/d/1-_oHDbYqArylwF2K2aAy1mD0-C0F9gu-UFvJ8lpDWS4/edit?usp=sharing) (Andrey Fedorov) + +## Projects [(How to add a new project?)](Projects/README.md) +### VR/AR and Rendering +1. [PRISM volume rendering](Projects/PRISM_volume_rendering/README.md) (Simon Drouin, Steve Pieper, Kyle Sunderland, Andrey Titov, Rafael Palomar) +1. [SlicerVR build and in-VR widgets](Projects/SlicerVR/README.md) (Csaba Pinter, Adam Rankin, Jean-Christophe Fillion-Robin) +1. [Virtual cameras](Projects/VirtualCameras/README.md) (Étienne Léger, Tamas Ungi, Andras Lasso) +1. [TMS Visualization in Slicer](Projects/TMS_Slicer_Module/README.md) (Loraine Franke, Lipeng Ning, Yogesh Rathi, Steve Pieper, Raymond Yang, Daniel Haehn) +1. [AR in Slicer](Projects/ARinSlicer/README.md) (Alicia Pose Díez de la Lastra, Javier Pascau, Csaba Pinter) +1. [Interactive Slice Intersections](Projects/InteractiveSliceIntersections/README.md) (David García-Mato, Kyle Sunderland, Csaba Pinter) +1. 
[VR for Birth Delivery Training](Projects/VRBirthDeliveryTraining/README.md) (Mónica García-Sevilla, David García-Mato, Abián Hernández-Guedes, Juan Ruiz Alzola, Javier Pascau, Nayra Pumar, Csaba Pinter) +1. [VR display plugin for PyDBS using a zSpace device](Projects/VRDisplayPluginForPyDBSUsingZspace/README.md) (Marine Camba, Sara Fernandez Vidal, Sinan Haliyo) + +### IGT +1. [NousNav](Projects/NousNav/README.md) (Alexandra Golby, Sam Horvath, Sarah Frisken, David Allemang, Tina Kapur, Steve Pieper, Jean-Christophe Fillion-Robin, Sonia Pujol) +1. [DBS Navigation](Projects/DBSNavigation/README.md) (Simon Oxenford) +1. [Slicer module for planning MR-guided focal cryoablation of prostate cancer](Projects/ProstateCryoablationPlanning/README.md) (Pedro Moreira) +1. [Slicer-Liver: planning liver resections in 3D Slicer](Projects/Slicer-Liver/README.md) (Rafael Palomar, Gabriella d'Albenzio, Ole Vegard Solberg, Geir Arne Tangen) +1. [ROS2 - 3D Slicer Integration](Projects/ROSMED/README.md) (Junichi Tokuda, Tamas Ungi, Axel Krieger, Simon Leonard, Mark Fuge) +1. [IGT training material for francophone countries](Projects/IGTrain/README.md) (Nayra Pumar, Mohamed El Moctar Septy, Yahya Tfeil, Asmaa Skareb, Marilola Afonso, Juan Ruiz Alzola) +1. [GPU Rigid Registration](Projects/GPURigidRegistration/README.md) (Gelel Rezig, Houssem Eddine Gueziri, Simon Drouin) +1. [Planar Osteotomies Virtual Surgical Planning And Patient-Specific Surgical Guides](Projects/PlanarOsteotomiesVSPAndSurgicalGuides/README.md) (Mauro I. Dominguez, Manjula Herath) +1. [Marklerless Tracking for Low-Cost neuronavigation for TMS](Projects/MarkerlessTrackingWithRGBDCamerasForLowCostNeuronavigation/README.md) (Julie Alvarez (Neurotrauma Center), Gabriel Vargas Grau (Universidad de Santander), Juan Camilo Gamboa (Mc Gill University), Andrés Gamboa (Neurotrauma/Universidad Politécnica de Valencia/)) +1. 
[IGT Equipment for ReUse](https://docs.google.com/spreadsheets/d/1MNkcZFz4GulkjOL4V5PYLzRwrgUT3rNB_mnKSHBq7aw/edit?usp=sharing) (Tina Kapur, Gabor Fichtinger, +) +1. [OpenIGTLink](Projects/OpenIGTLink/README.md) (Junichi Tokuda) +1. [US-CT Vertebra Registration](Projects/US_CT_VertebraRegistration) (Houssem Gueziri, Tamas Ungi) + +### Deep learning and segmentation +1. [DeepHeart MONAILabel integration](Projects/DeepHeart/README.md) (Matthew Jolley, Christian Herz, Danielle F. Pace, Andras Lasso) +1. [Registration for Deep Learning](Projects/TimeSequenceRegistration/README.md) (Curtis Lisle, Neha Goyal, Greg Sharp) +1. [Integration of PyTorch and Slicer](Projects/PyTorchIntegration/README.md) (Fernando Pérez-García, Andrés Díaz-Pinto, Andras Lasso, Curtis Lisle, Rebecca Hisey, Steve Pieper, Tamas Ungi) +1. [Development of Deep Learning Segmentation for Spines with Metastatic Disease](Projects/SpineSegmentation/README.md) (Ron Alkalay, Curtis Lisle, Andres Diaz-Pinto) +1. [Development of Deep Learning Based Brain Masking](Projects/CNN_Brain_Masking/README.md) (Raymond Yang, Lipeng Ning, Yogesh Rathi, Steve Pieper, Loraine Franke, Daniel Haehn) +1. [Deep Learning for Subcortical Brain Segmentation](Projects/DeepLearningforSubcorticalBrainSegmentation/README.md) (Jarrett Rushmore, Elizabeth Kenneally, Sylvain Bouix, Kyle Sunderland, Nikos Makris) +1. [Time-Series Segmentation Module](Projects/Time-Series%20Segmentation%20Module/README.md)(Rebecca Hisey, Tamas Ungi, Andras Lasso, Andres Diaz-Pinto, Tina Kapur) +1. [MONAI Label](https://github.com/NA-MIC/ProjectWeek/tree/master/PW35_2021_Virtual/Projects/MONAILabel)(Andres Diaz-Pinto, Fernando Pérez-García, Sachidanand Alle, Alvin Ihsani, Vishwesh Nath) + +### Cloud, open data and annotation +1. [SlicerOnDemand](Projects/SlicerOnDemand/README.md) (Steve Pieper, Curt Lisle, Andrey Fedorov, Theodore Aptekarev) +1. 
[NCI Imaging Data Commons](Projects/NCIImagingDataCommons/README.md) (Andrey Fedorov, Markus Herrmann, Theodore Aptekarev, Steve Pieper, Ron Kikinis) +1. [mpReview: Development of a streamlined Slicer module for (manual) image annotation](Projects/mpReview/README.md) (Andrey Fedorov, Dora Szasz, Masoom Haider, Aytek Oto, Andras Lasso, Fiona Fennessy) +1. [DICOM-SR: Extending DICOM-SR support in dcmjs and adding test cases](Projects/DICOM-SR/README.md) (Emel Alkim, Steve Pieper, Andrey Fedorov) +1. [Slicer for Microscopy Data](Projects/SlicerForMicroscopyData/README.md) (Sindhura Thirumal, Steve Pieper, Tina Kapur) +1. [Discourse Meet and Greet](https://discord.com/invite/5TC5H2g63e)(Simon Drouin) + +## Registrants + +Do not add your name to this list below. It is maintained by the organizers based on your registration. [Register here](https://forms.gle/evnWqMu4dnsx3Mei9). + +List of registered participants so far (names will be added here after processing registrations): + +1. Mónica García Sevilla , Universidad de Las Palmas de Gran Canaria , Gran Canaria , Spain +1. ZhenXiao Yu , University Of Western Ontario , Ontario , Canada +1. Shreyas Chandra Sekhar , WPI , CA , USA +1. Tina Kapur , Brigham and Women's Hospital and Harvard Medical School , MA , United States +1. Sam Horvath , Kitware , North Carolina , United States +1. Steve Pieper , Isomics, Inc. , MA , US +1. Simon Oxenford , Charite Berlin , Berlin , Germany +1. Theodore Aptekarev , None , Moscow/Tel Aviv , Israel/Russia +1. Samuelle St-Onge , École de Technologie Supérieure , Montreal , Canada +1. Csaba Pinter , Ebatinca / Pixel Medical , Canarias , Spain +1. Miguel Xochicale , King's College London , London , UK +1. Thibault Pelletier , Kitware , Rhône-Alpes , France +1. Saima Safdar , UWA , Western australia , Australia +1. Juan Ruiz-Alzola , University of Las Palmas de Gran Canaria , Canarias , Spain +1. David García Mato , Ebatinca S.L. , Las Palmas , Spain +1. 
Mario Banfoldy , Banfoldy and Partners , SP , Brasilien +1. Simon Drouin , École de Technologie Supérieure , Montreal , Canada +1. Ron Kikinis , Harvard Medical School , Boston , United States +1. Herbert Shin , University of Western Ontario , , Canada +1. Ahmedou Moulaye IDRISS , Faculty of Medicine / University of Nouakchott Al Aasriya , Nashville , Mauritania +1. Ron Alkalay , Beth Israel Deaconess Medical Center , Bosotn , US +1. Alicia Pose Díez de la Lastra , Universidad Carlos III de Madrid , Leganés , España +1. Chenglin Zhu , Cornell University , Ithaca , Study in US, live in China +1. Jarrett Rushmore , BU/BWH/MGH , Boston , USA +1. Walia Farzana , Old Dominion University , Norfolk , United States of America +1. Mamadou Samba Camara , University Cheikh Anta Diop of Dakar , Dakar , Senegal +1. Fernando Pérez-García , UCL & King's College London , , United Kingdom +1. Curtis Lisle , KnowledgeVis, LLC , Altamonte Springs , United States +1. Tamas Ungi , Queen's University , Kingston , Canada +1. Andras Lasso , PerkLab, Queen's University , Kingston , Canada +1. Junichi Tokuda , Brigham and Women's Hospital , Boston , United States +1. Maximilian Fischer , German Cancer Research Center , , Germany +1. Marilola Afonso , Universidad de Las Palmas de Gran Canaria , Las Palmas de Gran Canaria , Spain +1. Étienne Léger , Concordia University , Montréal , Canada +1. Idafen Santana-Perez , University of Las Palmas de Gran Canaria , Las Palmas de Gran Canaria , Spain +1. Loraine Franke , University of Massachusetts Boston , Boston , United States +1. Varun Kumar Agarwal , Bareilly International University , Bareilly , India +1. Christian Herz , Children's Hospital of Philadelphia , , United States +1. Mohamed El Moctar SEPTY , Medical School of Nouakchott- UNA , Nouakchott , Mauritania +1. TFeil Yahya , Faculty of Medicine of University of Nouakchott Al Aasriya , Nouakchott , Mauritania +1. Joshua Bierbrier , McGill University , Montreal , Canada +1. 
Leah Groves , Western University , London , Canada +1. Alfredo Morales Pinzon , BWH , Boston , United States +1. Sylvain Bouix , BWH , Boston , USA +1. Ahmedou Moulaye IDRISS , Faculty of Medicine - University Nouakchott Al Aasriya , Nouakchott , Mauritania +1. Sonia Pujol , Brigham and Women's Hospital, Harvard Medical School , Boston , USA +1. Raymond Yang , University of Massachusetts Boston , Edison , United States of America +1. Sarah Frisken , Brigham and Women's Hospital , Boston , USA +1. Pedro Moreira , Brigham and Women's Hospital , Boston , USA +1. Rebecca Hisey , Queen's University , Kingston , Canada +1. Andrey Titov , École de technologie supérieure , Saint-Bruno-de-Montarville , Canada +1. Li Zhenzhu , HuaMei Hospital, University of Chinese Academy of Sciences , NingBo , China +1. Ed Yeterian , Colby College , Pineville , USA +1. Renzo Phellan Aro , McGill University , Montreal , Canada +1. Fahd Derkaoui Hassani , Cheikh Zaid international University Hospital / UIASS , Rabat , Morocco +1. Elizabeth Kenneally , Tufts University , Somerville , United States +1. Lidia Al-Zogbi , Johns Hopkins University , Baltimore , United States +1. Andrey Fedorov , Brigham and Women's Hospital , Cambridge , United States +1. Jayender Jagadeesan , Brigham and Women's Hospital , Boston , US +1. Gelel Rezig , Ecole de Technologie Supérieure , Montréal , Canada +1. Souleymane Diao , Cheikh Anta Diop University , Dakar , Sénégal +1. Rafael Palomar , Oslo University Hospital / NTNU , Oslo , Norway +1. Gabriella d' Albenzio , Oslo University Hospital , Oslo , Norway +1. Ole Vegard Solberg , SINTEF , Trondheim , Norway +1. Mauro I. Dominguez , M3Dical , Hughes , Argentina +1. Adam Rankin , Robarts Research Institute , London , Canada +1. Javier Pascau , Universidad Carlos III de Madrid , Madrid , Spain +1. Yoga Balagurunathan , Moffitt Cancer Center , Tampa , USA +1. Zhouping Wei , Moffitt Cancer Center , Tampa , US +1. 
Abián Hernández , Universidad de Las Palmas de Gran Canaria (ULPGC) , Las Palmas de Gran Canaria , Spain +1. Andinet Enquobahrie , Kitware , CARY , United States +1. Andrés Gamboa , Neurotrauma Center/Universidad Politécnica de Valencia , Bucaramanga/Valencia , Colombia/España +1. Marine CAMBA , CENIR, Paris Brain Institute , Paris , France +1. Teodora Szasz , The University of Chicago , Chicago , United States +1. Geir Arne Tangen , SINTEF , Trondheim , Norway +1. John Witt , Georgetown University, CHOP , Philadelphia , United States +1. Andres Diaz-Pinto , KCL , London , United Kingdom +1. Masoom Haider , University of Toronto , Toronto , Canada +1. Neha Goyal , University of Massachusetts Boston , Boston , United States +1. Batuhan Gundogdu , University of Chicago , Chicago , United States +1. Caio A Neves , University of Brasilia , Brasilia , Brazil +1. Laleh Seyyed-Kalantari , Mount Sinai health , Toronto , Canada +1. Nayra Pumar , EBATINCA , Las Palmas de Gran Canaria , Spain +1. Emel Alkim , Stanford University , MOUNTAIN VIEW , United States +1. Jean-Christophe Fillion-Robin , Kitware , Carrboro , USA +1. Lucas Gandel , Kitware , Villeurbanne , France +1. Sara Fernandez Vidal , ICM , Paris , France +1. Daniel Haehn , University of Massachusetts Boston , Boston , United States +1. Alberto Santamaria-Pang , Microsoft , Redmond , United States +1. Jameson Merkow , Microsoft , San Diego , USA +1. Risto Rangel , Microsoft , Redmond , United States +1. David Allemang , Kitware, Inc. , Carrboro , USA +1. Rudolf Bumm , Cantonal Hospital Graubünden, Department of Surgery , Chur , Switzerland +1. Gordon Harris , Massachusetts General Hospital , Boston , United States +1. Amber Wood-Bailey , University of Liverpool , Liverpool , United Kingdom +1. Lucia Cevidanes , University of Michigamn , Ann Arbor , United States +1. Ernest Namdar , University of Toronto , Toronto , Canada +1. Belkis Abufaur , The Cyprus Institute , , Turkey +1. 
Niral , Johns Hopkins University , Baltimore , USA +1. Rohan Vijayan , Johns Hopkins University , Baltimore , United States of America +1. Miguel Xochicale , King's College London , London , United Kingdom +1. Nayra Pumar , Ebatinca , Las Palmas de Gran Canaria , Spain +1. Izabel Rubira-Bullen , Bauru Dental School - University Sao Paulo , Bauru - Sao Paulo , Brazil +1. Paolo Zaffino , Magna Graecia University of Catanzaro , Lamezia Terme , Italy +1. Nadya Shusharina , Massachusetts General Hospital , Boston , United States +1. Greg Sharp , Massachusetts General Hospital , Boston , USA +1. Nick Jowkar , Brigham and Women's Hospital , Boston , USA +1. leo zekelman , Harvard Medical School , Boston , USA +1. Parikshit Juvekar , Brigham & Women's Hospital , Boston , USA +1. Axel Krieger , Johns Hopkins University , Baltimore , United States +1. Abby Recko , Colby College , Waterville , USA +1. YuhJong Liu , University of Pennsylvania , Philadelphia , US +1. Houssem Gueziri , Montreal Neurological Institute , Montreal , Canada +1. Kyle Sunderland , Queen's University , Kingston , Canada +1. Manjula Herath , Malmo University , Malmö , Sweden +1. Badiaa Ait Ahmed , Abdelmalek Essaâdi University , Tétouan , Morocco +1. Lipeng Ning , Brigham and Women's Hospital , Boston , USA +1. Sindhura Thirumal , Queen's University , Kingston , Canada +1. Swajan Paul , McGill University , Montreal , Canada +1. Jesse Thompson , University of Hawaii John A Burns School of Medicine , Honolulu , Hawaii +1. Michael Halle , Brigham and Women's Hospital , Boston , MA +1. Chenglin Zhu , Cornell University , Ithaca , USA/China +1. Onder Erin , Johns Hopkins University , Baltimore , United States +1. Eberto Benjumea , Universidad Tecnológica de Bolívar , Cartagena , Colombia +1. Clare Tempany , BWH , Boston , USA +1. Nicholas Fordham , Boston University School of Medicine , Boston , United States +1. Dr. Gerard Rushingabigwi , University of Rwanda , , Rwanda +1. 
Joanna James , Beth Israel Deaconess Center , Boston , USA
+1. Mick Ganza , University of Rwanda, CEBE , Kigali , Rwanda
+1. Xiaolong Liu , Johns Hopkins University , Baltimore , United States
+1. Mushimiyimana Sophie , University of Rwanda , , Rwanda
+1. UWIZEYIMANA MEDIATRICE , UR/NYARUGENGE CAMPUS , KIGALI , RWANDA
+1. Erik Ziegler , Radical Imaging / Open Health Imaging Foundation , , France
+1. Alireza Sedghi , Radical Imaging / Open Health Imaging Foundation , Toronto , Canada
+1. Laleh Seyyed-Kalantari , Sinai Health , Thornhill , Canada
+1. FELIX HARERIMANA , UNIVERSITY OF RWANDA , RWAMAGAN , RWANDA
+1. Xavier Riobé , Certis Therapeutics , Bordeaux , France
+1. Lucas Ewing , Harvey Mudd College , Boston , US
+1. Naghmeh Ansari , Concordia University , Montreal , Canada
+1. Randy Gollub , MGH , Lexington , USA
+
+## History
+Please read about our experience in running these events since 2005: [Increasing the Impact of Medical Image Computing Using
+Community-Based Open-Access Hackathons: the NA-MIC and 3D Slicer Experience](http://perk.cs.queensu.ca/sites/perkd7.cs.queensu.ca/files/Kapur2016.pdf).
+
+## Logistics
+- **Dates:** June 28-July 2, 2021.
+- **Location:** THE INTERNET diff --git a/PW35_2021_Virtual/supplementary-material/DICOM-Overview.pdf b/PW35_2021_Virtual/supplementary-material/DICOM-Overview.pdf new file mode 100644 index 000000000..9144064cb Binary files /dev/null and b/PW35_2021_Virtual/supplementary-material/DICOM-Overview.pdf differ diff --git a/PW36_2022_Virtual/LocalParticipation.md b/PW36_2022_Virtual/LocalParticipation.md new file mode 100644 index 000000000..bc5f4d458 --- /dev/null +++ b/PW36_2022_Virtual/LocalParticipation.md @@ -0,0 +1,5 @@ +The in-person component of the project week has been cancelled due to the high number of COVID-19 cases. + +We are sorry for the inconvenience and the unfruitful wait. We hope we can meet in person at the next project week!
+ +[Back to main event](https://projectweek.na-mic.org/PW36_2022_Virtual/) diff --git a/PW36_2022_Virtual/Projects/ALICBCT/README.md b/PW36_2022_Virtual/Projects/ALICBCT/README.md new file mode 100644 index 000000000..007d79284 --- /dev/null +++ b/PW36_2022_Virtual/Projects/ALICBCT/README.md @@ -0,0 +1,92 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Automatic Landmark Identification in 3D Cone-Beam Computed Tomography scans + +## Key Investigators + +- Maxime Gillot +- Baptiste Baquero +- Juan Carlos Prieto +- Hina Shah + +# Project Description + +We propose a novel approach that reformulates anatomical landmark detection as a classification problem through a virtual agent placed +inside a 3D Cone-Beam Computed Tomography (CBCT) scan. This agent is trained to +navigate in a multi-scale volumetric space to reach the estimated landmark position. The +agent movements decision relies on a combination of Densely Connected Convolutional +Networks (DCCN) and fully connected layers. Our method achieved high accuracy with an average of +less than a 1.3mm error on the landmarks position without failures. + + +## Objective + + + +The goal is to have a model that automatically finds accurate landmarks in CBCT scans. + + + +## Approach and Plan + + +A virtual agent is placed inside a 3D CBCT scan. This agent is trained to +navigate in a multi-scale volumetric space to reach the estimated landmark position. The decision making is processed through a deep neural network. + +## Progress and Next Steps + + + +Done : +1. Prepare the data to be used for training and prediction +2. Train the model with a set of 6 landmarks and 60 CBCTs +3. Test the accuracy of the prediction on new scans + +Next: +1. Train the model on new landmarks and new CBCTs set +2. Create a slicer module that can be used to predict the landmark on various types of file +3. 
Optimize the training method to make it accessible for clinicians to train on their own dataset
+
+# Illustrations
+
+
+
+Selection of 6 landmarks to test the method
+![LM_SELECTION](https://user-images.githubusercontent.com/46842010/148439491-f2dd1d7c-65f3-44dc-9590-8c12a143b3ad.png)
+
+Environment to search the agent
+![Environment used for the landmark search](https://user-images.githubusercontent.com/46842010/148282250-a2be2edf-e8b8-4d4e-bc16-c71fd0ea9d38.png)
+
+Architecture of the agent
+![Agent used to find the landmark](https://user-images.githubusercontent.com/46842010/148282323-a423f5a3-1ecf-4cff-b824-e6073c835163.png)
+
+The 3 steps to search the landmark
+![Search_3Steps_labeled](https://user-images.githubusercontent.com/46842010/148439759-e7db327a-f9a4-4d45-93b9-c566f19137ba.png)
+
+Results : (error in mm)
+
+Screen Shot 2022-01-21 at 10 50 56 AM
+
+
+# Project week results
+
+During this project week I learned the basics on how to develop a slicer module.
+I spent this week on creating a first sketch of a future module that will be used to launch the landmark prediction.
+For now, it allows the user to browse folders where the AI models are located and create a menu where the clinician can choose which landmark to predict.
+Our prediction method can be trained with any type of 3D images. This module must be user friendly and flexible so any clinician can easily train and predict new landmarks.
+ +Browser to load the trained models +Screen Shot 2022-01-20 at 11 47 24 PM + +Landmarks menu generated after reading the model folder + + + +![2022-01-21 11-12-15](https://user-images.githubusercontent.com/46842010/150562291-1e280a3f-69a4-41e0-9927-f1def0cf9cea.gif) + + + + diff --git a/PW36_2022_Virtual/Projects/ALIDDM/README.md b/PW36_2022_Virtual/Projects/ALIDDM/README.md new file mode 100644 index 000000000..25feb8778 --- /dev/null +++ b/PW36_2022_Virtual/Projects/ALIDDM/README.md @@ -0,0 +1,71 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# ALIIOS - Automatic Landmarks Identification for Intra OralScans + + +## Key Investigators + Baptiste Baquero + Maxime Gillot + Lucia Cevidanes + Juan Carlos Prieto + + + +# Project Description + + +The intraoral scanner is a new tool for clinicians that allows new perspectives of development in the dental field and more particularly in orthodontics. +For clinicians, it is important to quantitatively evaluate and compare their results on a large number of data. They need information about distance and +angle from the 3D coordinates of the dental landmarks. 42 dental landmarks are digitized manually for each patient by orthodontists and clinicians, but manual +processing is really problematic because it's time consuming and accuracy errors can regularly be found. Therefore, proposing a robust, fast and accurate method +to automatically find landmark can assist clinicians in those important but time consuming tasks. In this article, we will explore techniques to automate the +search for these landmarks from a 3D digital dental model and the 3D landmark coordinates. We are going to work on deep learning techniques to allows models to +determine by itself image features to better capture the complex anatomical variation and find the perfect landmark position on the teeth. + +## Objective + + +The goal is to have a model that automatically finds accurate landmarks on the digital dental model. 
+
+## Approach and Plan
+
+
+
+1. Develop a deep reinforcement learning (RL) framework using the dental models in an environment.
+2. Develop an algorithm that allows virtual agents to navigate the environment
+3. The reward function for the agent consists of placing the landmark on the correct location
+
+## Progress and Next Steps
+
+
+Previous work :
+1. The camera is the agent. We had one agent for one landmark. For each camera, we initialized the focal point to the center of the tooth. The output of the
+neural network returns the movement of the camera and movement of focal point. We tried this method for just one tooth, and for all the jaw.
+2. We tried to create a sphere of cameras and the agent is the center of the sphere. The output of the neural network is just the movement of my agent. We tried with different parameters, for example with random rotations of the 3D model, or we randomized (or not) the initial point of my agent.
+
+Work to continue :
+3. Develop an algorithm in the pytorch3D framework that uses as input the surfaces and moves the virtual agent in the environment using Reinforcement Learning.
+
+# Illustrations
+
+
+
+![Screenshot 1](presentaion_1.jpeg)
+![Searching for landmark](presentation.jpeg)
+
+# Background and References
+
+
+
+# Progress/Results
+I began a new method for my project (automatic landmark identification on digital dental model)
+The previous method was based on the movement of one agent in the 3D space to reach the perfect position of the landmark on the tooth,
+but this method was not really efficient and precise.
+We decided to work on another method based on segmentation with PyTorch.
+The objective is to have different 2D views of the jaw in input of the model and in output the same model of the jaw with a segmented area in the region of the landmark.
+With this segmentation we’ll be able to recover the coordinates of the points in this area and then the position of the landmark.
+ +# Illustrations +Picture1 + +![Pasted Graphic 1](https://user-images.githubusercontent.com/83285614/150447011-cc7a8b5e-5032-40c6-9024-a9e123599ad4.jpeg) diff --git a/PW36_2022_Virtual/Projects/ALIDDM/presentaion_1.jpeg b/PW36_2022_Virtual/Projects/ALIDDM/presentaion_1.jpeg new file mode 100644 index 000000000..8a98ccde1 Binary files /dev/null and b/PW36_2022_Virtual/Projects/ALIDDM/presentaion_1.jpeg differ diff --git a/PW36_2022_Virtual/Projects/ALIDDM/presentation.jpeg b/PW36_2022_Virtual/Projects/ALIDDM/presentation.jpeg new file mode 100644 index 000000000..d0d598cda Binary files /dev/null and b/PW36_2022_Virtual/Projects/ALIDDM/presentation.jpeg differ diff --git a/PW36_2022_Virtual/Projects/AR_in_Slicer/Figure_HoloLens2_OrthopedicOncologicalSurgery.png b/PW36_2022_Virtual/Projects/AR_in_Slicer/Figure_HoloLens2_OrthopedicOncologicalSurgery.png new file mode 100644 index 000000000..c50d20d32 Binary files /dev/null and b/PW36_2022_Virtual/Projects/AR_in_Slicer/Figure_HoloLens2_OrthopedicOncologicalSurgery.png differ diff --git a/PW36_2022_Virtual/Projects/AR_in_Slicer/Figure_Smartphone_CraniosynostosisSurgery.png b/PW36_2022_Virtual/Projects/AR_in_Slicer/Figure_Smartphone_CraniosynostosisSurgery.png new file mode 100644 index 000000000..0932bc810 Binary files /dev/null and b/PW36_2022_Virtual/Projects/AR_in_Slicer/Figure_Smartphone_CraniosynostosisSurgery.png differ diff --git a/PW36_2022_Virtual/Projects/AR_in_Slicer/Figure_Smartphone_NeedleInsertion.png b/PW36_2022_Virtual/Projects/AR_in_Slicer/Figure_Smartphone_NeedleInsertion.png new file mode 100644 index 000000000..f9e8a3e85 Binary files /dev/null and b/PW36_2022_Virtual/Projects/AR_in_Slicer/Figure_Smartphone_NeedleInsertion.png differ diff --git a/PW36_2022_Virtual/Projects/AR_in_Slicer/Figure_Smartphone_PatientCommunication.png b/PW36_2022_Virtual/Projects/AR_in_Slicer/Figure_Smartphone_PatientCommunication.png new file mode 100644 index 000000000..da1b76c7a Binary files /dev/null and 
b/PW36_2022_Virtual/Projects/AR_in_Slicer/Figure_Smartphone_PatientCommunication.png differ diff --git a/PW36_2022_Virtual/Projects/AR_in_Slicer/README.md b/PW36_2022_Virtual/Projects/AR_in_Slicer/README.md new file mode 100644 index 000000000..3fa186447 --- /dev/null +++ b/PW36_2022_Virtual/Projects/AR_in_Slicer/README.md @@ -0,0 +1,87 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# AR in Slicer + +## Key Investigators + +- Alicia Pose Díez de la Lastra (Universidad Carlos III de Madrid, Madrid, Spain) +- Javier Pascau (Universidad Carlos III de Madrid, Madrid, Spain) +- Rafael Moreta Martínez (Universidad Carlos III de Madrid, Madrid, Spain) +- Gabor Fichtinger (PerkLab, Queen's University , Kingston , Canada) +- Andras Lasso (PerkLab, Queen's University , Kingston , Canada) +- Adam Rankin (Robarts Research Institute / Western University, Canada) +- Csaba Pinter (Ebatinca S.L., Las Palmas de Gran Canaria, Spain) +- Lucas Gandel (Kitware, France) +- Jean-Christophe Fillion-Robin (Kitware, USA) +- Simon Drouin (École de Technologie Supérieure , Montreal , Canada) + + + + +# Project Description + +Augmented Reality has increased its adoption in many areas with exciting benefits. Universidad Carlos III de Madrid (Madrid, Spain) has already worked in several medical projects +based on AR (see their progress in https://biig-igt.uc3m.es/augmented-reality/). On these studies, they usually export information from Slicer to an alternative software (Unity). + +The ultimate goal of this project is to develop a new 3D Slicer module that will allow to use augmented reality directly in 3D Slicer. With it, it will be possible +to centralize the working process, at the time of benefiting from all Slicer tools. + +Ebatinca S.L. in Las Palmas de Gran Canaria (Spain), has already started this project in collaboration with Kitware (France) and Universidad Carlos III de Madrid (Spain) and +they all are currently developing OpenXR. + +## Objective + + + +1. 
Create OpenXR, a code based on the currently existing OpenVR so that the new mixed reality version is compatible with VTK. + + +Some links of interest: +1. [Writing a Holographic Remoting remote app using the OpenXR API](https://docs.microsoft.com/en-us/windows/mixed-reality/develop/platform-capabilities-and-apis/holographic-remoting-create-remote-openxr) +2. [Slicer Documentation on Augmented Reality and Virtual Reality support](https://www.slicer.org/wiki/Documentation/Labs/Augmented_Reality_and_Virtual_Reality_support#Current_approaches) + +## Approach and Plan + +1. Develop a new Slicer extension that streams AR directly to HoloLens 2. + +## Progress and Next Steps + +1. OpenXR in VTK: + +The WIP branch supporting Holographic remoting to stream VTK rendering inside the Hololens has been submitted [here](https://gitlab.kitware.com/vtk/vtk/-/merge_requests/8101). + +2. Our [github](https://github.com/Slicer/Slicer/pull/5978) latest upload +3. We confirmed the prototype developed by Lucas Gandel by streaming VTK to the HoloLens 2 via Holographic Remoting. 
+ + + +# Illustrations + + +Here below you can find some AR implementations in health by Universidad Carlos III de Madrid (Madrid, Spain) in the past years: + +HoloLens 2 in Orthopedic Oncological Surgeries: + +![HoloLens 2 in Orthopedic Oncological Surgeries](Figure_HoloLens2_OrthopedicOncologicalSurgery.png) + +Smartphone app to communicate with the patient and help him/her understand his/her condition: + +![Smartphone app to communicate with the patient and help him/her understand his/her condition](Figure_Smartphone_PatientCommunication.png) + +Real-time guidance during Open Cranial Vault Remodeling using smartphone: + +![Smartphone app to guide open cranial vault remodeling](Figure_Smartphone_CraniosynostosisSurgery.png) + +Needle Insertion Guidance for Sacral Nerve Stimulation using smartphone: + +![Smartphone app to guide needle insertion for sacral nerve stimulation](Figure_Smartphone_NeedleInsertion.png) + + + + +# Background and References + + diff --git a/PW36_2022_Virtual/Projects/AnnotationMR-US/README.md b/PW36_2022_Virtual/Projects/AnnotationMR-US/README.md new file mode 100644 index 000000000..404f94a67 --- /dev/null +++ b/PW36_2022_Virtual/Projects/AnnotationMR-US/README.md @@ -0,0 +1,98 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Annotation of Neurosurgical MR and Ultrasound Images with Corresponding Landmarks + +## Key Investigators + +- Fryderyk Kögl (BWH, TUM) +- Harneet Cheema (BWH, UOttawa) +- Tina Kapur (BWH) +- Simon Drouin (ETS) +- Andrey Titov (ETS) +- Steve Pieper (Isomics) +- Tamas Ungi (Queen's University) +- Sandy Wells (BWH) + +# Project Description + + +Corresponding landmarks between MR and ultrasound images acquired during neurosurgery are valuable for **(a)** +validation of registration algorithms and **(b)** training supervised registration algorithms **(c)** initializing a +registration. In this project we aim to create a tool that makes the process of finding those landmarks easier. + +## Objective + + + +1. 
Objective A. Create a UI that provides new functionality and gathers existing functionality in one place to +facilitate landmarking +2. Objective B. Investigate the rendering infrastructure that would facilitate the adjustment of landmark position in +the 3D view of Slicer + +## Approach and Plan + + + +1. We use an iterative process for creating the UI - the user(s) give feedback to the developer(s) who then continuously +update(s) the UI + + +## Progress and Next Steps + + + +**Progress** +1. The extension is ready. It can be found +[here](https://github.com/koegl/mthesis-slicerLandmarkingView) on the main branch. A screenshot can be seen +below in **Illustrations**. For more details refer to the +[readme](https://github.com/koegl/mthesis-slicerLandmarkingView#readme). +2. A lot of bug fixes +3. More intuitive control of active views +4. More fine-grained control of viewing options +5. Automatically join corresponding landmarks with curves to visualise brain shift (also sanity check - the curves should be more or less smooth) + +**Next Steps** +1. Fulfill all formal requirements for a pull request +2. Search for bugs/corner cases +3. Push to the ExtensionIndex + +**Next Steps (outside the scope of this project week)** +1. Add volume rendering +2. Automatically detect landmarks (e.g. 3D-SIFT features) and manually choose the best ones + +# Illustrations + +**Current state of the extension** +![Screenshot of the current state of the extension](https://github.com/koegl/SlicerMRUSLandmarking/raw/main/misc/GUIpreview.png) + +**Landmark flow** + +Landmark flow + +**Example landmarks** + +L1-MR1 +
+L2-US1 +
+L3-US2 +
+L4-US3 +
+L5-MR2 + + + +# Background and References + + + +1. [Current version of the extension](https://github.com/koeglfryderyk/mthesis-slicerLandmarkingView) +2. [Mini dataset based on RESECT[1] to use for testing the extension](https://www.dropbox.com/sh/gabm0rqdh8kttj6/AADJfwfJnduJG4GJ92tygPufa?dl=0) + +[1] Xiao, Yiming, et al. "RE troSpective Evaluation of Cerebral Tumors (RESECT): A clinical database of pre‐operative +MRI and intra‐operative ultrasound in low‐grade glioma surgeries." Medical physics 44.7 (2017): 3875-3882. diff --git a/PW36_2022_Virtual/Projects/AnnotationMR-US/misc/extension_screenshot.png b/PW36_2022_Virtual/Projects/AnnotationMR-US/misc/extension_screenshot.png new file mode 100644 index 000000000..01053a457 Binary files /dev/null and b/PW36_2022_Virtual/Projects/AnnotationMR-US/misc/extension_screenshot.png differ diff --git a/PW36_2022_Virtual/Projects/AutomaticSegmentationofTeethandAlveolarBone/Explaining_Teeth_Numbers_EU.png b/PW36_2022_Virtual/Projects/AutomaticSegmentationofTeethandAlveolarBone/Explaining_Teeth_Numbers_EU.png new file mode 100644 index 000000000..74110b076 Binary files /dev/null and b/PW36_2022_Virtual/Projects/AutomaticSegmentationofTeethandAlveolarBone/Explaining_Teeth_Numbers_EU.png differ diff --git a/PW36_2022_Virtual/Projects/AutomaticSegmentationofTeethandAlveolarBone/README.md b/PW36_2022_Virtual/Projects/AutomaticSegmentationofTeethandAlveolarBone/README.md new file mode 100644 index 000000000..a9e856b26 --- /dev/null +++ b/PW36_2022_Virtual/Projects/AutomaticSegmentationofTeethandAlveolarBone/README.md @@ -0,0 +1,84 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Automatic Segmentation of Teeth and Alveolar bone using MONAI Label + +## Key Investigators + +- Daniel Palkovics (Semmelweis University, Budapest Hungary) +- Csaba Pinter (EBATINCA, Las Palmas de Gran Canaria, Spain) +- Andrés Diaz-Pinto (King's College, NVidia) + +# Project Description + + + +A three-dimensional visualization 
of dento-alveolar structures can enhance the surgical planning process, however currently there are no reliable fully automated segmentation methods available to acquire realistic 3D virtual models of teeth and alveolar bone. A time-consuming semi-automatic method has previously been utilized for diagnostic purposes and surgical planning of regenerative-reconstructive surgical procedures in periodontology and oral surgery. + +## Objective + + + +The aim of this project is to develop an automatic method utilizing MONAI Label to speed up segmentation process of dento-alveolar structures on cone-beam computed tomography datasets. + +1. Objective A. To develop a fast and reliable segmentation method that is capable of the separate 3D reconstruction of teeth and alveolar bone on CBCT datasets of periodontally involved patients + +## Approach and Plan + + + +1. Try to create MONAILabel app for segmenting said structures +2. Consult with the experts about the details + +## Progress and Next Steps + + + +1. Through various conversations, especially with Andrés (special thanks to him), we learned the following that will be useful for this project: + * It is possible to segment the teeth as one structure or per-tooth as well. In both cases we will need to use MultiLabelDeepEdit due to the presence of the alveolar bone structure + * It is possible to set up multi-stage inference in which the first stage can be preprocessing such as automatic cropping or removal of artifacts such as implants or bridges + * For initial experimentation a typical desktop GPU with 8GB can be used, however to achieve a well performing model it is recommended to have a professional one with >16GB memory + * A feasible option for this is to use an AWS instance + * This problem is different from the majority of segmentation tasks because missing structures are very commonplace (i.e. 
missing teeth especially for a patient who needs implant planning) + * The issue of the missing teeth needs to be carefully handled + * Use consistent numbering (see image below) + * Make sure the same teeth have the same label # (i.e. skip those that are not present) + * The way Slicer exports the segmentations does not fully support this use case due to the many missing structures +1. Plan to improve segmentation export in Slicer + * Add option both in segmentation export widget and segmentation logic to use the current terminology context for generating the same label for each structure (see image below) + * Many details to figure out: How to handle modifiers and anatomic regions, What happens when there are more than 255 usable entries, etc. We will start simple. + * Create a custom terminology context for this use case (alveolar bone + the 32 teeth, see image below) + * Update the existing datasets to have the correct terminology of each structure and re-export the segmentations + * Create simple module for single-click batch export of the MRBs for DeepEdit usage (to make sure the segmentation extent is the same as the master volume extent and to use terminology-based label numbers) + * UPDATE: We will try first to do the export using a custom color table we create together with the terminology and use the existing feature that assigns labels based on a selected color table + +# Illustrations + + + +Example segmentation: + +![Sample segmentation](SampleSegmentation_Small.png) + +Tooth numbering scheme (EU): + +![Tooth numbering EU](Explaining_Teeth_Numbers_EU.png) + +New option in segmentation export to consider terminology: + +![New option in segmentation export to consider terminology](SegmentationExportTerminologyOption.PNG) + +Draft of dental segmentation terminology: + +![Dental terminology draft](TerminologyDental.png) + +# Background and References + + + +1. Palkovics D, Mangano FG, Nagy K, Windisch P. 
(2020) Digital three-dimensional visualization of intrabony periodontal defects for regenerative surgical treatment planning. BMC Oral Health, 20: 351. +2. Palkovics D, Pinter C, Bartha F, Molnar B, Windisch P. (2021) CBCT subtraction analysis of 3D changes following alveolar ridge preservation: a case series of 10 patients with a 6-month follow-up. Int J Comput Dent, 24: 241-251. +3. Palkovics D, Solyom E, Molnar B, Pinter C, Windisch P. (2021) Digital Hybrid Model Preparation for Virtual Planning of Reconstructive Dentoalveolar Surgical Procedures. J Vis Exp, doi:10.3791/62743. +4. Sólyom E, Palkovics D, Pintér C, Mangano FG, Windisch P. (2021) Virtuális tervezés és volumetrikus kiértékelés egy komplex parodontális defektus regeneratív-rekonstruktív sebészi ellátásában: Egy eset bemutatása. Fogorvosi Szemle, 114: 120-130 diff --git a/PW36_2022_Virtual/Projects/AutomaticSegmentationofTeethandAlveolarBone/SampleSegmentation_Small.png b/PW36_2022_Virtual/Projects/AutomaticSegmentationofTeethandAlveolarBone/SampleSegmentation_Small.png new file mode 100644 index 000000000..3296b1d04 Binary files /dev/null and b/PW36_2022_Virtual/Projects/AutomaticSegmentationofTeethandAlveolarBone/SampleSegmentation_Small.png differ diff --git a/PW36_2022_Virtual/Projects/AutomaticSegmentationofTeethandAlveolarBone/SegmentationExportTerminologyOption.PNG b/PW36_2022_Virtual/Projects/AutomaticSegmentationofTeethandAlveolarBone/SegmentationExportTerminologyOption.PNG new file mode 100644 index 000000000..c86eb012d Binary files /dev/null and b/PW36_2022_Virtual/Projects/AutomaticSegmentationofTeethandAlveolarBone/SegmentationExportTerminologyOption.PNG differ diff --git a/PW36_2022_Virtual/Projects/AutomaticSegmentationofTeethandAlveolarBone/TerminologyDental.png b/PW36_2022_Virtual/Projects/AutomaticSegmentationofTeethandAlveolarBone/TerminologyDental.png new file mode 100644 index 000000000..09fe1b6ca Binary files /dev/null and 
b/PW36_2022_Virtual/Projects/AutomaticSegmentationofTeethandAlveolarBone/TerminologyDental.png differ diff --git a/PW36_2022_Virtual/Projects/BrainPrediction/README.md b/PW36_2022_Virtual/Projects/BrainPrediction/README.md new file mode 100644 index 000000000..9994e0544 --- /dev/null +++ b/PW36_2022_Virtual/Projects/BrainPrediction/README.md @@ -0,0 +1,60 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Brain Mask Prediction Based on MRI Skin Data + +## Key Investigators + +- Raymond Yang (University of Massachusetts Boston) +- Jax Luo (BWH & Harvard Medical School) +- Cathy Yang (Wellesley College) +- Lipeng Ning (BWH & Harvard Medical School) +- Steve Pieper (Isomics, Inc.) +- Daniel Haehn (University of Massachusetts Boston) + + + +# Project Description + +We postulate that there is a relationship between the shape of ones head and the shape of ones brain. This project aims test that by developing an AI solution for predicting a brain mask given surface data for a head. The eventual goal and application is to map the predicted brain mask to a scanned patient. This project is part of the TMS module project. + +## Objective + + + +1. Objective A. Build and test a CNN model +1. Objective B. Migrate TMS model and implement on Slicer +1. Objective C. Build and test a geometric CNN model* + +## Approach and Plan + + + +1. We have some MRI from the HCP Human Connectome Project +1. Skin masks and Brain Masks were obtained from these MRIs using HDBET and FieldTrip toolbox +1. Using these as ground truths, train a CNN model to see the feasibility +1. Implement TMS model on Slicer as a module +1. Convert ground truth data into surface meshes +1. Using the new mesh data, train a geometric CNN model and compare results + +## Progress and Next Steps + +Not a lot of progress was made. +1. Some issues with the MRI Masks, data misaligned. + - Has been resolved, will start training next week +1. 
Started a TMS Prediction Module, Source below + - Prediction is working + - Need to create post-processing script to return niftii + +# Illustrations + + +![TMS Prediction UI](TMS_UI.PNG) +![TMS Prediction Output](TMS_OUT.PNG) + +# Background and References + + +- **TMS Prediction** GitHub repository: [TMS Prediction](https://github.com/YangRyRay/TMS_Prediction) diff --git a/PW36_2022_Virtual/Projects/BrainPrediction/TMS_OUT.png b/PW36_2022_Virtual/Projects/BrainPrediction/TMS_OUT.png new file mode 100644 index 000000000..f83fc5e23 Binary files /dev/null and b/PW36_2022_Virtual/Projects/BrainPrediction/TMS_OUT.png differ diff --git a/PW36_2022_Virtual/Projects/BrainPrediction/TMS_UI.PNG b/PW36_2022_Virtual/Projects/BrainPrediction/TMS_UI.PNG new file mode 100644 index 000000000..60a8795d8 Binary files /dev/null and b/PW36_2022_Virtual/Projects/BrainPrediction/TMS_UI.PNG differ diff --git a/PW36_2022_Virtual/Projects/BrainPrediction/TMS_UI_TEMP.PNG b/PW36_2022_Virtual/Projects/BrainPrediction/TMS_UI_TEMP.PNG new file mode 100644 index 000000000..b1b677da7 Binary files /dev/null and b/PW36_2022_Virtual/Projects/BrainPrediction/TMS_UI_TEMP.PNG differ diff --git a/PW36_2022_Virtual/Projects/CIP_Update/README.md b/PW36_2022_Virtual/Projects/CIP_Update/README.md new file mode 100644 index 000000000..a76400e69 --- /dev/null +++ b/PW36_2022_Virtual/Projects/CIP_Update/README.md @@ -0,0 +1,138 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Update the Chest Imaging Platform extenstion to support Slicer 5 + +## Key Investigators + +- Rudolf Bumm (KSGR) +- Raul San Jose Estepar (Brigham) +- Andras Lasso (PerkLab) +- Steve Pieper (Kitware) + +# Project Description + + +The Chest Imaging Platform (CIP) is an extension to 3D Slicer. 
+ +![image](https://user-images.githubusercontent.com/18140094/149629677-6bea2a6f-835d-4ae8-8955-71995e7e716d.png) + +It integrates: +- chest image processing functionality as a toolkit by exposing the CLIs +- Slicer specific modules to provide user-friendly chest CT quantitative solutions +- Visualization of scale-space particles and labelmaps +- Integrated workflows to end-to-end clinical evaluation + +In the current preview versions of 3D Slicer (4.13.0) parts of CIP fail to load the following CIP modules because Slicer's "Editor" module has been removed. + +CIP_CalciumScoring + +CIP_RVLVRatio + +CIP_LesionModel + +CIP_Calibration + +CIP_MIPViewer + +CIP_BodyComposition + +CIP_ParenchymaSubtypeTrainingLabelling + +CIP_ParenchymaAnalysis + +CIP_PAARatio + +CIP_AVRatio + +CIP_InteractiveLobeSegmentation + +The CIP extension currently uses legacy editor module, and charts infrastructure (instead of Segment editor, Plots, and Tables modules). + +## Objective + +- Replace the usage of the "Editor" module in CIP by something different, preferably the SegmentEditor. +- Replace labelmaps with segmentations +- Replace outdated fiducial calls (exceptions) +- Replace the charts infrastructure + + +## Approach and Plan + +A fork and branch of SlicerCIP was created at https://github.com/rbumm/SlicerCIP/tree/Branch_CIPCompatSlicer5 to be used as the source base, all changes will later be included in a PR to https://github.com/acil-bwh/SlicerCIP. + +Resolve compatibility problems step by step + +Deactivate non-working or outdated modules in Slicer 5 + +## Considerations + +## Progress and Next Steps + +01/06/22: + +Removing the "Editor" related imports from "Scripted/CIP_/CIP/ui/__init__.py" results in a complete CIP-startup in Slicer 4.13.0 without initial error messages. 
+ +A github search revealed that "Editor" calls are being made from three of the above modules: + +- CIP_Calibration +- CIP_ParenchymaSubtypeTrainingLabelling +- CIP_BodyComposition + +01/11/22: + +- CIP_Calibration is probably outdated. +- CIP_ParenchymaSubtypeTrainingLabelling is outdated, probably redundant. +- CIP_BodyComposition is needed, but probably much better realized with AI segmentation + +01/17/22: 12pm Meeting with Raul, Andras, Steve and Rudolf (Discord Red Slice) + +- Incompabilities between 4.13 and CIP seem to be caused by CIPLibrary +- CIP, it's CLI functions and their history were demonstrated by @Raul +- CIP_Calibration is not outdated. +- CIP_ParenchymaSubtypeTrainingLabelling should be kept. +- CIP_BodyComposition is needed and should be kept +- CIP Toolkit functions should be included in a future SlicerCIP release +- A CIPLibrary compatibility branch "4.13" or "5.0" will be created +- CIP GitHub write access was requested for @Andras to support merging, @Raul agreed +- As labelmaps are used throughout CIP in nearly every module we will discuss a Slicer-based solution for that problem tomorrow + +01/18/22: 12pm Meeting with Raul, Andras, Rudolf (Discord Red Slice) + +- good follow up meeting +- @rbumm demonstrated the build process +- a first module (Calibration) has been adapted to 4.13 and now works with the Segment Editor instead of Editor +- ParenchymaSubtypeTrainingLabelling was demonstrated by Raul +- we decided to prioritize certain modules during CIP adaptation +- ParenchymaAnalysis probably the next to go +- Andras promised to look into CLI modules and letting them use segmentations instead of labelmaps as an input maybe automatically +- next meeting will be Thursday 11 am Red Slice + +# Final presentation start here + +01/20/22: 11am-12:30pm Meeting with Raul, Andras, Rudolf (Discord Red Slice) + +- The Chest Imaging Platform extension's "Calibrate" module has been finalized for Slicer 5 together with @lassoan and was demonstrated 
by @rbumm +- see this [SlicerCIP GitHub fork](https://github.com/rbumm/SlicerCIP/tree/Branch_CIPCompatSlicer5) and [this commit](https://github.com/rbumm/SlicerCIP/commit/86c8173639a2ecc3c08993fa311625bec51378c9) +- will serve as a skeleton to adopt the other CIP modules and is now fully functional +- The "Calibration" widget uses a newly created segmentation for each volume instead of labelmaps +- the segmentation can be edited in the embedded Segment Editor +- before pressing "Calibrate" the segmentation is converted to a labelmap for the actual calibration in the logic() +- in addition, we developed a strategy on how to transform the other CIP modules to Slicer 5 +- non-working modules will be excluded from CMake to provide only those functional to the community +- @Raul demonstrated new vessel segmentation techniques and answered many questions +- @Raul agreed on giving @lassoan write access to the SlicerCIP GitHub repository + +# Illustrations + + +New Slicer 5 CIP "Calibration" module with embedded "SegmentEditor" instead of the old "Editor": + + +![image](https://user-images.githubusercontent.com/18140094/150400506-d357ac15-55ef-4f28-a0f6-00cd511b8183.png) + + +# Background and References + +https://chestimagingplatform.org/ + +https://discourse.slicer.org/t/exporting-csv-with-parenchyma-analysis-module/10697/58?u=rbumm diff --git a/PW36_2022_Virtual/Projects/CheapTracking/README.md b/PW36_2022_Virtual/Projects/CheapTracking/README.md new file mode 100644 index 000000000..fc45f2981 --- /dev/null +++ b/PW36_2022_Virtual/Projects/CheapTracking/README.md @@ -0,0 +1,59 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# El Cheapo Tracking + +## Key Investigators + +- Steve Pieper (Isomics) +- Gabor Fichtinger (Queens) +- YOU + +# Project Description + +Investigate less expensive but still good tracking for IGT. Modern AR/VR devices use inside-out tracking with IMUs, cameras, lidar, and other sensors (e.g. 
in phones and glasses). +These are small enough and becoming (maybe?) good enough to consider for IGT. +Would these be options for NousNav or the SlicerTMS projects? + +## Objective + +1. Make a plan for determining accuracy and utility of options +2. Plan any implementation efforts or further experiments +3. Consider issues like form-factor, sterilization, re-usability, etc. + + +## Approach and Plan + +1. Survey developments in the field pushed by AR/VR devices +2. Look at any prototypes, e.g. [Steve's WebXR](https://github.com/pieper/SlicerWeb/blob/master/WebServer/docroot/WebXR-controller/index.html) experiment +3. Determine next steps + +## Progress and Next Steps + + + +### Progress +1. Improved demo to work with https using Let's Encrypt on Google Cloud virtual machine running Slicer +2. Added touch screen events to control attributes of Slicer model (brighter yellow when touching the screen). +3. Gave demos to colleagues at the [Wednesday IGT breakout](https://docs.google.com/document/d/1mwTbzy_ulATfrU97cFfQM_ikhz1CUr1xaocj6lp6c8w/edit#heading=h.296xjyux0jir) and discussed the tradeoffs of intrinsic tracking vs EM and extrinsic optical tracking + +### Next steps +1. Explore the use of phone-based tracking for SlicerTMS research +2. Experiment with local rendering and touch interactions on phone mixed with remote rendering and computation on CPU/GPU with Slicer +3. Consider developing native phone app to avoid https performance overhead vs upgrading Slicer's web server to support web sockets for faster performance +4. Brainstorm about other applications of this technology +5. Monitor developments of intrinsic tracking systems in non-phone form factors for use in other tracking scenarios (e.g. 
in IGT) + +# Illustrations + + +[![Phone controller demo (click to see video)](https://user-images.githubusercontent.com/126077/150543016-34926be4-7eca-4c47-87c0-95f0fdb29230.png)](https://youtu.be/kQKskHYlpQE "Phone Controller Demo (click to view on youtube") + + +* Phone as a 6 DOF controller demo (no audio). Demo uses moto g100 Android phone that includes Qualcom chips for tracking with WebXR API in JavaScript to communicate with 3D Slicer web server. + +# Background and References + +* https://github.com/pieper/SlicerWeb/blob/master/WebServer/docroot/WebXR-controller/index.html +* https://immersive-web.github.io/webxr-samples/ +* https://www.qualcomm.com/research/cognitive-technologies/immersive-experiences/augmented-reality +* https://www.qualcomm.com/news/onq/2021/08/16/how-snapdragon-xr1-powers-lenovo-thinkreality-a3-smart-glasses-and-moto-g100 diff --git a/PW36_2022_Virtual/Projects/DDMReg/README.md b/PW36_2022_Virtual/Projects/DDMReg/README.md new file mode 100644 index 000000000..bd9ac4597 --- /dev/null +++ b/PW36_2022_Virtual/Projects/DDMReg/README.md @@ -0,0 +1,57 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Deep Diffusion MRI Registration (DDMReg): code release via SlicerDMRI + +## Key Investigators + +- Fan Zhang (BWH) +- William M. Wells III (BWH) +- Lauren J O'Donnell (BWH) + +# Project Description + + + +In this project, we will work on releasing the code of a diffusion MRI registration method (Zhang et al 2022, IEEE TMI). We will provide the trained CNN model and testing cases for demonstration. + +## Objective + + + +1. Code cleaning. +1. Release code. +1. Documentation and example testing data. + +![ddmreg_prediction](https://user-images.githubusercontent.com/7855446/150069553-86a8a899-a8d6-47a6-8609-949b895dfd60.png) + +## Approach and Plan + + + +1. Code released at: [DDMReg](https://github.com/SlicerDMRI/DDMReg.git) +2. Pre-trained models are provided +3. 
Example data and documentation are provided + +## Progress and Next Steps + + + +1. Code released as a standard Python package. +2. Integrate into SlicerDMRI so that users can use it via the Slicer interface. + +# Illustrations + + + +f1_overview_R1 + +# Background and References + + + +[Zhang, Fan, William M. Wells, and Lauren J. O'Donnell. "Deep Diffusion MRI Registration (DDMReg): A Deep Learning Method for Diffusion MRI Registration." IEEE TMI (2022).](https://ieeexplore.ieee.org/document/9665765) + +In this paper, we present a deep learning method, DDMReg, for accurate registration between diffusion MRI (dMRI) datasets. In dMRI registration, the goal is to spatially align brain anatomical structures while ensuring that local fiber orientations remain consistent with the underlying white matter fiber tract anatomy. DDMReg is a novel method that uses joint whole-brain and tract-specific information for dMRI registration. Based on the successful VoxelMorph framework for image registration, we propose a novel registration architecture that leverages not only whole brain information but also tract-specific fiber orientation information. DDMReg is an unsupervised method for deformable registration between pairs of dMRI datasets: it does not require nonlinearly pre-registered training data or the corresponding deformation fields as ground truth. We perform comparisons with four state-of-the-art registration methods on multiple independently acquired datasets from different populations (including teenagers, young and elderly adults) and different imaging protocols and scanners. We evaluate the registration performance by assessing the ability to align anatomically corresponding brain structures and ensure fiber spatial agreement between different subjects after registration. Experimental results show that DDMReg obtains significantly improved registration performance compared to the state-of-the-art methods. 
Importantly, we demonstrate successful generalization of DDMReg to dMRI data from different populations with varying ages and acquired using different acquisition protocols and different scanners. diff --git a/PW36_2022_Virtual/Projects/DSCIAnonymize/README.md b/PW36_2022_Virtual/Projects/DSCIAnonymize/README.md new file mode 100644 index 000000000..4740b6470 --- /dev/null +++ b/PW36_2022_Virtual/Projects/DSCIAnonymize/README.md @@ -0,0 +1,60 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Anonymization of a batch of DICOM files +Tool to anonymize a dataset of medical images. + +## Key Investigators + +- Hina Shah (UNC Chapel Hill) +- Juan Carolos Prieto (UNC Chapel Hill) +- Fryderyk Kögl (BWH, TUM) + +# Project Description + + +The very first step to make any medical data available to research community is it's anonymization. While there are ways to anonymize a single DICOM/non-dicom image in 3D Slicer, there's no module to do this for a full dataset. + +The proposed tool will: +- Anonymize a dataset of images by deleting any identifiable metadata information +- Have options to rename the files using either UUID or custom name. +- Create a crosswalk to get the correspondence between anonymized and original files +- Work as a standalone app or a slicer extension + +## Objective + + + +1. Objective A. Write tests +1. Objective B. Push the extension to Slicer Extension Index +1. Objective C. Find out what other features/enhancements can be added to this extension + +## Approach and Plan + + + +1. Identify existing anonymization pipelines for DICOM +2. Modify code to make the extension be available as an extension (not a standalone app), and push it to Slicer Extension Index +3. Within the community try to find out what other features would be useful to add to the extension. + +## Progress and Next Steps + + + +1. Worked on a couple of issues for the extension +2. The extension has been pushed to the Slicer Extension Index. +3. 
Had a productive discussion with a few folks in the community to understand what are the existing tools/conformances for DICOM anonymization - this needs more introspection and research on our part, and deciding how we want to proceed - especially for the dental research data sharing purposes. +4. Will add a few suggested features, for example: letting users chose which metadata to anonymize. + +# Illustrations + + +![image](https://user-images.githubusercontent.com/22948571/149800624-b1468449-96a1-467c-ad49-7559e68fb74b.png) + + +# Background and References + + +1. [Source code in Github repository](https://github.com/hina-shah/SlicerBatchAnonymize) diff --git a/PW36_2022_Virtual/Projects/DeepFiberClustering/README.md b/PW36_2022_Virtual/Projects/DeepFiberClustering/README.md new file mode 100644 index 000000000..49155edbd --- /dev/null +++ b/PW36_2022_Virtual/Projects/DeepFiberClustering/README.md @@ -0,0 +1,56 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# A deep learning framework for white matter fiber clustering, code release via SlicerDMRI + +## Key Investigators + +- Yuqian Chen (BWH & USYD) +- Chaoyi Zhang (USYD) +- Yang Song (UNSW) +- Tengfei Xue (BWH & USYD) +- Nikos Makris (BWH) +- Yogesh Rathi(BWH) +- Weidong Cai (USYD) +- Fan Zhang (BWH) +- Lauren J O'Donnell (BWH) + +# Project Description + + +We propose an unsupervised deep learning framework for fast and effective white matter fiber clustering (WMFC) (Chen et al 2021, MICCAI). It enables parcellation of white matter tractography. Current WMFC methods are facing several challenges such as fiber computation efficiency, sensitivity to fiber direction, combination of spatial and anatomical information, existence of outlier fibers as well as correspondence across subjects. To overcome these challenges, we propose a self-supervised learning strategy to achieve fast and effective WMFC. In this project, we will work on releasing the code of this method. 
We will provide the trained model and testing samples for demonstration. + +## Objective + + + +1. Build deep learning training model for white matter fiber clustering and evaluate it on testing data. +2. Code cleaning and releasing. + +## Approach and Plan + + + +1. In our method, we use a convolutional neural network to learn embeddings of input fibers and improved anatomical coherence of fiber clusters by incorporating brain anatomical information. Outlier removal is performed in a natural way by rejecting fibers with low cluster assignment probability. +4. Experiments are implemented through coding with python. +5. Evaluate our method by performing experiments on three independently acquired datasets. + +## Progress and Next Steps + + + +1. Code released at: https://github.com/SlicerDMRI/DFC. +2. Trained models and example testing data are provided. + +# Illustrations + + +![image](https://user-images.githubusercontent.com/59594831/149714486-3e57731f-e146-42b9-8bba-687f9fb13c2d.png) +![image](https://user-images.githubusercontent.com/59594831/149785097-cb71b90c-6713-4a93-b748-c1521aeecf1d.png) + +# Background and References + + +Chen, Yuqian, Chaoyi Zhang, Yang Song, Nikos Makris, Yogesh Rathi, Weidong Cai, Fan Zhang, and Lauren J. O’Donnell. ["Deep Fiber Clustering: Anatomically Informed Unsupervised Deep Learning for Fast and Effective White Matter Parcellation."](https://link.springer.com/chapter/10.1007/978-3-030-87234-2_47) International Conference on Medical Image Computing and Computer-Assisted Intervention. Springer, Cham, 2021. 
diff --git a/PW36_2022_Virtual/Projects/EchoVolumeRenderUI/README.md b/PW36_2022_Virtual/Projects/EchoVolumeRenderUI/README.md new file mode 100644 index 000000000..c006e8a4a --- /dev/null +++ b/PW36_2022_Virtual/Projects/EchoVolumeRenderUI/README.md @@ -0,0 +1,53 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Update the Echo Volume Render interface + +## Key Investigators + +- Samuelle St-Onge (ÉTS, Montreal) +- Simon Drouin (ÉTS, Montreal) +- Andrey Titov (ÉTS, Montreal) + +# Project Description + + + +In this project, we aim to rework the interface of the Echo Volume Render module in order to make it more intuitive and better adapted to clinical users' needs. + +## Objective + + + +Update and polish the module's interface to improve its usability for clinical users. + +## Approach and Plan + + + +1. Have a good understanding of the Echo Volume Render's parameters +1. Get input from users on what could be improved (Children's Hospital of Philadelphia (CHOP)) +1. Compare the module with commercial platforms to see which 3DE parameters are familiar to clinicians +1. Determine the modifications to be made to improve the interface +1. Implement these modifications in the UI +1. Send the updated interface to collaborators from CHOP to get feedback + +## Progress and Next Steps + + + +1. Discussion on previous versions of the module to understand the changes that have been made in the last 2 years +1. Discussion with a collaborator from CHOP on aspects to be improved in the module +1. 
Determined modifications that could potentially improve visualization of volume rendered 3DE : + - Implement a [Phase Symmetry filter](https://pypi.org/project/itk-phasesymmetry/) to reduce noise in images while preserving anatomical details prior to volume rendering + - Implement Gaussian filtering in the GPU + +# Illustrations +![Echo Volume Render UI](https://user-images.githubusercontent.com/57685132/149667633-524c8285-3f81-4c91-92c8-87b22a3d29c1.jpg) + +## Long-term Objectives + +1. Implement new features similar to [TrueView and Glass from Philips](https://www.usa.philips.com/healthcare/resources/landing/epiq/cardiology) +1. Implement Color Doppler + +# Background and References + diff --git a/PW36_2022_Virtual/Projects/FlirCameraInSlicer/README.md b/PW36_2022_Virtual/Projects/FlirCameraInSlicer/README.md new file mode 100644 index 000000000..0bc8ea64d --- /dev/null +++ b/PW36_2022_Virtual/Projects/FlirCameraInSlicer/README.md @@ -0,0 +1,51 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Integration of Flir Thermal Camera in Slicer in order to capture and process images + +## Key Investigators + +- Juan Bautista Ruis Alzola (ULPGC - Universidad de Las Palmas de Gran Canaria) +- Robabeh Salehiozoumchelouei (instituto astrofísico de canarias-IAC) +- Mónica García Sevilla (ULPGC - Universidad de Las Palmas de Gran Canaria) +- Yousef Rajaeitabrizi (instituto astrofísico de canarias-IAC) + +# Project Description +Integration of Flir Thermal Camera in Slicer in order to capture and process images + +## Objective + + + +1. Objective A. Learning Slicer +1. Objective B. Integration of Flir Thermal Camera in Slicer +1. Objective C. Capture and process the images + +## Approach and Plan + + + +1. Communicating with the experts of this field who have participated in this PW for getting information of the best way of getting start +1. Communicating with the experts if they can facilitate us some Links of the tutorials and Slicer documentation +1. 
Study the SDK of the thermal Camera Flir in order to find the Python library and the instructions for connecting the camera with the embedded Python of Slicer +1. Studying the documentation of Slicer for learning the way of using the functions of the Slicer extensions in Python + +## Progress and Next Steps + + + +1. We have developed the interface class for the Flir thermal Camera with all necessary functions for connecting and capturing the images +1. We import the mentioned interface class into the Python of Slicer and we were able to capture images by simulating the camera +1. We started to become acquainted with the extensions of Slicer +1. We will try to learn more about the abilities of Slicer extensions in order to apply them in Python + + +# Illustrations + + + +# Background and References + + diff --git a/PW36_2022_Virtual/Projects/GPURigidRegistration/README.md b/PW36_2022_Virtual/Projects/GPURigidRegistration/README.md new file mode 100644 index 000000000..f2751bc30 --- /dev/null +++ b/PW36_2022_Virtual/Projects/GPURigidRegistration/README.md @@ -0,0 +1,58 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# GPU Rigid Registration for Neuronavigation (from Ibis) + +## Key Investigators +- Gelel Rezig (Ecole de technologie supérieur, Montréal, Canada) +- Houssem Eddine Gueziri (Montreal Neurological Institute and Hospital, Canada) +- Simon Drouin (Ecole de technologie supérieur, Montréal, Canada) + +# Project Description + +With this project, we would like to add a new feature to Slicer. +The goal of this project is to extract code from an open-source software for image-based neurosurgery guidance: IBIS Neuronav. +This code in C++ aims to perform registration between different images using the GPU. It is located in an IBIS Neuronav +plugin. Then, it will be implemented in Slicer to be available for all users. 
+The objective of the second project is to recover another functionality in the same way: the conversion of a minc file into mha ([SequenceIO](https://github.com/IbisNeuronav/Ibis/tree/master/IbisPlugins/SequenceIO)). + +## Long-term Objective +1. Extract GPU registration code from IBIS Neuronav to an independent library (done in PW35, see extracted lib [here](https://github.com/IbisNeuronav/GPURigidRegistrationLib) ) +2. Create a standalone command-line application to register images (partly done in PW35) +3. Create a Slicer module that replicates the functionality of Ibis using the independent library (TODO) + +2nd project (New) +1. Extract the converter minc/mha (SequenceIO) code from IBIS Neuronav to an independent library +2. Create a standalone command-line application to convert +3. Create a Slicer module that replicates the functionality of Ibis using the independent library + +## Approach and Plan +1. Implement command-line parameters in the standalone app to support all options available in the Ibis GUI for the registration plugin +2. Write a test suite using the command-line application. +3. Build a prototype for the Slicer registration module. Some questions remain: + * Should the first iteration use the CLI interface? + * How to build and distribute Slicer modules with OpenCL support? + +2nd project +1. Implement command-line parameters in the standalone app to support all options available in the Ibis GUI for the converter plugin +2. Write a test suite using the command-line application. + +## Progress and Next Steps + +1. The GPU RigidRegistration Lib is available to be used by command line [here](https://github.com/IbisNeuronav/GPURigidRegistrationLib) . (DONE) +2. Make modifications so that Ibis Neuronav uses this library to avoid code duplication. (In progress) +3. Implementation of the solution in Slicer. (Next Step) + + The second project +1. 
Extract the converter minc/mha (SequenceIO) code from IBIS Neuronav to an independent library (in progress) [here](https://github.com/rggelel/SequenceIo) +2. Make modifications for Ibis neuronav uses this library to avoid code duplication. (Next step) +3. Implementation of the solution in slicer. (Next Step) + +# Illustrations +![Registration on real time with GPU](https://projectweek.na-mic.org/PW35_2021_Virtual/Projects/GPURigidRegistration/gpu-rigid-reg.gif) + +# Background and References +Webpage and GitHub repositories with relevant code: +- [Ibis Neuronav](http://ibisneuronav.org) +- [Ibis Neuronav on GitHub](https://github.com/IbisNeuronav/Ibis) +- [New GPURigidRegistration lib](https://github.com/IbisNeuronav/GPURigidRegistrationLib) +- [SequenceIO on Ibis Neuronav](https://github.com/IbisNeuronav/Ibis/tree/master/IbisPlugins/SequenceIO) diff --git a/PW36_2022_Virtual/Projects/IDCBodyPartRegression/README.md b/PW36_2022_Virtual/Projects/IDCBodyPartRegression/README.md new file mode 100644 index 000000000..ae1c72d07 --- /dev/null +++ b/PW36_2022_Virtual/Projects/IDCBodyPartRegression/README.md @@ -0,0 +1,81 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Using Imaging Data Commons to Perform Deep-Learning Based Body Part Regression + +## Key Investigators + +- Deepa Krishnaswamy (Brigham and Women's Hospital) +- Khaled Younis (Philips) +- Andrey Fedorov (Brigham and Women's Hospital) + +# Project Description + +One issue in using deep learning for segmentation of anatomical regions is the ability to obtain datasets that focus on the area of interest. For instance, some DL algorithms may require preprocessing of datasets (cropping volumes before training the algorithm) or postprocessing of the segmentation label output by the removal of false positives. + +Within DICOM data, the body part examined tag may provide some information as to the region captured. 
Unfortunately, it may list the incorrect region, or be blank because of removal during the anonymization process. Therefore this tag cannot always be relied upon. + +A deep learning method was developed (reference below) that creates a new coordinate system that maps each axial slice to a "slice score". These scores are associated with specific anatomy and therefore can be used for a smarter way to crop volumes to aid in preprocessing. + +We plan to leverage the strengths of Imaging Data Commons by using it to obtain data from TCIA, and perform queries. We will obtain a varied CT dataset on which the body part regression model can be tested, and will hopefully demonstrate the usefulness of IDC for this type of analysis and visualization. + +## Objective + + + +1. Objective A. We will demonstrate how the body part examined tag is unreliable for describing anatomical regions +1. Objective B. We will show how the slice scores (that correspond to anatomical regions) can be used to crop volumes in an efficient manner. +1. Objective C. We will also show how the body part regression model can be used on a variety of CT data. + +## Approach and Plan + + + +1. We will use BigQuery to obtain a more varied dataset that captures differences in CT volumes (pixel spacing, slice thickness, manufacturer). +1. We will then use the trained model from the author (below) to test the neural network on the dataset obtained from TCIA using IDC. +1. Next we will compare the "ground truth" regions from the RTSTRUCT/SEG files to the regions cropped by using BPR, and see if they are within the bounds. +1. We will show the difference between the body part examined tag from the original DICOM files to the ones predicted by BPR. +1. We will visualize the results by populating the results to DICOM data stores and interacting with them using the OHIF viewer. + +## Progress and Next Steps + + + +1. We have obtained a small, but varied, CT dataset. +1. 
We have used the trained model from the author to test the regression network on a sample of data. +1. We have created our own SEG DICOM files that hold for each patient the "ground truth" anatomical region versus the cropped region produced by BPR. +1. We have created our own SEG DICOM files that holds the predicted body part examined regions. +1. We have populated DICOM data stores and used the OHIF viewer to interact with them. +1. It would be beneficial to test on a larger dataset. + +# Illustrations + +We can browse our DICOM data stores and use OHIF (thanks to this [project!](https://projectweek.na-mic.org/PW36_2022_Virtual/Projects/OHIFonGCP/)) to show a comparison of the original lung segmentation along with the predicted cropped volume as a bounding box. We can see that the bounding box captures the lung, demonstrating the usefulness of this method for pre or post-processing for segmentation algorithms. + +![LCTSC-Train-S3-010_anatomy_vs_cropped_volume](https://user-images.githubusercontent.com/59979551/150238766-e2d24776-9fa3-4cdc-8801-dd0fbdcf48d6.gif) + +We can also compare the body part examined tag distrubtion from the original DICOM files vs the tag predicted by Body Part Regression. In this particular dataset we included patients with kidney and lung segmentations, and by observing these tags, we can see that areas outside of these regions were included in the CT scans. + +![Body part examined tag distributions](pie_charts_initial.png) + +Using the same viewer as above, we can also observe the predicted body part examined regions. For this particular example, the body part examined was LUNG, but it can be seen that the predicted regions include ABDOMEN-CHEST-NECK-HEAD. If we scroll in the axial direction, we can see some slices that have two colors - this indicates that the slice was classified as having both regions, for instance both ABDOMEN and CHEST. 
+ +![LCTSC-Train-S3-010_body_part_examined_regions](https://user-images.githubusercontent.com/59979551/150240139-0f01b20c-af63-4156-9579-632ce6b883ee.gif) + +We can look at MPR views to better view the predicted regions. We can see that by looking at the sagittal view, that each axial slice may include multiple predicted regions. We can see that including the regions ABDOMEN-CHEST-NECK-HEAD is more accurate than only LUNG. + +![Body part examined predicted regions](body_part_examined_regions_screenshot.JPG) + + + +# Background and References + + + +Schuhegger S. Body Part Regression for CT Images. arXiv preprint arXiv:2110.09148. 2021 Oct 18. https://arxiv.org/abs/2110.09148?context=eess + +Github link to code from thesis: https://github.com/mic-dkfz/bodypartregression + +[Link to the colab notebook](https://colab.research.google.com/drive/1Udqz74i2I6W69t0G3aiJ-5UmlK0f6xxG?usp=sharing) diff --git a/PW36_2022_Virtual/Projects/IDCBodyPartRegression/body_part_examined_regions_screenshot.JPG b/PW36_2022_Virtual/Projects/IDCBodyPartRegression/body_part_examined_regions_screenshot.JPG new file mode 100644 index 000000000..db51b2d1f Binary files /dev/null and b/PW36_2022_Virtual/Projects/IDCBodyPartRegression/body_part_examined_regions_screenshot.JPG differ diff --git a/PW36_2022_Virtual/Projects/IDCBodyPartRegression/pie_charts_initial.png b/PW36_2022_Virtual/Projects/IDCBodyPartRegression/pie_charts_initial.png new file mode 100644 index 000000000..ebafca8bd Binary files /dev/null and b/PW36_2022_Virtual/Projects/IDCBodyPartRegression/pie_charts_initial.png differ diff --git a/PW36_2022_Virtual/Projects/IDCBodyPartRegression/thesis_cropped_lung.JPG b/PW36_2022_Virtual/Projects/IDCBodyPartRegression/thesis_cropped_lung.JPG new file mode 100644 index 000000000..28b405f1e Binary files /dev/null and b/PW36_2022_Virtual/Projects/IDCBodyPartRegression/thesis_cropped_lung.JPG differ diff --git a/PW36_2022_Virtual/Projects/KaapanaXNATExploration/README.md 
b/PW36_2022_Virtual/Projects/KaapanaXNATExploration/README.md new file mode 100644 index 000000000..af3fc8add --- /dev/null +++ b/PW36_2022_Virtual/Projects/KaapanaXNATExploration/README.md @@ -0,0 +1,65 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Investigation of Kaapana and XNAT as platforms for data management and collaborative research + +## Key Investigators + +- Nadya Shusharina (BWH & MGH) +- Klaus Kades (DKFZ) +- Hanno Gao (DKFZ) +- Randy Gollub (MGH) +- Andrey Fedorov (BWH) + + +# Project Description + +We have installed the platforms on the Google Cloud Virtual Machines. To investigate the functionalities, we populated the platforms with publicly available datasets. + +## Objective + + +Ultimately, our goal is to have a platform that can be used to support data management needs for internal research activities. +1. Archival of DICOM images +2. Flexible and extensible interface to explore archived data +3. In-browser visualization of images and annotations +4. In-browser segmentation of images +5. Integration of analysis tools and their application to the data available in the platform + +## Approach and Plan + +1. Continue working on the evaluation of the platforms +2. Connect with any of the groups that worked on either Kaapana or XNAT to share experience +3. Meet with Kaapana maintainers to debug outstanding issues. +4. Work on resolving issues in the GCP installation. +5. 
Clarified status of XNAT OHIF plugin in the OHIF session (see [minutes](https://docs.google.com/document/d/1JYqLYsjaSJDLQPG0VPGKWMUjoDn8y8d1ySnafTmv8Bs/edit)) + +## Progress and Next Steps + +Current unresolved issues + +XNAT: +* not clear how to extend the search of DICOM metadata to include arbitrary attributes +* not clear if/how to integrate analysis tools +* workflow to access existing SEG/RTSTRUCT from XNAT-OHIF plugin is unclear (related post [here](https://groups.google.com/g/xnat_discussion/c/1Whl7kmjEh8)) +* not clear if integration of desktop annotation tools like Slicer is possible + +Kaapana: +* fixed MITK segmentation flow +* need to build platform from source - deployment from prebuilt solution has limitations +* need to work on integration of Slicer the same way as MITK - should be possible, can follow example, just need time +* overall, no blocking issues identified, just need time to debug/implement + + +# Illustrations + + + +# Background and References + +* [Kaapana docs](https://kaapana.readthedocs.io/en/latest/) +* [XNAT](https://www.xnat.org/) + + diff --git a/PW36_2022_Virtual/Projects/LowCostUltrasoundTraining/README.md b/PW36_2022_Virtual/Projects/LowCostUltrasoundTraining/README.md new file mode 100644 index 000000000..3d77c231f --- /dev/null +++ b/PW36_2022_Virtual/Projects/LowCostUltrasoundTraining/README.md @@ -0,0 +1,108 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Low-Cost Ultrasound Training + +## Key Investigators + +- David García Mato (Ebatinca S.L., Las Palmas de Gran Canaria, Spain) +- Csaba Pinter (Ebatinca S.L., Las Palmas de Gran Canaria, Spain) +- Rebecca Hisey (Queen's University, Kingston, ON, Canada) +- Leah Groves (Queen's University, Kingston, ON, Canada) +- Ahmed Mahran (Toronto General Hospital, ON, Canada) +- Matt McCormick (Kitware, Inc., United States) +- Steve Pieper (Isomics, Inc., United States) +- Fryderyk Kögl (BWH, TUM) + +# Project Description + +[**Ebatinca S.L.**](https://ebatinca.com/) 
is currently developing a **low-cost training platform for ultrasound imaging and ultrasound-guided procedures** in low- and middle-income countries. We are developing a 3D Slicer based application to perform training exercises and evaluate participants. The app is called **TrainUS** and it will be available soon with open-source license. + +Currently, we have already integrated some basic features: + +- Participant/recording management: create, edit, delete, filter + +- Hardware selection and configuration: connection with PLUS toolkit, US imaging parameters... + +![Configuration](https://user-images.githubusercontent.com/10816661/149749292-03676c38-4aef-4590-a3cb-48cd1533694b.PNG) + +- Selection of training exercises + +![TrainingSession](https://user-images.githubusercontent.com/10816661/149749209-3063512a-4b55-4372-a2cd-79d4e131cf07.PNG) + + + +## Objective + + + +1. Identify features to improve US training applications in 3D Slicer +2. Identify best approach to calibrate the ultrasound probe for US-guided procedures. +3. Discuss best exercises to train ultrasound imaging and US-guided procedures. +4. Identify specific clinical procedures of interest for low- and middle- income countries. +5. Discuss how to improve low-cost US training in 3D Slicer: virtual reality, artificial intelligence... + +## Ideas +1. Discuss best approach (and required additional developments, if any) to record an ultrasound (US) image volume and US probe position, and then enable trainees to simulate US imaging in that recorded volume by freely moving an US probe with respect to a phantom (instead of patient). This would enable the recording of US images of real anatomy and pathologies by expert radiologists. This would be really useful to create custom training exercises for medical students regarding detection of pathologies in US images, and others. + - Can we use US volume reconstruction + Registration + Volume reslice driver to achieve this? 
+ - Does reslicing of a reconstructed US volume generate realistic US images? + +2. A useful feature for ultrasound training will be to ask multiple-choice questions to users during the session. These questions could be used to ensure that, apart from hand-eye coordination, users understand the whole workflow for US-guided procedures. Customized questions could be included for each procedure to be trained. + - How could we show these questions in Slicer? Could we generate custom Qt widgets for this? Pop-up windows? + +## Approach and Plan + + + +1. Bring together researchers interested in low-cost ultrasound training +1. Establish multi-institutional collaborations towards improving ultrasound training in 3D Slicer +1. Define useful features for ultrasound training that can be added to 3D Slicer as extensions. + +## Progress and Next Steps + + + +1. We have identified that for training applications in 3D Slicer, it would be very useful to be able to ask questions to the users and ensure that the concepts are clear. Therefore, we have decided to develop a new extension to enable the creation and visualization of multiple choice questions in 3D Slicer. The idea would be to create question banks which are saved in CSV files and then enable the visualization of the selected questions by custom Qt widgets. This extension will be called [**SlicerEducation**](https://github.com/EBATINCA/SlicerEducation). + +2. We have started the development of a 3D Slicer module to facilitate the calibration of tracked ultrasound probes. 
This modules allows users to use two different methods for calibration: (1) a [stylus-based method](https://onedrive.live.com/view.aspx?resid=7230D4DEC6058018!3712&ithint=file%2cpptx&authkey=!ACNGX3PqH0BLg74), where the stylus tip position is recorded in the US image and a point-based registration is then performed, or (2) a [line-based method](https://link.springer.com/article/10.1007/s11548-016-1390-7), where a tracked needle (or stylus) can be moved across the US plane recording point along the needle and a line-to-point registration is performed. + +3. We have discussed the possibility of developing a module to train echocardiography in 3D Slicer using US simulation and a [**heart atlas from Toronto General Hospital**](https://sketchfab.com/apil_tgh/collections/toronto-heart-atlas). Example of web app developed by APIL research group: [https://apil-slice.web.app/#](https://apil-slice.web.app/#) + +# Illustrations + + +- Pop-up window with quiz: + + drawing + + drawing + + drawing + +- Module for tracked US probe calibration: + + drawing + + drawing + +- Echocardiography simulator ([https://apil-slice.web.app/#](https://apil-slice.web.app/#)): + + drawing + +# Background and References + + + +- **TrainUS** GitHub repository: [TrainUs app](https://github.com/EBATINCA/TrainUS) + +- **SlicerEducation** GitHub repository: [SlicerEducation](https://github.com/EBATINCA/SlicerEducation) + +- Google doc: [here](https://docs.google.com/document/d/1ettQu9WYvy-Dlz7vt42-5Hm7xJOltJJQ69PJZ_WBffg/edit?usp=sharing) + +- Tracked US probe calibration methods: [stylus-based method](https://onedrive.live.com/view.aspx?resid=7230D4DEC6058018!3712&ithint=file%2cpptx&authkey=!ACNGX3PqH0BLg74) & [line-based method](https://link.springer.com/article/10.1007/s11548-016-1390-7) + +- Echocardiography simulator: [https://apil-slice.web.app/#](https://apil-slice.web.app/#) + +- Heart atlas from Toronto General Hospital: 
[here](https://sketchfab.com/apil_tgh/collections/toronto-heart-atlas) diff --git a/PW36_2022_Virtual/Projects/MandibleReconstructionAutomaticPlanning/README.md b/PW36_2022_Virtual/Projects/MandibleReconstructionAutomaticPlanning/README.md new file mode 100644 index 000000000..1ab695411 --- /dev/null +++ b/PW36_2022_Virtual/Projects/MandibleReconstructionAutomaticPlanning/README.md @@ -0,0 +1,44 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Mandible Reconstruction Automatic Planning + +## Key Investigators + +- Mauro I. Dominguez +- Andras Lasso (PerkLab) +- Manjula Herath (Malmö University) + +# Project Description + +Ability to create a mandible reconstruction only from drawing a mandibleCurve and setting up some input parameters + +## Objective + + + +1. Objective A. Achieve automatic planning +1. Objective B. ... +1. Objective C. ... + +## Approach and Plan + + + +1. Develop the code, test and debug to make it work +1. ... +1. ... + +## Progress and Next Steps + +1. One algorithm to decimate optimally the input mandibleCurve was made. The mandible reconstruction is based on that decimated polyline (curve). The visualization is done using the previous algorithms of BoneReconstructionPlanner. +1. ... +1. ... 
+ +# Illustrations + +![automaticPlanningCaption](https://user-images.githubusercontent.com/19158307/150552080-65fe30ec-a483-48c3-8296-5709cfc3eccb.png) + + +# Background and References + +Branch with the code will soon be shared diff --git a/PW36_2022_Virtual/Projects/MultiOrganSegmentation/DICE.png b/PW36_2022_Virtual/Projects/MultiOrganSegmentation/DICE.png new file mode 100644 index 000000000..9e22b7146 Binary files /dev/null and b/PW36_2022_Virtual/Projects/MultiOrganSegmentation/DICE.png differ diff --git a/PW36_2022_Virtual/Projects/MultiOrganSegmentation/README.md b/PW36_2022_Virtual/Projects/MultiOrganSegmentation/README.md new file mode 100644 index 000000000..f85ca6a57 --- /dev/null +++ b/PW36_2022_Virtual/Projects/MultiOrganSegmentation/README.md @@ -0,0 +1,54 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Multi-organ Segmentation in Fetal Mice + +## Key Investigators + +- Murat Maga (Seattle Children's Research Institute, UW) +- Sara Rolfe (Seattle Children's Research Institute, UW) +- Andres Diaz-Pinto (Kings College, Nvidia) + +# Project Description + + + +## Objective + + + +1. We would like to assess whether MONAILabel can segment internal organs of fetal mice in similar quality to the deformable atlas-based registration. +2. Evaluate pros and cons of both approach. + +## Approach and Plan + + + +1. [3D Fetal mice data comes from International Mouse Phenotyping Consortium](https://www.mousephenotype.org/). VOlumes are approximately 210x250x400 voxels. +2. [E15 Fetal Mice atlas comes from](http://www.mouseimaging.ca/technologies/mouse_atlas/mouse_embryo_atlas.html). There are a total of 50 unique labels (plus background). +3. We used R implementation of [ANTs](https://github.com/ANTsX/ANTsR) to transfer atlas labels to individual mice using a two-pass deformable registration (initial low-res with SyN + MI, and high-resolution used SyN + CC). It takes approximately 4h to register one sample using 24 threads. +4. 
Reviewed the transferred labels on subject space (all resultant templates considered good enough to be used as is, without manual editing) +5. Edited the MultiLabel version of DeepEdit to match our label names. +6. Used 86 such labeled volumes for training data in MonaiLabel with default 0.8/0.2 split. We retained 14 additional samples with labels for inference tests. + +## Progress and Next Steps + + + +1. Initial model training for mouse embryo segmentation was done at a spatial resolution of 128x128x128. Training carried out to 500 epochs. While successful, there wasn't sufficient anatomical detail due to low spatial resolution of model. +2. Edited the spatial resolution to 192x192x192 (highest we can fit on Nvidia A6000, with 48GB of RAM). Used the initial model weights, and rerun the training for a further 100 epochs. This improved the detail considerably. We also achieved higher training dice scores on this higher resolution model. +3. Currently running inference on unseen data. Inference takes approximately one minute when monaiserver is local to the Slicer application. +4. Average DICE coefficient of 0.81 between inferred and reference labels registered using Syn + CC. Scores similar to other state-of-the-art methods for individual organ labels. 
+ +# Illustrations + + +![Comparison of low-res model inference to reference label](lowres_model.png) + +![Comparison of high-res model inference to reference label](highres_model.png) + +![DICE coefficient between inferred and reference labels](DICE.png) + +# Background and References + + diff --git a/PW36_2022_Virtual/Projects/MultiOrganSegmentation/highres_model.png b/PW36_2022_Virtual/Projects/MultiOrganSegmentation/highres_model.png new file mode 100644 index 000000000..4cccab434 Binary files /dev/null and b/PW36_2022_Virtual/Projects/MultiOrganSegmentation/highres_model.png differ diff --git a/PW36_2022_Virtual/Projects/MultiOrganSegmentation/lowres_model.png b/PW36_2022_Virtual/Projects/MultiOrganSegmentation/lowres_model.png new file mode 100644 index 000000000..5e0e0a04f Binary files /dev/null and b/PW36_2022_Virtual/Projects/MultiOrganSegmentation/lowres_model.png differ diff --git a/PW36_2022_Virtual/Projects/NousNav/README.md b/PW36_2022_Virtual/Projects/NousNav/README.md new file mode 100644 index 000000000..993582a15 --- /dev/null +++ b/PW36_2022_Virtual/Projects/NousNav/README.md @@ -0,0 +1,43 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# NousNav: Low-cost neuronavigation system + +## Key Investigators + +- Étienne Léger (BWH) +- Alexandra Golby (BWH) +- Sam Horvath (Kitware) +- Sarah Frisken (BWH) +- David Allemang (Kitware) +- Tina Kapur (BWH) +- Steve Pieper (Isomics) +- Jean-Christophe Fillion-Robin (Kitware) +- Sonia Pujol (BWH) +- Kelly Wang (MIT) + +# Project Description + +The NousNav project is an initiative led by Dr Alex Golby to develop a low-cost neuronavigation system designed for use in low- and middle-income countries. We are developing a 3D Slicer based application focused on supporting segmentation, registration and navigation tasks. + +The project will also include the development of open source hardware designs for these applications. + +## Objective + +1. Discuss next steps and current progress +2. 
Explore ideas to improve surface registration +3. Explore ideas for skin segmentation + +## Approach and Plan + +1. Discuss specific requirements +2. Test new methods +3. Fix existing bugs + +## Progress and Next Steps + +1. Added tracing registration to the workflow (see picture below) +2. See other related projects for other updates on tracking, segmentation, etc. + +# Illustrations + +![Tracing](trace.jpeg) diff --git a/PW36_2022_Virtual/Projects/NousNav/trace.jpeg b/PW36_2022_Virtual/Projects/NousNav/trace.jpeg new file mode 100644 index 000000000..c5beebbb3 Binary files /dev/null and b/PW36_2022_Virtual/Projects/NousNav/trace.jpeg differ diff --git a/PW36_2022_Virtual/Projects/OHIFModeGallery/README.md b/PW36_2022_Virtual/Projects/OHIFModeGallery/README.md new file mode 100644 index 000000000..8fcf33f83 --- /dev/null +++ b/PW36_2022_Virtual/Projects/OHIFModeGallery/README.md @@ -0,0 +1,134 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# OHIF-v3 Mode Gallery + +## Key Investigators + +- Alireza Sedghi (OHIF) +- James Petts (OHIF) +- Erik Ziegler (OHIF) + + +# Project Description + +`OHIF-v3` architecture has been re-designed to enable building applications that are easily extensible to various use cases (Modes) that behind the scene would utilize desired functionalities (Extensions) to reach the goal of the use case. A mode can be thought of as a viewer app configured to perform a specific task, such as tracking measurements over time, 3D segmentation, a guided radiological workflow, etc. Addition of modes enables application with many applications as each mode become a mini app configuration behind the scene. + +Currently OHIF developers have to copy paste the source code of a sample Mode/Extension and edit the source code to let OHIF know about the new Mode/Extension they are developing, which has its limitations. 
The purpose of this project is to overcome this by enabling self-registration of Modes and Extensions and provide an ohif-cli tool to automatically generate templates and link Mode/Extension(s) internally. + +## Objectives + +- Create a CLI tool for making new extensions/modes (similar to e.g. create-react-app), and automagically installing extensions/modes from npm and including them in the source. + - Commands to include: + - create-mode, create-extension : for generating template extension and mode + - add-mode, add-extension : for adding mode/extensions to OHIF (either linking locally or installing published modes and extensions from npm) + - remove-extension, remove-mode : same as above but for removing + - list : list all extensions and modes that are installed in OHIF +- Update OHIF to dynamically install extensions and modes from config files rather than having to hard code their inclusion. + - For example one could install OHIF, then a set of modes/extensions, programmatically. +- Parse information from npm to populate the markdown of the OHIF page for installable modes and extensions. + +Stretch Goals: + +- Versioning errors + conflict resolution for mode dependencies. +- Installing a mode installs required extensions. +- Type the contract interfaces for extensions and modes in typescript. + +Super stretch goal: type *all* the things + +## Approach and Plan + +Complete all of the primary objectives as fast as possible and then play with the stretch goals. + +## Progress and Next Steps + +### Primary Goals + +Core: +- [x] Self-registering extensions from configuration JSON (dynamically build JavaScript required in node and inject these files at build time). +- [x] Basic CLI tool codebase. +- [x] Basic working create-mode command. +- [x] Basic working create-extension command. +- [x] Basic working add-mode, remove-mode commands. +- [x] Basic working add-extension, remove-extension commands. +- [x] Basic list command. 
+- [x] Test CLI tools with actual extensions. +- [ ] WIP Documentation in the docs + +Gallery +- [x] Example mode and extension + publish to NPM. (@ohif-test/mode-clock , @ohif-test/mode-extension) +- [x] Create mode gallery page which consumes markdown files to generate a page with a title, description, dependencies, images and copyable install commands. + + +### Stretch Goals + +Stretch Goals + +Core: +- [x] Installing a mode installs required extensions (not versioned). +- [ ] Augment mode schema to optionally specify semantic version for required ohif-extensions. +- [x] Automatically download extensions of correct version when installing modes. +- [ ] WIP Verify that npm packages fetched by CLI are _actually_ conforming to extensions/modes so we don't just cross our fingers. +- [ ] WIP Error handling for extension conflicts. +- [x] Error handling when constructing modes at runtime for missing extensions/extension version mismatches. +- [ ] Type mode and extension schema and make these types publically available somewhere. +- [ ] Add the type contracts to the templates produced by ohif-cli create-extension and ohif-cli create-mode. + +Gallery: +- [x] Create "whitelist" for extensions. +- [x] Parse information from npm repo for whitelisted extensions to populate ohif-modes gallery page. +- [x] Search NPM for ohif-extension and ohif-mode keywords within cli and show name and short description + + +# Illustrations + +CLI illustrations + +1) `create-mode` command to create a new template to write modes + + drawing + +It generates the template files for you to write your own mode + + ![image](https://user-images.githubusercontent.com/7490180/150410470-987847ce-0316-45d4-865d-27c03be3422f.png) + +2) `create-extension` command to create a new template to write extensions. 
+ + drawing + +It also generates template for an extension + + ![image](https://user-images.githubusercontent.com/7490180/150410992-9738437d-1222-4b64-a33f-860a150cbec8.png) + + +3) `add-mode ` will install any ohif-mode that has been published on npm registry and make it available on OHIF. + + + ![ezgif com-gif-maker (11)](https://user-images.githubusercontent.com/7490180/150447751-1428c1d0-26a4-4079-a99e-942ca7d11352.gif) + + +After installation the mode becomes avaiable. + +4) `remove-mode ` + + ![ezgif com-gif-maker (12)](https://user-images.githubusercontent.com/7490180/150454474-1356a0d5-af35-41ec-b262-1100d077e86e.gif) + +5) `search` will search in all npm packages for those who have `ohif-extension` or `ohif-mode` in their keywords and display their information + + drawing + +6) OHIF website update + +Using Github GraphQL we showcase a list of modes that are published on the npm registry. + +![image](https://user-images.githubusercontent.com/7490180/149446827-ece3aa65-32a5-439d-803b-0492ad964a42.png) + +The README file of each mode repository is fetched and shown in its detailed page + +![image](https://user-images.githubusercontent.com/7490180/150454823-14fe56e5-0327-494a-ac36-e55f9ee88a87.png) + + +# Background and References + +- OHIF: https://ohif.org/ +- OHIF-v3 Demo: https://v3-demo.ohif.org/ +- OHIF Documentation: https://v3-docs.ohif.org/ diff --git a/PW36_2022_Virtual/Projects/OHIFonGCP/README.md b/PW36_2022_Virtual/Projects/OHIFonGCP/README.md new file mode 100644 index 000000000..820cf299a --- /dev/null +++ b/PW36_2022_Virtual/Projects/OHIFonGCP/README.md @@ -0,0 +1,59 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# OHIF deployment using Google Cloud Platform and Firebase + +## Key Investigators + +- Andrey Fedorov (BWH) +- Igor Octaviano (Radical) +- Deepa Krishnaswamy (BWH) +- Bill Longabaugh (Institute for Systems Biology) +- Khaled Younis (Philips) +- Steve Pieper (Isomics) + + +# Project Description + +This project aims to 
document the instructions of deploying [OHIF Viewer](https://github.com/OHIF/Viewers) v2 on [Google Cloud](https://cloud.google.com/sdk/docs/install) as a [FireBase](https://firebase.google.com/) hosted application. Such hosted OHIF Viewer can be used to connect the viewer to Google Healthcare DICOM stores. The main motivation for this is to support users analyzing the data available in [NCI Imaging Data Commons (IDC)](https://imaging.datacommons.cancer.gov) in visualizing results of IDC data analysis. + +## Objective + + +1. As simple as possible instructions of deploying OHIF Viewer +2. Collect user feedback + + +## Approach and Plan + + + +1. Go over the prerequisites (GCP OAuth, Firebase) +2. Clean up the [Colab notebook](https://colab.research.google.com/drive/1PbYm6HVgsXaUYrcujBr_bPWS5hrBMSUW?usp=sharing) draft prepared earlier +3. Prepare examples of populating a GCP Healthcare DICOM store with analysis results and visualizing those +4. Update [IDC documentation](https://learn.canceridc.dev/) +5. Collect feedback from anyone interested + +## Progress and Next Steps + +Tutorial for the most part is completed, and (almost!) ready for testing - see [here](https://docs.google.com/document/d/1v4Syu_yOV6yH--QBLGzsL9fJ7-XyD1CnQu4iTIoPVD8/edit?usp=sharing) the google doc, which links to 2 colab notebooks that are intended to allow you to deploy the viewer without having to install any of the dev tools on your computer, and populate a cloud-based DICOM store. 
+ +Unfortunately, there are outstanding issues: +* Deepa encountered an error described in [this thread](https://discourse.canceridc.dev/t/google-cloud-deployment-of-the-ohif-viewer/246/2) - Andrey cannot explain this +* Andrey ran into a permissions issues while populating a DICOM store in [this notebook](https://colab.research.google.com/drive/1KwvAuBmTRKyt8PrYKUE5nDTZcwkWEEdc?usp=sharing) - amazingly, this also has not been explained yet, even with all the experience Bill L put into this +* on the second attempt to complete, Deepa ran into "Error 500: Internal error" deploying with Firebase + + +# Illustrations + + + +# Background and References + +* [NCI Imaging Data Commons (IDC)](https://imaging.datacommons.cancer.gov) +* [OHIF Viewer](https://github.com/OHIF/Viewers) +* [IDC tutorial Google Colab notebooks](https://learn.canceridc.dev/cookbook/notebooks) + + diff --git a/PW36_2022_Virtual/Projects/PRISMRendering/README.md b/PW36_2022_Virtual/Projects/PRISMRendering/README.md new file mode 100644 index 000000000..5432422c8 --- /dev/null +++ b/PW36_2022_Virtual/Projects/PRISMRendering/README.md @@ -0,0 +1,67 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# PRISM Volume Rendering + +## Key Investigators + +- Simon Drouin, ETS Montreal +- Steve Pieper, Isomics, Cambridge MA, USA +- Andrey Titov, ETS Montreal +- Rafael Palomar, Oslo University Hospital / NTNU, Norway + +# Project Description + +The goal of this project is to enable the development of advanced 3D rendering techniques in Slicer. The goal is to facilitate access to GPU shaders and enable GPU-based filtering in Slicer by improving shader access multipass rendering in VTK and Slicer. The [PRISM Module](https://github.com/ETS-vis-interactive/SlicerPRISMRendering) in Slicer will serve as a test environment for the new capabilities. + +## Long-term Objective + +1. Facilitate the development and debugging of GPU shaders for Slicer +2. 
Extend the principles introduced in the PRISM module to surface rendering and other types of rendering +3. Integrate GPU filters in Slicer and connect them with volume rendering in such a way that filtered volumes do not have to be transferred back to CPU memory before rendering. See work by Kyle Sunderland on VTK GPU image filters (branch [here](https://github.com/Sunderlandkyl/VTK/commits/vtkGPUImageFilter3)). +4. Explore custom rendering to simplify integration with the vtk render process. Prior work includes: + * Python scripted Actor/Mappers: https://www.slicer.org/wiki/Slicer3:Python:ScriptedActor + * SimpleMapper: https://github.com/IbisNeuronav/Ibis/tree/master/IbisVTK/vtkExtensions + +## PW36 Objective + +1. Adapt the PRISMRendering module to the new Markup interface in Slicer 5. +2. Enable opening shaders with tags in a text editor while running Slicer + * Previous efforts by Simon Drouin were made to facilitate shader debugging by leaving tags in the shader code. Code is available in [this branch](https://gitlab.kitware.com/drouin-simon/vtk/-/tree/volume-shader-readability). + * In vtkShaderProgram class, debug functionality is available by setting the string variable FileNamePrefixForDebugging, which loads a shader from a file before rendering or dumps the shader to a file if it doesn't already exist. However, this functionality is private. Mappers should have public functions to enable this debugging mechanism. +3. Generalize the mechanism that allows the VolumeRendering module to store vtkShaderProperties in the display node to obtain the same behavior with the Models module. + +## Progress and Next Steps + +### Adapting code to the new Markup interface +1. Identified documentation on changes here: + * [Migration guide](https://www.slicer.org/wiki/Documentation/Nightly/Developers/Tutorials/MigrationGuide#Markups) + * [Infrastructure changes](https://www.slicer.org/wiki/Documentation/Labs/Improving_Markups) +2. 
The Region of Interest (ROI) of the volume was transformed to use the new Markups Module with native scaling and rotations. This also fixed some of the issues that caused the module to crash. + +### Generalizing vtkShaderProperties +1. Move the management of vtkShaderProperty object from vtkMRMLVolumeRenderingDisplayNode to base class vtkMRMLDisplayNode +2. TODO: Find out if the base class of the displayable manager is able to take over the assignment of vtkShaderProperty to view actors to replace the work of vtkMRMLVolumeRenderingDisplayableManager? + +### Future of Slicer advanced rendering +A discussion between Steve Pieper, Rafael Palomar, Simon Drouin and Andrey Titov has allowed us to identify a few requirements for the future of rendering in VTK and Slicer: +1. Allow for GPU preprocessing pipelines, available for volumes, geometry and textures. +2. Allow for an arbitrary number of textures, scalar and vector fields that can easily be fed into mappers and easily accessed in shaders. +3. An arbitrary number of transfer functions can be fed into mappers and easily accessed in shaders. + +# Illustrations + +PRISM - Markup ROI: +![PRISM_Markups](images/PRISM-Markups.PNG?raw=true) + +PRISM - Markup ROI (rotated and scaled): +![PRISM_Markups_rotated](images/PRISM-Markups_rotated.PNG?raw=true) + +# Background and References + + +- PRISM Module [GitHub repository](https://github.com/ETS-vis-interactive/SlicerPRISMRendering). 
+- [Original article](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0193636) about the PRISM framework that served as a basis to develop the PRISM module in Slicer +- Previous project weeks + - [PW35](https://projectweek.na-mic.org/PW35_2021_Virtual/Projects/PRISM_volume_rendering/) + - [PW30](https://projectweek.na-mic.org/PW30_2019_GranCanaria/Projects/GLSLShaders/) + - [PW28](https://projectweek.na-mic.org/PW28_2018_GranCanaria/Projects/MultiVolumeRendering/) diff --git a/PW36_2022_Virtual/Projects/PRISMRendering/images/PRISM-Markups.PNG b/PW36_2022_Virtual/Projects/PRISMRendering/images/PRISM-Markups.PNG new file mode 100644 index 000000000..f377194a2 Binary files /dev/null and b/PW36_2022_Virtual/Projects/PRISMRendering/images/PRISM-Markups.PNG differ diff --git a/PW36_2022_Virtual/Projects/PRISMRendering/images/PRISM-Markups_rotated.PNG b/PW36_2022_Virtual/Projects/PRISMRendering/images/PRISM-Markups_rotated.PNG new file mode 100644 index 000000000..f41619c29 Binary files /dev/null and b/PW36_2022_Virtual/Projects/PRISMRendering/images/PRISM-Markups_rotated.PNG differ diff --git a/PW36_2022_Virtual/Projects/PRISMRendering/images/README.md b/PW36_2022_Virtual/Projects/PRISMRendering/images/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/PW36_2022_Virtual/Projects/README.md b/PW36_2022_Virtual/Projects/README.md new file mode 100644 index 000000000..ee5a3ba63 --- /dev/null +++ b/PW36_2022_Virtual/Projects/README.md @@ -0,0 +1,18 @@ +# How to create a new project + + +- Post questions about the project idea and team on the [Project Week forum][forum], our communication mechanism as of PW28. +- When you are ready, add a new entry in the list of **Projects** by creating a new `README.md` file in subfolder in `Projects` folder, and copying contents of [project description template][project-description-template] file into it. Step-by-step instructions for this are: + +1. 
Open [project description template][project-description-template] and copy its full content to the clipboard +1. Go back to [Projects](https://github.com/NA-MIC/ProjectWeek/tree/master/PW36_2022_Virtual/Projects) folder on GitHub +1. Click on "Create new file" button +1. Type `YourProjectName/README.md` +1. Paste the previously copied content of project template page into your new `README.md` +1. Update at least your project's __title, key investigators, project description sections__ +1. Add a link to your project to the [main project list](..#projects-how-to-add-a-new-project) + +Note: some steps above may require creating a [pull request](https://help.github.com/articles/creating-a-pull-request/) until your account is given write access. + +[forum]: https://discourse.slicer.org/c/community/project-week +[project-description-template]: https://raw.githubusercontent.com/NA-MIC/ProjectWeek/master/PW30_2019_GranCanaria/Projects/Template/README.md diff --git a/PW36_2022_Virtual/Projects/ROS-MED/README.md b/PW36_2022_Virtual/Projects/ROS-MED/README.md new file mode 100644 index 000000000..14f2906a5 --- /dev/null +++ b/PW36_2022_Virtual/Projects/ROS-MED/README.md @@ -0,0 +1,54 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Integration of ROS and 3D Slicer using OpenIGTLink + +## Key Investigators + +- Junichi Tokuda (BWH) +- Tamas Ungi (Queen's) +- Simon Leonard (JHU) +- Axel Krieger (JHU) +- Mark Fuge (UMD) +- Yiwei Jiang (WPI) + +# Project Description + +We have been developing [ROS-IGTL-Bridge](https://rosmed.github.io), which is an interface for Robot Operating System (ROS) to exchange data with 3D Slicer. +See [the project page](https://rosmed.github.io) for detail. + +![ROSMED](rosmed_ismr_2021_2.jpg) + +## Objective + +There are three goals for this project week: + +1. Refine the tutorial page, which was created for International Symposium on Medical Robotics (ISMR) 2021. +2. 
Document the virtual platform (i.e., Docker) used in the tutorial. +3. Ask for feedback from the 3D Slicer community + +## Approach and Plan + + + +1. Describe specific steps of **what you plan to do** to achieve the above described objectives. +1. ... +1. ... + +## Progress and Next Steps + + + +1. Describe specific steps you **have actually done**. +1. ... +1. ... + +# Illustrations + + + +# Background and References + + diff --git a/PW36_2022_Virtual/Projects/ROS-MED/rosmed_ismr_2021_1.jpg b/PW36_2022_Virtual/Projects/ROS-MED/rosmed_ismr_2021_1.jpg new file mode 100644 index 000000000..b7a7d8352 Binary files /dev/null and b/PW36_2022_Virtual/Projects/ROS-MED/rosmed_ismr_2021_1.jpg differ diff --git a/PW36_2022_Virtual/Projects/ROS-MED/rosmed_ismr_2021_2.jpg b/PW36_2022_Virtual/Projects/ROS-MED/rosmed_ismr_2021_2.jpg new file mode 100644 index 000000000..e7f4064d1 Binary files /dev/null and b/PW36_2022_Virtual/Projects/ROS-MED/rosmed_ismr_2021_2.jpg differ diff --git a/PW36_2022_Virtual/Projects/SkinSegmentation/README.md b/PW36_2022_Virtual/Projects/SkinSegmentation/README.md new file mode 100644 index 000000000..3e0d26a46 --- /dev/null +++ b/PW36_2022_Virtual/Projects/SkinSegmentation/README.md @@ -0,0 +1,58 @@ + +# Skin Segmentation on MR to facilitate NousNav Registration + +## Key Investigators + +- Reuben Dorent (King's College London, UK) +- Tina Kapur (Brigham and Women's Hospital, USA) +- Sarah Frisken (Brigham and Women's Hospital, USA) +- Mohammad Jafari (Brigham and Women's Hospital, USA) +- Samantha Horvath (Kitware) +- Jean-Christophe Fillion-Robin (Kitware) +- Harneet Cheema (Brigham and Women's Hospital, USA) +- Fryderyk Kögl (BWH, TUM) + +# Project Description +Neuronavigation systems allow for visualization of pre-operative images and planning information to estimate the precise location of target surgical areas. Patient-to-image mapping is a key step in the workflow of these neuronavigation systems. 
Registration approaches typically rely on landmarks on pre-operative images as well as on the patient in the operating room (OR) [1]. An alternative approach would be to directly map the patient's skin surface [2]. While extracting the skin surface of the patient in the OR can be performed using existing technologies (e.g., a pointer, a laser) [3], automatic skin surface extraction on scans remains an open problem. This project aims at developing an automated skin segmentation tool for pre-operative scans. + + +## Objective + + + +1. Create a database with manual annotations of the skin on T1w scans. +2. Develop a basic deep learning approach to perform skin segmentation. +3. Integrate the framework in Slicer. + +## Approach and Plan + + + +1. Develop a method that is: 1/ robust to different acquisition protocols (e.g., scanner manufacturer, slice thickness, acquisition orientation); 2/ robust to different imaging modalities (e.g. CT, T1w MR, T2-FLAIR MR); relatively fast (~1-2min max). +5. Assess the performance of the proposed technique. +6. Develop a Slicer module with the pre-trained model. + +## Progress and Next Steps + + + +1. The algorithm has been developed. +2. The Slicer module is now available [here](https://github.com/ReubenDo/SlicerSkinExtractor/) +3. The algorithm still needs to be benchmarked and qualitatively assessed + + +# Illustrations + + +![Example of skin surface extraction using Slicer](./misc/example.png) + + +# Background and References + + +1. Gerard, Ian J et al. “New Protocol for Skin Landmark Registration in Image-Guided Neurosurgery: Technical Note.” Neurosurgery vol. 11 Suppl 3 (2015): 376-80; discussion 380-1. doi:10.1227/NEU.0000000000000868 +2. Shamir, R. R., Freiman, M., Joskowicz, L., Spektor, S., & Shoshan, Y. (2009). Surface-based facial scan registration in neuronavigation procedures: a clinical study, Journal of Neurosurgery JNS, 111(6), 1201-1206. 
Retrieved Jan 3, 2022, [Link](https://thejns.org/view/journals/j-neurosurg/111/6/article-p1201.xml) +3. [BrainLab Softouch®](https://www.equiphos.com/wp-content/uploads/2015/05/Flyer-Softtouch-regiitration.pdf) diff --git a/PW36_2022_Virtual/Projects/SkinSegmentation/misc/example.png b/PW36_2022_Virtual/Projects/SkinSegmentation/misc/example.png new file mode 100644 index 000000000..3fb404675 Binary files /dev/null and b/PW36_2022_Virtual/Projects/SkinSegmentation/misc/example.png differ diff --git a/PW36_2022_Virtual/Projects/SkinSegmentation/misc/slicer_screenshot.png b/PW36_2022_Virtual/Projects/SkinSegmentation/misc/slicer_screenshot.png new file mode 100644 index 000000000..b890e9467 Binary files /dev/null and b/PW36_2022_Virtual/Projects/SkinSegmentation/misc/slicer_screenshot.png differ diff --git a/PW36_2022_Virtual/Projects/Slicer5/README.md b/PW36_2022_Virtual/Projects/Slicer5/README.md new file mode 100644 index 000000000..d923212e5 --- /dev/null +++ b/PW36_2022_Virtual/Projects/Slicer5/README.md @@ -0,0 +1,47 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Slicer 5 Release Preparation + +## Key Investigators + +- Sam Horvath +- Jean-Christophe Fillion-Robin +- Steve Pieper +- Andras Lasso + +# Project Description + +Finalizing the remaining tasks for the Slicer 5 release by the end of Project Week! + +[Remaining Issues](https://github.com/Slicer/Slicer/milestone/1) + + +## Objective + + + +- Finish pending tasks on the Slicer 5 milestone +- Prepare for SLicer 5 release +- Cut Slicer 5 release end of week + + + + +## Approach and Plan + + + +Issues have already been assigned to community members, but please reach out if there is something in the milestone that you could help with (please post to the issue on GitHub). 
+ + +## Progress and Next Steps + + + +- Updates made to Slicer to prepare for Python update +- Update CMake and VS on Slicer factories +- Reviewing and retargeting issues +- Adding disclaimers to older wiki pages + +Next steps: +- Finalize python update diff --git a/PW36_2022_Virtual/Projects/SlicerCollaboration/README.md b/PW36_2022_Virtual/Projects/SlicerCollaboration/README.md new file mode 100644 index 000000000..1258ed0b6 --- /dev/null +++ b/PW36_2022_Virtual/Projects/SlicerCollaboration/README.md @@ -0,0 +1,81 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Collaborative Slicer session + +## Key Investigators + +- Csaba Pinter (EBATINCA) +- Mónica García (ULPGC - Universidad de Las Palmas de Gran Canaria) +- Jean-Christophe Fillion-Robin (Kitware) + +# Project Description + + + +Slicer module (packaged in an extension) for connecting two or more running Slicer sessions one being the server the other(s) the client(s). The module synchronizes properties and data via OpenIGTLink that have not been supported so far, but are needed for shared session, for example all markup types and display properties of any nodes. This is one of the main components needed for AR/VR collaboration. + +## Objective + + + +1. Objective A. Showcase current progress +1. Objective B. Collect feedback, add select features +1. Objective C. Fix remaining issues + +## Approach and Plan + + + +1. List all currently used node types in Slicer and decide which are supported, and those that are not how will be supported. Create plan. Special attention to: + 1. Segmentations + 1. Subject hierarchy (folders, reparenting etc.) +3. Remote connections + 1. Try connecting computers on the same network + 1. Brainstorm about how to facilitate connection of instances without fixed IP +4. Integrate necessary commit into SlicerOpenIGTLink + +## Progress and Next Steps + + + +1. Demo and discussion about the extension and features, talk about use cases :heavy_check_mark: +1. 
SlicerOpenIGTLink contribution integrated, see [commit](https://github.com/openigtlink/SlicerOpenIGTLink/commit/a28d381af4542063f60e885c0505e45fbd5e9006) :heavy_check_mark: +1. List of currently used node types in Slicer. + 1. Supported by OpenIGTLink: + - Models + - Linear Transforms + - Text + - Volumes + - Fiducial Markups + 1. Not supported by OpenIGTLink, but supported by SlicerCollaboration: + - Display nodes + - Remaining Markups + - Parent transforms + 1. To be supported by SlicerCollaboration: + - Segmentations + - Subject hierarchy structure + +1. Discussion about how to solve remote connections through OpenIGTLink with Slicer running in a computer with no externally visible fixed IP. Suggested solutions: + 1. Setup port forwarding on your router. This way you can call this address from outside and the router automatically redirects traffic to the specified computer on the local network. Downsides: the setup needs to be redone every time your router allocates a new local ip to your computer. It is usually not possible to setup like that in the office unless you have control of the network node going out. + 2. See if there's a way to make a SlicerSharing app for one of the services that already does real time sharing. Teams and Zoom both have hooks for apps. + 3. VNC connect, TeamViewer, etc. use a rendezvous server that allows clients and servers to find each other and connect directly. See [here](https://stackoverflow.com/questions/53479668/how-to-make-2-clients-connect-each-other-directly-after-having-both-connected-a). + 4. Use a VPN server installed somewhere (Ebatinca or a University) that leases IP addresses that could be private but allow interconnection. This way, a possible collaborative-slicer user only needs to install the VPN client and connect. 
+ +# Illustrations + + + +Screenshot showing VR usage: +![SlicerCollaboration screenshot VR](SlicerCollaboration_VR.PNG) + +Screenshot showing server/client: +![SlicerCollaboration screenshot server/client](SlicerCollaboration_ServerClient.PNG) + +# Background and References + + + +* [GitHub repository](https://github.com/EBATINCA/SlicerCollaboration) diff --git a/PW36_2022_Virtual/Projects/SlicerCollaboration/SlicerCollaboration_ServerClient.PNG b/PW36_2022_Virtual/Projects/SlicerCollaboration/SlicerCollaboration_ServerClient.PNG new file mode 100644 index 000000000..44f4354b0 Binary files /dev/null and b/PW36_2022_Virtual/Projects/SlicerCollaboration/SlicerCollaboration_ServerClient.PNG differ diff --git a/PW36_2022_Virtual/Projects/SlicerCollaboration/SlicerCollaboration_VR.PNG b/PW36_2022_Virtual/Projects/SlicerCollaboration/SlicerCollaboration_VR.PNG new file mode 100644 index 000000000..5330317ff Binary files /dev/null and b/PW36_2022_Virtual/Projects/SlicerCollaboration/SlicerCollaboration_VR.PNG differ diff --git a/PW36_2022_Virtual/Projects/SlicerInternationalization/README.md b/PW36_2022_Virtual/Projects/SlicerInternationalization/README.md new file mode 100644 index 000000000..298bb3c5b --- /dev/null +++ b/PW36_2022_Virtual/Projects/SlicerInternationalization/README.md @@ -0,0 +1,62 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Slicer Internationalization + +## Key Investigators & Translators + +- Sonia Pujol (Brigham and Women's Hospital, Harvard Medical School, Boston, MA, USA) +- Steve Pieper (Isomics Inc., Cambridge, MA, USA) +- Andras Lasso (Queen's University, Kingston, Canada) +- Mamadou Camara (Université Cheikh Anta Diop, Dakar, Senegal) +- Ibrahima Fall (Université Cheikh Anta Diop, Dakar, Senegal) +- Samba Diaw (Université Cheikh Anta Diop, Dakar, Senegal) +- Aissa Mboup (Université Cheikh Anta Diop, Dakar, Senegal) +- Jean-Christophe Fillion-Robin (Kitware Inc., Chapel Hill, NC, USA) +- Fryderyk Kögl (BWH, TUM) 
+- Attila Nagy (University of Szeged, Department of Medical Physics and Informatics, Hungary) +- Adriana Vilchis González (Universidad Autónoma del Estado de México, México) +- Theodore Aptekarev (Slicer Community, Russia) +- Pedro Moreira (Brigham and Women’s Hospital/Harvard Medical School, USA) +- Andrey Fedorov (Brigham and Women’s Hospital/Harvard Medical School, USA) +- Ahmedou Moulaye Idriss, Faculty of Medicine, University of Nouakchott Al Asriya , Mauritania +- Yahya Tfeil, Faculty of Medicine, University of Nouakchott Al Asriya, Mauritania + +# Project Description + + +The goal of the project is to develop a novel software infrastructure to enable the localization of 3D Slicer to multiple languages. The project is funded through an Essential Open Source Software for Science grant of the Chan Zuckerberg Initiative. + +## Objective + + + +To identify members of the Slicer community interested in contributing translations in their native language. + +## Approach and Plan + + +1. Extraction of Qt strings from Slicer +3. Daily translation hackathons with members of the Slicer community from Senegal, Mauritania, Hungary, Ukraine, Germany, Russia, Brazil, Mexico, USA and France + + +## Progress and Next Steps + + + +1. New translators for Hungarian, Spanish, Portuguese, Arabic, German and Polish +2. Translation of the user interface of the Volumes module into seven different languages in SlicerCrowdin +3. 
Implementation of the Language selector in Slicer + + + + +# Illustrations + +VolumesTranslations2 +LanguageSelector2 + + +# Background and References + +- [3D Slicer in My Language](https://chanzuckerberg.com/eoss/proposals/3d-slicer-in-my-language-internationalization-and-usability-improvements/) +- https://github.com/Slicer/Slicer/wiki/I18N diff --git a/PW36_2022_Virtual/Projects/SlicerLiver/README.md b/PW36_2022_Virtual/Projects/SlicerLiver/README.md new file mode 100644 index 000000000..fbc86594e --- /dev/null +++ b/PW36_2022_Virtual/Projects/SlicerLiver/README.md @@ -0,0 +1,76 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Slicer-Liver + +## Key Investigators + +- Rafael Palomar (Oslo University Hospital/NTNU, Norway) +- Gabriella d'Albenzio (Oslo University Hospital, Norway) +- Ole Vegard Solberg (SINTEF, Norway) +- Geir Arne Tangen (SINTEF, Norway) +- Javier Pérez de Frutos (SINTEF, Norway) + +# Project Description + +This project will continue the development of the *Slicer-Liver* extension +that will be developed through the [ALive project](https://alive-research.no). +The objective of the Slicer-Liver extension is to provide researchers +with tools to perform liver analytics towards planning of liver interventions +(resections, ablations). At this point in the project we need to port early +prototypes of our resection planning algorithms into 3D Slicer. + +![3D Bezier Surface Markup](screenshot.png) + +[Early prototype of the resection planning module](https://youtu.be/7M3DULQp81k) + +## Objectives + +1. Integrate the components developed during the [last + ProjectWeek](https://github.com/NA-MIC/ProjectWeek/tree/master/PW35_2021_Virtual/Projects/Slicer-Liver + "Slicer-Liver in the last ProjectWeek") in a resection planning working prototype. + +2. Add a Slicer module for computation of liver vascular territories. + +## Approach and Plan + +### Liver resection planning module + +1. 
Integration of resection deformation and resection initialization. +1. Add complex interactions (move groups of control points). +1. Adding load/saving functionality. +1. Development of distance measurements visualized in the resections using shaders. +1. Add a GUI to manage resections. + +### Liver analysis module + +1. Creation of a new Slicer module. +1. Implementation of end-points placement (markups). +1. GUI to manage segments (add, remove, edit). +1. Loading/saving vascular territories. + +## Illustrations + +![3D Bezier Surface Markup](bezier_surface_markup.png) + +![Resection initialization](resection_initialization.png) + +![Resection planning](resection_planning.png) + +## Progress and Next Steps + +During Project Week, we have been able to add *real-time computation of safety +margins* to our resection planning module, as well as create a new module for +*computation of vascular territories* + +[![Alt text](https://img.youtube.com/vi/--dIcE97RVQ/0.jpg)](https://www.youtube.com/watch?v=--dIcE97RVQ) + +Our next step is to work on the user interface and the loading/saving +functionality needed to make this a complete Slicer extension. + +# Background and References +1. [Slicer-Liver PW35](https://github.com/NA-MIC/ProjectWeek/tree/master/PW35_2021_Virtual/Projects/Slicer-Liver + "Slicer-Liver in the last ProjectWeek") (June 2021) +1. [NorMIT-Plan at NA-MIC project week](https://projectweek.na-mic.org/PW33_2020_GranCanaria/Projects/NorMIT-Plan/) (january 2020) +1. [NorMIT-Plan at NA-MIC project week](https://projectweek.na-mic.org/PW34_2020_Virtual/Projects/SlicerLiverAnalysis/) (December 2020) +1. Palomar, Rafael, et al. "A novel method for planning liver resections using deformable Bézier surfaces and distance maps." Computer Methods and Programs in Biomedicine 144 (2017): 135-45. +1. Palomar, Rafael, et al. "Surface reconstruction for planning and navigation of liver resections." Computerized Medical Imaging and Graphics 53 (2016): 30-42. 
diff --git a/PW36_2022_Virtual/Projects/SlicerLiver/bezier_surface_markup.png b/PW36_2022_Virtual/Projects/SlicerLiver/bezier_surface_markup.png new file mode 100644 index 000000000..9d1fdb191 Binary files /dev/null and b/PW36_2022_Virtual/Projects/SlicerLiver/bezier_surface_markup.png differ diff --git a/PW36_2022_Virtual/Projects/SlicerLiver/resection_initialization.png b/PW36_2022_Virtual/Projects/SlicerLiver/resection_initialization.png new file mode 100644 index 000000000..dcd1f430f Binary files /dev/null and b/PW36_2022_Virtual/Projects/SlicerLiver/resection_initialization.png differ diff --git a/PW36_2022_Virtual/Projects/SlicerLiver/resection_planning.png b/PW36_2022_Virtual/Projects/SlicerLiver/resection_planning.png new file mode 100644 index 000000000..987f1d8f0 Binary files /dev/null and b/PW36_2022_Virtual/Projects/SlicerLiver/resection_planning.png differ diff --git a/PW36_2022_Virtual/Projects/SlicerLiver/screenshot.png b/PW36_2022_Virtual/Projects/SlicerLiver/screenshot.png new file mode 100644 index 000000000..5310aa739 Binary files /dev/null and b/PW36_2022_Virtual/Projects/SlicerLiver/screenshot.png differ diff --git a/PW36_2022_Virtual/Projects/SlicerPipelines/CaseIterator.png b/PW36_2022_Virtual/Projects/SlicerPipelines/CaseIterator.png new file mode 100644 index 000000000..48683a54e Binary files /dev/null and b/PW36_2022_Virtual/Projects/SlicerPipelines/CaseIterator.png differ diff --git a/PW36_2022_Virtual/Projects/SlicerPipelines/PipelineCreator.png b/PW36_2022_Virtual/Projects/SlicerPipelines/PipelineCreator.png new file mode 100644 index 000000000..dff6acfc6 Binary files /dev/null and b/PW36_2022_Virtual/Projects/SlicerPipelines/PipelineCreator.png differ diff --git a/PW36_2022_Virtual/Projects/SlicerPipelines/README.md b/PW36_2022_Virtual/Projects/SlicerPipelines/README.md new file mode 100644 index 000000000..39800810d --- /dev/null +++ b/PW36_2022_Virtual/Projects/SlicerPipelines/README.md @@ -0,0 +1,65 @@ +Back to [Projects 
List](../../README.md#ProjectsList) + +# SlicerPipelines + +## Key Investigators + +- Connor Bowley (Kitware, USA) +- Sam Horvath (Kitware, USA) + +# Project Description + +A new extension, SlicerPipelines, is currently being developed and allows for GUI create-able modules (pipelines) in 3DSlicer. The term "pipeline" in this context is a module whose logic has a specific interface (or is wrapped to meet the interface), which allows for one MRML node input to one MRML node output algorithms. + +In general, the SlicerPipelines extension is meant to help improve efficiency when dealing with simple workflows. Two main modules currently exist, a PipelineCreator module that allows for the creation of pipelines, and a PipelineCaseIterator module that allows for automated running of a pipeline over all files in a directory. + +![Pipeline Creator](PipelineCreator.png) + +![Pipeline Creator Select Module](SelectModuleForPipeline.png) + +![Pipeline Case Iterator](CaseIterator.png) + +## Objective + + + +1. Increase usefulness and useability of the extension +1. Allow easier creation of new pipelines from existing modules + +## Approach and Plan + + + +1. Wrap more existing modules as pipelines to increase usefulness of the extension + - Pipelines to convert between MRML types (segmentation to model, scalar volume to label map volume, etc). Some of these already exist, but finish them out for the core MRML types. +1. Update [existing wrapper generator for CLI modules](https://github.com/Connor-Bowley/SlicerPipelines/blob/0db7dcb8bf05e14307a2ee7dfdcb009eb0a6c1b0/PipelineModules/PipelineModulesLib/CLIModuleWrapping.py) to be able to be used for more CLIs. Currently implementation is limited in types it supports. +1. Add ability to load newly created pipeline module when it is created, and add it to the additional module paths + +## Progress + + + +1. Added new pipelines + 1. Export Segmentation to Model + 2. Export LabelMap to Scalar Volume + 3. 
Export Scalar Volume to LabelMap + 4. Export LabelMap to Segmentation +2. Added ability to load newly created pipeline module on creation and add it to the additional module paths (similar to ExtensionWizard) +3. Update select module pop up to default the next input type to the last output type + +## Next Steps +1. Pipeline for Surface Wrap Solidify segment editor effect +2. Add to extension manager + + + + + +# Background and References + +- [Discourse post on SlicerPipelines](https://discourse.slicer.org/t/pipelines-in-3d-slicer/20107) +- [Main Github Repository](https://github.com/KitwareMedical/SlicerPipelines) +- [Github fork actively being developed](https://github.com/Connor-Bowley/SlicerPipelines) diff --git a/PW36_2022_Virtual/Projects/SlicerPipelines/SelectModuleForPipeline.png b/PW36_2022_Virtual/Projects/SlicerPipelines/SelectModuleForPipeline.png new file mode 100644 index 000000000..2691c18d2 Binary files /dev/null and b/PW36_2022_Virtual/Projects/SlicerPipelines/SelectModuleForPipeline.png differ diff --git a/PW36_2022_Virtual/Projects/SlicerTMS_Module/README.md b/PW36_2022_Virtual/Projects/SlicerTMS_Module/README.md new file mode 100644 index 000000000..f81701160 --- /dev/null +++ b/PW36_2022_Virtual/Projects/SlicerTMS_Module/README.md @@ -0,0 +1,81 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Real-time visualization for transcranial magnetic stimulation (TMS) + +## Key Investigators + +- Loraine Franke (University of Massachusetts Boston) +- Lipeng Ning (BWH & Harvard Medical School) +- Yogesh Rathi (BWH & Harvard Medical School) +- Steve Pieper (Isomics, Inc.) +- Daniel Haehn (University of Massachusetts Boston) + +# Project Description + +Transcranial magnetic stimulation is a noninvasive procedure used for treating depression with magnetic and electric fields to stimulate nerve cells. +A TMS coil is slowly moved over the subject's head surface to target certain areas in the brain. 
+Our project aims to develop a deep-learning powered software for real-time E-Field prediction and a visualization of TMS within 3D Slicer. + +## Objective + +Real-time visualization of an electric field (E-field) for transcranial magnetic stimulation (TMS) on the brain surface, possible visualization through an AR app (over browser). + +## Approach and Plan +- TMS module mapping NifTi file onto brain mesh +- OpenIGTLinkIF used to transfer data into 3D Slicer +- Next steps include connecting 3DSlicer to the web browser (via WebSocket) and +- Using a mobile device via WebXR to view/control 3D Slicer and later the TMS module + +## Progress and Next Steps + +1. Connecting Slicer with a mobile phone via SlicerWeb (https://github.com/pieper/SlicerWeb) +2. Explore WebXR: WebXR needs https, so either generate local certificate (https://blog.anvileight.com/posts/simple-python-http-server/) and make modifications in the SlicerWeb WebServer.py file. OR alternatively run with USB cable connected to computer (USB debugging in developer tools for Android. iPhone requires an Apple Developer Account for this) +3. Evaluating different approaches for AR with WebXR by testing different libraries: ThreeJS, A-Frame or React. +4. Visualizing our created 'butterfly' TMS coil in WebXR: A-Frame only allows obj and gltf file formats as models, Threejs also more like stl files. A-Frame allowed only image-, location- and marker-based AR tracking. Recently, AR.js enables markerless AR. Using WebXR without a marker is still in early stages of development. +5. Moving the TMS coil and scene by tapping on it on the mobile device. + +Next steps: +- More precise interaction with objects in AR on the phone: scaling and rotating of the coil via mobile screen gestures (Current WebXR approaches only allow static interaction without handling user's finger gestures on screen.) +- Retrieving user's and coil position coordinates. 
+- Send the current coil position coordinates into Slicer via SlicerWeb connection. + +## Illustrations + +- TMS coil model visualized in AR with A-Frame: + +![Moving TMS coil in AR](./render_coil_A-frame.gif) + + +- Current TMS module inside Slicer: + +![Slicer Module](./TMSModule_normedcoil_fmricolor.png) + + +-Visualization goal in Slicer from another software: + +![Brain surface and DT](./tmsonbrain.png) + + +# Background and References + +## Infos for running WebXR: + +Phones need a Depth sensorto run AR/VR. A list of supported devices can be found here: https://developers.google.com/ar/devices + +On an Android Phone via USB: +- PlayStore: Download Google VR Services and Google AR Services App +- Update Chrome/Camera apps etc. +- On the phone: Enable Developer tools (https://developer.android.com/studio/debug/dev-options) and USB debugging (description here: https://developer.chrome.com/docs/devtools/remote-debugging/) +- Run chrome://inspect#devices in the browser on your computer and it should detect USB connected devices + +For iPhone: +- Mozilla offers a WebXR Emulator that can be downloaded from the Apple Store for any iPhone and iPad: https://labs.mozilla.org/projects/webxr-viewer/ + +## For Slicer TMS Module (see previous project week ![PW 35](https://github.com/NA-MIC/ProjectWeek/tree/master/PW35_2021_Virtual/Projects/TMS_Slicer_Module)): + +vtkProbeFilter: https://vtk.org/doc/nightly/html/classvtkProbeFilter.html +Moving fiducials with CPYY: https://gist.github.com/pieper/f9da3e0a73c70981b48d0747132526d5 + +Measure rendering time in 3D Slicer: +1. Getting renderer: https://slicer.readthedocs.io/en/latest/developer_guide/script_repository.html#access-vtk-views-renderers-and-cameras +2. 
Then applying renderer.GetLastRenderTimeInSeconds() diff --git a/PW36_2022_Virtual/Projects/SlicerTMS_Module/TMSModule_normedcoil_fmricolor.png b/PW36_2022_Virtual/Projects/SlicerTMS_Module/TMSModule_normedcoil_fmricolor.png new file mode 100644 index 000000000..dc87ba1ab Binary files /dev/null and b/PW36_2022_Virtual/Projects/SlicerTMS_Module/TMSModule_normedcoil_fmricolor.png differ diff --git a/PW36_2022_Virtual/Projects/SlicerTMS_Module/render_coil_A-frame.gif b/PW36_2022_Virtual/Projects/SlicerTMS_Module/render_coil_A-frame.gif new file mode 100644 index 000000000..5d62acb45 Binary files /dev/null and b/PW36_2022_Virtual/Projects/SlicerTMS_Module/render_coil_A-frame.gif differ diff --git a/PW36_2022_Virtual/Projects/SlicerTMS_Module/tmsonbrain.png b/PW36_2022_Virtual/Projects/SlicerTMS_Module/tmsonbrain.png new file mode 100644 index 000000000..b81e904b5 Binary files /dev/null and b/PW36_2022_Virtual/Projects/SlicerTMS_Module/tmsonbrain.png differ diff --git a/PW36_2022_Virtual/Projects/SpineSegmentation/README.md b/PW36_2022_Virtual/Projects/SpineSegmentation/README.md new file mode 100644 index 000000000..eb7afa85c --- /dev/null +++ b/PW36_2022_Virtual/Projects/SpineSegmentation/README.md @@ -0,0 +1,58 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Spine Segmentation + +## Key Investigators + +- Ron Alkalay (Beth Israel Deaconess, Boston) +- Steve Pieper (Isomics) +- Andres Diaz-Pinto (KCL) +- Juan Ruiz (Ebatinca, ULPGC) +- YOU + +# Project Description + +Investigate and implement methods to segment the human spine from CT scans. See [last Project Week's page for background](https://projectweek.na-mic.org/PW35_2021_Virtual/Projects/SpineSegmentation/). + +## Objective + +1. Ideal segmentation will independently segment and label the vertebral bodies. +2. We want the system to integrate with Slicer's segmentation infrastructure. +3. We think a deep learning approach using MONAILabel will be useful for this. + +## Approach and Plan + +1. 
Learn as much as possible about MONAILabel +2. Investigate [VerSe](https://arxiv.org/abs/2001.09193) and if possible port it to Slicer/MONAI +3. Figure out if/how we can use spine CTs from IDC for training. + +## Progress and Next Steps + + + +1. Held many productive discussions and worked on training with the VerSe public data +1. Exchanged notes with the other MONAI Label projects +1. Installing MONAI Label at BIDMC machines to train on cadeveric and patient spine scans +2. Plan to make single-vertebra models for faster training of high resolution models (tractable on smaller GPU memory footprint) + +# Illustrations + + +# Current effort +![image](https://user-images.githubusercontent.com/126077/150421004-2185ad15-02a1-47ba-be09-45f4921e6741.png) +![image](https://user-images.githubusercontent.com/126077/150421056-48c5cb5b-a328-4142-823a-8d8831efccd8.png) + + + +# Initial effort +![image](https://user-images.githubusercontent.com/126077/149805728-25491bc0-f2ea-4799-84b3-3289f58e4f8f.png) +![image](https://user-images.githubusercontent.com/126077/149805758-ed6f30da-2817-47fa-ad04-eedb10c5a9e8.png) + + +# Background and References + +* https://github.com/anjany/verse +* https://projectweek.na-mic.org/PW35_2021_Virtual/Projects/SpineSegmentation/ diff --git a/PW36_2022_Virtual/Projects/StaticDicomWeb/README.md b/PW36_2022_Virtual/Projects/StaticDicomWeb/README.md new file mode 100644 index 000000000..2884a7026 --- /dev/null +++ b/PW36_2022_Virtual/Projects/StaticDicomWeb/README.md @@ -0,0 +1,61 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Static DICOM Generator + +## Key Investigators + +- Chris Hafey (AWS) +- Gang Fu (AWS) +- Jordan Kojouharov (AWS) +- Qing Liu (AWS) +- Dmitry Pavlov (AWS) +- Andres Diaz-Pinto (NVIDIA) +- Sachidanand Alle (NVIDIA) +- Steve Pieper (Isomics) +- YOU + +# Project Description + +In many cases, a user will have DICOM P10 files and want to access them for viewing or analysis purposes in AWS. 
+Today this is typically done by loading these images into an open source DICOM server such as Orthanc or DCM4CHEE and accessed via the included DICOMweb interface. +While these servers work, a simpler more scalable approach is possible for many access use cases by creating a static HTTP site in S3 with objects that correspond to DICOMweb responses. +Amazon S3 is a highly scalable, highly available object store that is perfect to build serverless applications, like serving DICOMWeb client requests from MONAILabel. +The primary outcome of this project to to demonstarte the usability of AI assissted annotations using MONAILabel and 3D Slicer, with DICOM P10 files on Amazon S3. +We will validate the usefullness of this solution using this [National Lung Screening Trial](https://wiki.cancerimagingarchive.net/display/NLST/National+Lung+Screening+Trial#5800702d1a85fbd42314c9eb5cdaef39d568cb8) dataset. + +## Objective + + + +1. Upload and convert large scale Lung CT scans in DICOM P10 files to Amazon S3 +2. Visualize and Annotate images using 3D Slicer +3. AI assisted annotations of spines in Lung CT scan + +## Approach and Plan + + + +1. Upload and convert Lung CT scans in DICOM P10 files to Amazon S3 +2. Train spine segementation model using Amazon SageMaker +3. Deploy model to MONAILabel +4. Visualize and Annotate images using 3D Slicer +5. AI assisted annotations of spines in Lung CT scan + +## Progress and Next Steps + + + +1. Describe specific steps you **have actually done**. +1. ... +1. ... 
+ +# Illustrations + +Here is the architecture diagram of this solution on AWS: + +![arch](./arch.jpg) + +# Background and References + +https://catalog.us-east-1.prod.workshops.aws/v2/workshops/ff6964ec-b880-45d4-bc1e-468b0c7fa854/en-US/ +https://wiki.cancerimagingarchive.net/display/NLST/National+Lung+Screening+Trial#5800702d1a85fbd42314c9eb5cdaef39d568cb8 diff --git a/PW36_2022_Virtual/Projects/StaticDicomWeb/arch.jpg b/PW36_2022_Virtual/Projects/StaticDicomWeb/arch.jpg new file mode 100644 index 000000000..83bbcb45e Binary files /dev/null and b/PW36_2022_Virtual/Projects/StaticDicomWeb/arch.jpg differ diff --git a/PW36_2022_Virtual/Projects/SupWMA/README.md b/PW36_2022_Virtual/Projects/SupWMA/README.md new file mode 100644 index 000000000..dd2eb281f --- /dev/null +++ b/PW36_2022_Virtual/Projects/SupWMA/README.md @@ -0,0 +1,73 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# A deep learning framework for superficial white matter parcellation, code release via SlicerDMRI + +## Key Investigators + +- Tengfei Xue (BWH & Usyd) +- Fan Zhang (BWH) +- Chaoyi Zhang (Usyd) +- Yuqian Chen (BWH & Usyd) +- Yang Song (UNSW) +- Nikos Makris (BWH) +- Yogesh Rathi (BWH) +- Weidong Cai (Usyd) +- Lauren J O'Donnell (BWH) + +# Project Description + + + +We propose a deep-learning-based framework, Superficial White Matter Analysis (SupWMA) (Xue et al 2022, ISBI), that performs an efficient and consistent parcellation of 198 SWM clusters from whole-brain tractography. We perform evaluation on a large tractography dataset with ground truth labels and on three independently acquired testing datasets from individuals across ages and health conditions. +In this project week, we work on releasing the code of SupWMA. We provide the trained model and testing samples for demonstration. + +## Objective + + + +1. Code cleaning +2. Release code and pre-trained model +3. Documentation and testing samples + +## Approach and Plan + + + +1. 
Release code and pre-trained model at: https://github.com/SlicerDMRI/SupWMA +2. Provide the instruction of SupWMA framework usage +3. Upload the testing sample and demonstration script. + +## Progress and Next Steps + + + +1. Code and pre-trained model were released +2. Instruction, testing sample and demonstration script were provided +3. Update the training details to help user train SupWMA on custom data. +4. Intergate SupWMA into SlicerDMRI so that users can use it via Slicer interface. + +# Illustrations + + + + + + + +![overview](https://user-images.githubusercontent.com/56477109/150529616-652b889d-0738-4528-b9db-4eb3e6953ce0.png) + +![demo](https://user-images.githubusercontent.com/56477109/150544504-f8d5d42a-23cd-42dc-b97d-747fbe860f3d.png) + +![results](https://user-images.githubusercontent.com/56477109/150544562-ccbeb71f-ce8b-4de7-aae8-cb674c8242ab.png) + +# Background and References + + + + + +Tengfei Xue, Fan Zhang, Chaoyi Zhang, Yuqian Chen, Yang Song, Nikos Makris, Yogesh Rathi, Weidong Cai, Lauren J. O’Donnell. "SupWMA: consistent and efficient tractography parcellation of superficial white matter with deep learning." ISBI (2022). diff --git a/PW36_2022_Virtual/Projects/TMRCatheterNavigation/README.md b/PW36_2022_Virtual/Projects/TMRCatheterNavigation/README.md new file mode 100644 index 000000000..0a164d259 --- /dev/null +++ b/PW36_2022_Virtual/Projects/TMRCatheterNavigation/README.md @@ -0,0 +1,57 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Catheter Navigation for Slicer + +## Key Investigators + +- Wenran Cai (University of Tokyo) +- Kazuaki Hara (University of Tokyo) +- Rina Nagano (University of Tokyo) +- Junichi Tokuda (Brigham and Women's Hospital) + +# Project Description + + + +Intra-arterial chemotherapy requires knowing the position of the catheter tip in the body in order to insert the catheter accurately. Currently, the position is confirmed by X-ray irradiation during the operation. 
To reduce exposure and contrast agents, it is necessary to apply a new method to track the catheter tip. Electromagnetic tracking system with a TMR (Tunnel Magnetoresistance) sensor is helpful to track the catheter movement. Here, we developed an extension of 3D Slicer to connect 3D Slicer and our TMR tracking system. + +## Objective + + + +1. Create an extension that can show catheter path from an electromagnetic tracking with TMR sensors in real time. + +## Approach and Plan + + + +A vessel model will be reconstructed from a preoperative 3D CT. During the operation, real-time data from the sensor is imported into 3D Slicer via OpenIGTLink. A first version is already done. + +## Progress and Next Steps + + + +1. Connect TMR position tracker system to our extension via OpenIGTLink. +2. Testing. + +# Illustrations + + +- Extension interface + +![Extension interface](catheternavigation.png) + +- Experimental setup + +![Experimental setup](img1.png) +![Experimental setup](img2.png) +![Experimental setup](https://github.com/NA-MIC/ProjectWeek/releases/download/project-week-resources/PW36__TMRCatheterNavigation__video.gif) + +# Background and References + + + +1. R.Nagano, etal. CARS 2021: Development of an electromagnetic catheter tracking system using TMR sensor for superselective intraarterial chemotherapy, 2021. Int J CARS 16, 1–119 (2021). 
diff --git a/PW36_2022_Virtual/Projects/TMRCatheterNavigation/catheternavigation.png b/PW36_2022_Virtual/Projects/TMRCatheterNavigation/catheternavigation.png new file mode 100644 index 000000000..17d7141c4 Binary files /dev/null and b/PW36_2022_Virtual/Projects/TMRCatheterNavigation/catheternavigation.png differ diff --git a/PW36_2022_Virtual/Projects/TMRCatheterNavigation/img1.png b/PW36_2022_Virtual/Projects/TMRCatheterNavigation/img1.png new file mode 100644 index 000000000..58e9b1dd0 Binary files /dev/null and b/PW36_2022_Virtual/Projects/TMRCatheterNavigation/img1.png differ diff --git a/PW36_2022_Virtual/Projects/TMRCatheterNavigation/img2.png b/PW36_2022_Virtual/Projects/TMRCatheterNavigation/img2.png new file mode 100644 index 000000000..329a11029 Binary files /dev/null and b/PW36_2022_Virtual/Projects/TMRCatheterNavigation/img2.png differ diff --git a/PW36_2022_Virtual/Projects/UKF/README.md b/PW36_2022_Virtual/Projects/UKF/README.md new file mode 100644 index 000000000..efa4e9c74 --- /dev/null +++ b/PW36_2022_Virtual/Projects/UKF/README.md @@ -0,0 +1,61 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Anatomically informed UKF tractography + +## Key Investigators + +- Fan Zhang (BWH, HMS) +- Yogesh Rathi (BWH, HMS) +- Lauren J O'Donnell (BWH, HMS) + +# Project Description + + + +In this project, we will include brain tissue segmentation maps into the existing unscented Kalman filter (UKF) framework (Malcolm et al 2010, IEEE TMI; Reddy et al 2016, Front. Neuroscience) to inform fiber tracking seeding and stopping. Segmentations of WM, GM and CSF are computed using a deep learning based method that performs tissue segmentation using diffusion MRI data (Zhang et al 2021, Neuroimage). The WM segmentation will be used for tractography seeding, and the GM/CSF segmentations will be used for tractography stopping. + +## Objective + + + +1. Add WM/GM/CSF segmentation maps. +1. Add seeding and stopping masks. +1. 
Improve input check to handle multiple input options. + +## Approach and Plan + + + +1. UI design for better usage of the seeding/stopping options +1. CLI help documention + +## Progress and Next Steps + + + +1. Coding part of the project is almost done. +1. Push requst to the master branch of [UKF](https://github.com/pnlbwh/ukftractography/pull/142) +1. Waiting for the final pull requst approval +1. Decide default (suggested) settings for each option +1. Testing on more datasets other than HCP data. + +UKF-comparison + +# Illustrations + + + +![UKF-WMGMCSF](https://user-images.githubusercontent.com/7855446/149682553-d16fef74-102a-4013-993b-bf1144b72521.png) + +# Background and References + + + +Zhang, F., Breger, A., Cho, K. I. K., Ning, L., Westin, C. F., O’Donnell, L. J., & Pasternak, O. (2021). Deep learning based segmentation of brain tissue from diffusion MRI. NeuroImage, 233, 117934. + +Reddy, C.P. and Rathi, Y., 2016. Joint Multi-Fiber NODDI Parameter Estimation and Tractography Using the Unscented Information Filter. Frontiers in Neuroscience, 10. + +Malcolm, J.G., Shenton, M.E. and Rathi, Y., 2010. Filtered multitensor tractography. IEEE transactions on medical imaging, 29(9), pp.1664-1675. diff --git a/PW36_2022_Virtual/Projects/sliCERR/README.md b/PW36_2022_Virtual/Projects/sliCERR/README.md new file mode 100644 index 000000000..0990a848d --- /dev/null +++ b/PW36_2022_Virtual/Projects/sliCERR/README.md @@ -0,0 +1,59 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# sliCERR + +## Key Investigators + +- Eve LoCastro (MSKCC) +- Aditya Apte (MSKCC) +- Aditi Iyer (MSKCC) +- Harini Veeraraghavan (MSKCC) + +# Project Description + +CERRx is an Octave/MATLAB-based software platform for developing and sharing research results using radiation therapy treatment planning and imaging informatics. "sliCERR" is being developed to facilitate the use of CERRx's radiotherapy and image analysis functionality. 
The extension we envision, "sliCERR", will provide scripted modules for data I/O operations, and will run analysis routines from CERRx. + +## Objective + + +"sliCERR" will be a scripted module for Slicer, written in Python. We are starting in Jupyter notebooks with 3D Slicer kernel for visualization and image processing. The cerr2mrml module handles the I/O operations of loading native CERR planC format files into the 3DSlicer MRML scene, including import of scan, dose and ROI contours. +1. Easy imaging data IO exchange between CERR and 3D Slicer platforms via module UI +2. Enable CERRx features for dosimetry and image analysis, ROE Radiotherapy Outcomes Estimator, semi-quanitative DCE features, DL-based image segmentation models + +## Approach and Plan + + + +1. Slicer-Jupyter notebooks to code the data import/export process and analysis as proof-of-concept +2. User interface was development for 3DSlicer GUI to simplify the process of selecting CERR-format datasets for import in 3DSlicer. + +## Progress and Next Steps + + + +1. Use of specialized functions in CERR such as Deep Learning-based image segmentation and radiomics texture mapping, demonstrated in Jupyter notebooks publicly available on GitHub. +2. GUI is in development +3. 
Expand wiki documentation for setup and usage + +# Illustrations + +Link to Jupyter Notebook demo: https://mskcc.box.com/s/eizbm2nc54uvddcomzmvotw2w8sl82h4 + + +sliCERR + + +planC with dose in Slicer + +import DICOM to planC + +# Background and References +* ROE: Radiotherapy Outcomes Estimator - An Open-Source Tool for Modeling Radiotherapy Outcomes https://www.aapm.org/meetings/2017am/PRAbs.asp?mid=127&aid=37270 +* Extension of CERR for computational radiomics: A comprehensive MATLAB platform for reproducible radiomics research https://pubmed.ncbi.nlm.nih.gov/29896896/ +* CERR GitHub Source Repo https://github.com/cerr/CERR +* CERR Wiki https://cerr.github.io/CERR/ +* sliCERR Repo https://github.com/cerr/sliCERR + diff --git a/PW36_2022_Virtual/Projects/sliCERR/Screen Shot 2022-01-17 at 10.46.36 AM.png b/PW36_2022_Virtual/Projects/sliCERR/Screen Shot 2022-01-17 at 10.46.36 AM.png new file mode 100644 index 000000000..925b22696 Binary files /dev/null and b/PW36_2022_Virtual/Projects/sliCERR/Screen Shot 2022-01-17 at 10.46.36 AM.png differ diff --git a/PW36_2022_Virtual/Projects/sliCERR/sliCERR_dose_overlay.png b/PW36_2022_Virtual/Projects/sliCERR/sliCERR_dose_overlay.png new file mode 100644 index 000000000..829669a3f Binary files /dev/null and b/PW36_2022_Virtual/Projects/sliCERR/sliCERR_dose_overlay.png differ diff --git a/PW36_2022_Virtual/Projects/sliCERR/sliCERR_graphic.png b/PW36_2022_Virtual/Projects/sliCERR/sliCERR_graphic.png new file mode 100644 index 000000000..664be26d3 Binary files /dev/null and b/PW36_2022_Virtual/Projects/sliCERR/sliCERR_graphic.png differ diff --git a/PW36_2022_Virtual/Projects/sliCERR/slicer_notebook.JPG b/PW36_2022_Virtual/Projects/sliCERR/slicer_notebook.JPG new file mode 100644 index 000000000..99d5a5ed1 Binary files /dev/null and b/PW36_2022_Virtual/Projects/sliCERR/slicer_notebook.JPG differ diff --git a/PW36_2022_Virtual/README.md b/PW36_2022_Virtual/README.md new file mode 100644 index 
000000000..6a314dab6 --- /dev/null +++ b/PW36_2022_Virtual/README.md @@ -0,0 +1,257 @@ +## Welcome to the web page for the 36th Project Week! + +[This event](https://projectweek.na-mic.org/PW36_2022_Virtual/) was held virtually January 17-21, 2022. We recorded 131 registered attendees with 43% first time attendees from 25 countries. Participants worked in 37 Project Teams, and participated in 3 breakout sessions. New for this Project Week, two events were recorded - a Monai Label Workshop during the prepration weeks leading upto the event (recording available [here]( https://youtu.be/PmD8umlcpF4)), and the OHIF breakout session (recording available [here](https://vimeo.com/668339696/63a2c48de8)). + +If you have any questions, you can contact the [organizers](../README.md#who-to-contact). + +## Before Project Week +1. Register [here](https://forms.gle/1zE3pDs59sJ4ENP96), it is free! +2. Attend one or more preparation meetings to present a project you intend to work on at PW, for which you are seeking collaborators or to join one of the projects proposed by others. + +4. Join the [Discord server](https://discord.gg/d5Q6b5ug8u) that will be used to communicate with your team during Project Week. Go to [this page](../common/Discord.md) for more info on the use of Discord during PW. + +## During Project Week +* The week will start at **9am on Monday Jan 17th** with informal conversations on **[Discord](https://discord.gg/d5Q6b5ug8u)**. +* Initial **project presentations** will start at **10am on Zoom**, using [this link](https://etsmtl.zoom.us/j/86211702920?pwd=TEl0ZTFDam90WVN5bjZhR05kNVRVZz09). Each team must delegate a member to present their projects in no more than 2 minutes using no other visual support than the project page on GitHub (we won’t have time to switch screen sharing) +* If you don’t have a project, look at the PW36 page to find a project you might be interested in and contact team members through their Discord channel. 
+* Breakout sessions start every day at **10am on Zoom** (link in the calendar below) +* Work in **project teams** will happen throughout the week with communication between team members taking place on **Discord**. If you want to schedule a meeting ahead you can "reserve" a meeting room in [this spreadsheet](https://docs.google.com/spreadsheets/d/1jrYSecdhg9XQ1Re_7yqOCYTMjX2mOe-GowAp3yfWS7g/edit?usp=sharing). +* We will end the week with **project results presentation (10am on Friday)**. Again, each team will delegate one member to present their results in a maximum of 2 minutes. We will use the project page as a visual support for the presentation, so please make sure it is up to date with your latest results by Friday morning. + + + +## Agenda + +
+
+ + + + + + + +[How to add this calendar to your own?](../common/Calendar.md) + +## Projects [(How to add a new project?)](Projects/README.md) + +### VR/AR and Rendering +1. [Collaborative Slicer session](Projects/SlicerCollaboration/README.md) (Csaba Pinter, Mónica Garcia, Jean-Christophe Fillion-Robin) +2. [AR in Slicer](Projects/AR_in_Slicer/README.md) (Alicia Pose Díez de la Lastra, Javier Pascau, Gabor Fichtinger, Andras Lasso, Adam Rankin, Csaba Pinter, Lucas Gandel, Jean-Christophe Fillion-Robin) +3. [Slicer TMS Module](Projects/SlicerTMS_Module/README.md) (Loraine Franke, Lipeng Ning, Yogesh Rathi, Steve Pieper, Raymond Yang, Daniel Haehn) +4. [PRISM Rendering](Projects/PRISMRendering/README.md) (Simon Drouin, Steve Pieper, Andrey Titov, Rafael Palomar) +1. [Echo VolumeRender UI](Projects/EchoVolumeRenderUI/README.md) (Samuelle St-Onge, Simon Drouin, Andrey Titov) +1. [Integration of Flir Thermal Camera](Projects/FlirCameraInSlicer) (Juan Bautista Ruis Alzola, Robabeh Salehiozoumchelouei, Mónica García Sevilla, Yousef Rajaeitabrizi) + +### Image-guided therapy and low cost systems +1. [Low-Cost Ultrasound Training](Projects/LowCostUltrasoundTraining/README.md) (David Garcia, Csaba Pinter, Rebecca Hisey, Leah Groves, Ahmed Mahran, Matt McCormick, Steve Pieper, ...) +1. [Slicer-Liver](Projects/SlicerLiver/README.md) (Rafael Palomar, Ole V. Solberg, Geir Arne Tangen, Gabriella D'Albenzio, Javier Pérez de Frutos) +1. [Visualization of catheter path based on an electromagnetic tracking with TMR sensors](Projects/TMRCatheterNavigation/README.md) (Wenran Cai, Kazuaki Hara, Rina Nagano, Junichi Tokuda +1. [Integration of ROS and 3D Slicer using OpenIGTLink](Projects/ROS-MED/README.md) (Junichi Tokuda, Tamas Ungi, Simon Leonard, Axel Krieger, Mark Fuge) +1. [GPU Rigid Registration for Neuronavigation (Montreal IBIS System)](Projects/GPURigidRegistration/README.md) (Gelel Rezig, Houssem Gueziri, Simon Drouin) +1. 
[NousNav: Low-cost neuronavigation system](Projects/NousNav/README.md) (Étienne Léger, Alexandra Golby, Sam Horvath, Sarah Frisken, David Allemang, Tina Kapur, Steve Pieper, Jean-Christophe Fillion-Robin, Sonia Pujol, Kelly Wang) +1. [Skin Surface Extraction for NousNav Registration](Projects/SkinSegmentation/README.md) (Reuben Dorent, Tina Kapur, Sarah Frisken, Mohammad Jafari, Samantha Horvath, Jean-Christophe Fillion-Robin, Harneet Cheema, Fryderyk Kögl) +1. [MR-US Landmarking for Neuronavigated surgery](Projects/AnnotationMR-US/README.md) (Fryderyk Kögl, Harneet Cheema, Tina Kapur, Simon Drouin) +1. [Low-cost trackers](Projects/CheapTracking/README.md) (Steve Pieper, Gabor Fichtinger) +1. [sliCERR](Projects/sliCERR/README.md) (Aditya Apte, Aditi Iyer, Eve LoCastro, Harini Veeraraghavan) +1. [Mandible Reconstruction Automatic Planning](Projects/MandibleReconstructionAutomaticPlanning/README.md) (Mauro I. Dominguez, Andras Lasso, Manjula Herath) + +### Segmentation/Classification +1. [Spine Segmentation](Projects/SpineSegmentation/README.md) (Ron Alkalay, Steve Pieper, ...) +1. [Multi-organ segmentation](Projects/MultiOrganSegmentation/README.md) (Murat Maga, Sara Rolfe, Andres Diaz-Pinto) +1. [Brain Mask Prediction](Projects/BrainPrediction/README.md) (Raymond Yang, Jax Luo, Lipeng Ning, Cathy Yang, Steve Pieper, Daniel Haehn) +1. [Automatic Landmark Identification in 3D Cone-Beam Computed Tomography scans](Projects/ALICBCT/README.md) (Maxime Gillot, Baptiste Baquero, Antonio Ruellas, Marcela Gurgel, Elizabeth Biggs, Marilia Yatabe, Jonas Bianchi, Lucia Cevidanes, Juan Carlos Prieto) +1. [ALIIOS - Automatic Landmarks Identification for Intra OralScans](Projects/ALIDDM/README.md) (Baptiste Baquero, Maxime Gillot, Lucia Cevidanes, Juan Carlos Prieto, Najla Al Turkestani, Marcela Gurgel, Camila Massaro, Aron Aliaga, Maria Antonia Alvarez Castrillon, Marilia Yatabe, Jonas Bianchi, Juan Fernando Aristizabal, Diego Rey, Antonio Ruellas) +1. 
[Automatic Segmentation of Teeth and Alveolar bone using MONAI Label](Projects/AutomaticSegmentationofTeethandAlveolarBone/README.md) (Daniel Palkovics, Csaba Pinter, Andrés Diaz-Pinto) + +### SlicerDMRI +1. [Anatomically informed UKF tractography](Projects/UKF/README.md) (Fan Zhang, Yogesh Rathi, Lauren J O'Donnell) +1. [Deep Diffusion MRI Registration (DDMReg)](Projects/DDMReg/README.md) (Fan Zhang, William M. Wells III, Lauren J O'Donnell) +1. [SWM Tractography Parcellation](Projects/SupWMA/README.md) (Tengfei Xue, Fan Zhang, Chaoyi Zhang, Yuqian Chen, Yang Song, Nikos Makris, Yogesh Rathi, Weidong Cai, Lauren J. O’Donnell) +1. [White Matter Fiber Clustering with Deep Learning](Projects/DeepFiberClustering/README.md) (Yuqian Chen, Chaoyi Zhang, Yang Song, Tengfei Xue, Nikos Makris, Yogesh Rathi, Weidong Cai, Fan Zhang, and Lauren J. O’Donnell) + +### Cloud +1. [Body Part Regression using IDC](Projects/IDCBodyPartRegression/README.md) (Deepa Krishnaswamy, Andrey Fedorov) +2. [OHIF Mode Gallery](Projects/OHIFModeGallery/README.md) (Alireza Sedghi, James Petts, Erik Ziegler) +3. [Kaapana and XNAT exploration on Google Cloud](Projects/KaapanaXNATExploration/README.md) (Nadya Shusharina, Andrey Fedorov) +4. [Static DICOM Web for AI Assissted Annotations on AWS](Projects/StaticDicomWeb/README.md) (Chris Hafey, Gang Fu, Jordan Kojouharov, Qing Liu, Dmitry Pavlov, Andres Diaz-Pinto, Sachidanand Alle) +5. [OHIF deployment on Google Cloud](Projects/OHIFonGCP/README.md) (Andrey Fedorov, Igor Octaviano, Steve Pieper) + +### Infrastructure +1. [Slicer 5 Release](Projects/Slicer5/README.md) (Sam Horvath, Jean-Christophe Fillion-Robin, Steve Pieper, Andras Lasso) +1. [SlicerPipelines](Projects/SlicerPipelines/README.md) (Connor Bowley, Sam Horvath) +1. 
[Slicer Internationalization](Projects/SlicerInternationalization/README.md) (Sonia Pujol, Steve Pieper, Andras Lasso, Mamadou Camara, Jean-Christophe Fillion-Robin, Ibrahima Fall, Samba Diaw, Aissa Mboup, Fryderyk Kögl, Attila Nagy, Adriana Vilchis Gonzalez, Theodore Aptekarev, Pedro Moreira, Andrey Fedorov, Ahmedou Moulaye, Yahya Tfeil) +1. [Update the Chest Imaging Platform extension to support Slicer 5](Projects/CIP_Update/README.md) (Rudolf Bumm, Raul San Jose Estepar, Andras Lasso, Steve Pieper) +1. [Batch Anonymization](Projects/DSCIAnonymize/README.md)(Hina Shah, Juan Carlos Prieto, Fryderyk Kögl) + +## Registrants + +Do not add your name to this list below. It is maintained by the organizers based on your registration. [Register here](https://forms.gle/1zE3pDs59sJ4ENP96). + +List of registered participants so far (names will be added here after processing registrations): + +1. Steve Pieper , Isomics, Inc. , USA +1. HINA SHAH , UNC Chapel Hill , USA +1. YAHYA TFEIL , UNIVERSITY OF NOUAKCHOTT ALASSRIYA , Mauritania +1. Monica García Sevilla , Universidad de Las Palmas de Gran Canaria , Spain +1. Rafael Palomar , Oslo University Hospital / NTNU , Norway +1. Ismail Irmakci , Feinberg School of Medicine - Northwestern University , USA +1. Miguel Xochicale , King's College London , United Kingdom +1. Adama Rama WADE , École Supérieure Polytechnique (ESP) , Senegal +1. Li Zhenzhu , Department of Neursosurgery, Hwa Mei Hospital, University of Chinese Academy of Sciences, Ningbo, China; , China +1. Gang Fu , Amazon , USA +1. Maxime Gillot , University of Michigan , USA +1. Baptiste Baquero , University of Michigan , USA +1. Simon Drouin , École de technologie supérieure , Canada +1. Tina Kapur , Brigham and Women's Hospital, Harvard Medical School , USA +1. Loraine Franke , University of Massachusetts Boston , USA +1. Harneet Cheema , Brigham and Women's Hospital and Harvard Medical School, U Ottawa , Canada +1. 
Fryderyk Kögl , Brigham and Women's Hospital and Harvard Medical School, Technical University of Munich , USA +1. Sonia Pujol , Brigham and Women's Hospital, Harvard Medical School , USA +1. Daniel Haehn , University of Massachusetts Boston , USA +1. Juan Ruiz-Alzola , Universidad de Las Palmas de Gran Canaria , Spain +1. Felix von Haxthausen , University of Lübeck , Germany +1. Deepa Krishnaswamy , Brigham and Women's Hospital , USA +1. Antonio Cartón , Hospital Universitario La Paz , Spain +1. Yi Shen , HIT , China +1. Ahmedou Moulaye IDRISS , Faculty of Medicine, University of Nouakchott Al Asriya , Mauritania +1. Csaba Pinter , EBATINCA / Pixel Medical , Spain +1. Adam Wittek , The University of Western Australia , Australia +1. Reuben Dorent , King's College London , United Kingdom +1. David García-Mato , Ebatinca S.L. , Spain +1. Rebecca Hisey , Queen's University , Canada +1. Leah Groves , Queens University , Canada +1. xi cao , changshu hospital of chinese medicine , China +1. Sen Li , École de Technologie Supérieure , Canada +1. Khaled Younis , Philips , United States +1. Lina Mekki , Johns Hopkins University , United states +1. Yahia Megahed , University of Central Florida , USA +1. Étienne Léger , Brigham and Women's Hospital , Canada +1. Lipeng Ning , Brigham and Women's Hospital , USA +1. Ahmed Mahran , Toronto General hosptial , Canada +1. Nirav Patel , IIT Madras , India +1. Saidou TALLA , École Supérieure Polytechnique (ESP) , Sénégal +1. Rudolf Bumm , Kantonsspital Graubünden , Switzerland +1. Andras Lasso , PerkLab, Queen's University , Canada +1. Lucia Cevidanes , Univ. of Michigan , USA +1. Gabor Fichtinger , Queen's University , Canada +1. Ron Kikinis , Harvard Medical School , USA +1. Yousef Rajaeitabrizi , IAC , Spain +1. Kate Kazlovich , Toronto General Hospital , Canada +1. Raymond Yang , University of Massachusetts Boston , USA +1. Joaquin Olivares , Universidad de Córdoba , Spain +1. 
Xiang Chen , Memorial University of Newfondland , China +1. Motoki Katsube , Kyoto University , Japan +1. Sarah Frisken , Brigham and Women's Hospital , USA +1. Adam Rankin , Robarts Research Institute , Canada +1. Connor Haberl , Carleton University , Canada +1. Alexandra Golby , Brigham and Women's Hospital/Harvard Medical School , USA +1. Ron Kikinis , Harvard Medical School , USA +1. Badiaa , Abdelmalek Essaadi University , Morocco +1. Ole Vegard Solberg , SINTEF , Norway +1. Geir Arne Tangen , SINTEF , Norway +1. Javier Perez deFrutos , SINTEF , Norway +1. Andrey Fedorov , Brigham and Women's Hospital/Harvard Medical School , USA +1. Nadya Shusharina , Mass General Brigham /Harvard Medical School , USA +1. Kumar Punithakumar , University of Alberta , Canada +1. Junichi Tokuda , Brigham and Women's Hospital , USA +1. Masoom Haider , Univ of Toronto , Canada +1. Souleymane Diao , Cheikh Anta Diop University , Sénégal +1. Mo Alsad , Research Associate , United Kingdom +1. Sara Rolfe , Seattle Children's Research Institute and University of Washington , USA +1. Samantha Horvath , Kitware Inc , USA +1. Theodore Aptekarev , Slicer Community , Russia +1. Pedro Moreira , Brigham and Women's Hospital/Harvard Medical School , USA +1. Eve LoCastro , Memorial Sloan Kettering Cancer Center , USA +1. Tamas Ungi , Queen's University , Canada +1. Mauro Ignacio Dominguez , Independent , Argentina +1. Erik Ziegler , Open Health Imaging Foundation / Radical Imaging , Netherlands +1. Antonio , Hospital Universitari Arnau de Vilanova , Spain +1. Alicia Pose Díez de la Lastra , Universidad Carlos III de Madrid , Spain +1. Alireza Sedghi , OHIF , Canada +1. James Hanks , MGH/OHIF , USA +1. Sanket Deshpande , Carpl.ai , India +1. Rohit Takhar , NSIT , India +1. Mónica Iturrioz , Brigham and Women's hospital , USA +1. Ruben San Jose Estepar , BWH , USA +1. Michael Dada , Federal University of Technology, Minna , Nigeria +1. Ron Alkalay , Beth Israel Deaconess Medical Center , USA +1. 
Sanni Henry Ananyi , Federal University of Technology Minna , Nigeria +1. Wenran Cai , University of Tokyo , Japan +1. Suleiman Jamila , Federal University of Technology, Minna. , Nigeria +1. Idowu Abdulsemiu Babatunde , Federal University of Technology Minna Niger State , Nigeria +1. Nayra Pumar Carreras , Ebatinca , Spain +1. Manjula Herath , Malmo University , Sweden +1. Robabeh Salehiozoumchelouei , Instituto de Astrofísica de Canarias (IAC) , Spain +1. jonas bianchi , University of the Pacific , USA +1. Houssem Gueziri , Montreal Neurological Institute / McGill University , Canada +1. Attila Nagy , University of Szeged, Department of Medical Physics and Informatics , Hungary +1. Juan María Piñera Parrilla , SurgicalWorks , Spain +1. Kazuaki Hara , The University of Tokyo , Japan +1. Mamadou Samba CAMARA , University of Dakar , Senegal +1. Dániel Palkovics , Semmelweis University , Hungary +1. Parikshit Juvekar , Brigham & Women's Hospital, Department of Neurosurgery , USA +1. James Petts , Ovela Solutions , United Kingdom +1. Fan Zhang , Brigham and Women's Hospital , USA +1. Keita , École Supérieure Polytechnique (ESP) de Dakar , Sénégal +1. Mamadou Moustapha DIAGNE , ESP , Senegal +1. Gregory Fischer , WPI , USA +1. Javier Pascau , Universidad Carlos III de Madrid , Spain +1. Ariela Shahvar , Western University , Canada +1. Samuelle St-Onge , École de technologie supérieure (ÉTS) , Canada +1. Andrey Titov , École de technologie supérieure , Canada +1. Tengfei Xue , Harvard Medical School/University of Sydney , Australia +1. Yiwei Jiang , Worcester Polytechnic Institute , USA +1. Yuqian Chen , Harvard Medical School , USA +1. Gabriella d'Albenzio , The Intervention Centre (OUS) , Norway +1. Raul San Jose Estepar , Brigham and Women's Hospital , USA +1. Kyle Sunderland , Queen's University , Canada +1. Árpád Márki , University of Szeged , Hungary +1. Martin Dr. Cseh , University of Szeged Faculty of Pharmacy , Hungary +1. Sandy Wells , HMS / BWH , USA +1. 
Chris Hafey , AWS , USA +1. Matt Lungren , Amazon , USA +1. Jean-Christophe Fillion-Robin , Kitware , USA +1. Peter Traneus Anerson , Retired , U.S.A. +1. Adriana Herlinda Vilchis González , Universidad Autónoma del Estado de México , México +1. Richard Doerer , Modl3d, LLC , USA +1. Andres Diaz-Pinto , NVIDIA , United Kingdom +1. Randy Gollub , MGH , USA +1. Luis , University of Minho , Portugal +1. Igor Octaviano , Pontifical Catholic University of Minas Gerais , Brazil +1. Pape Mady THIAO , École militaire de santé de Dakar , Sénégal +1. Carl-Fredrik Westin , Brigham and Womens Hospital , USA + +## Statistics +* 131 Registered attendees + * 43% first time attendees +* 25 countries + +Attendees per country +Attendees timezones + +## History +Please read about our experience in running these events since 2005: [Increasing the Impact of Medical Image Computing Using +Community-Based Open-Access Hackathons: the NA-MIC and 3D Slicer Experience](http://perk.cs.queensu.ca/sites/perkd7.cs.queensu.ca/files/Kapur2016.pdf). 
diff --git a/PW36_2022_Virtual/images/README.md b/PW36_2022_Virtual/images/README.md new file mode 100644 index 000000000..433e29b28 --- /dev/null +++ b/PW36_2022_Virtual/images/README.md @@ -0,0 +1 @@ +Use this directory to store image assets for the main page diff --git a/PW36_2022_Virtual/images/attendees-per-country.png b/PW36_2022_Virtual/images/attendees-per-country.png new file mode 100644 index 000000000..ec7c17ecc Binary files /dev/null and b/PW36_2022_Virtual/images/attendees-per-country.png differ diff --git a/PW36_2022_Virtual/images/timezones.png b/PW36_2022_Virtual/images/timezones.png new file mode 100644 index 000000000..c80386fc9 Binary files /dev/null and b/PW36_2022_Virtual/images/timezones.png differ diff --git a/PW37_2022_Virtual/MONAILabel_Workshop.md b/PW37_2022_Virtual/MONAILabel_Workshop.md new file mode 100644 index 000000000..e1a3e72ba --- /dev/null +++ b/PW37_2022_Virtual/MONAILabel_Workshop.md @@ -0,0 +1,65 @@ +# MONAI Label Workshop / Tutorial + +Video of the event is here: https://www.youtube.com/watch?v=wtiEe_jiUzg + + +Registration is closed. Details for registered attendees will be emailed before the workshop. + +June 22, 2022 9-11am EDT + +The goal of this workshop is to prepare users and developers to make use of MONAI and MONAI Label in their work at [Project Week 37](README.md). + +~~To request participation, please fill out [this registration form](https://docs.google.com/forms/d/e/1FAIpQLSc_hdylCGslg6Lxl-VLYNutUoja6dZfwbhu2SHnmCyxBOP2jQ/viewform).~~ Registration is closed. + +Each participant will be allocated a cloud-hosted GPU workstation provided by [AWS AppStream](https://aws.amazon.com/appstream2/faqs/). +Instances will be allocated in the US an EU based on the location of the participant. See [this information about bandwidth considerations](https://docs.aws.amazon.com/appstream2/latest/developerguide/bandwidth-recommendations-user-connections.html). + +Space will be limited so sign up by June 8th 2022. 
+ +## Background information + +See [discussion here](https://discourse.slicer.org/t/monailabel-3d-slicer-for-cloud-computing-workshop-jan-12-2022-2-4-est/21152) for +information about the previous workshop held before [Project Week 36](https://projectweek.na-mic.org/PW36_2022_Virtual/). + +Please watch [the video of the previous workshop](https://youtu.be/PmD8umlcpF4) for background information. + +MONAILabel itself is described [in this paper](https://arxiv.org/abs/2203.12362). + + +## Agenda +* Welcome - Steve Pieper, Isomics, Inc. +* Michael Zephyr - Nvidia +* AWS Infrastructure - Qing Liu, AWS +* MONAI Label - Andres Diaz-Pinto, NVIDIA + * Presentation + * Hands-On +* Chest segmentation use case - Rudolf Bumm, Kantonsspital Graubünden + +## Event logistics + +* The zoom link info is: + +[https://us06web.zoom.us/j/89885357745?pwd=Y3d1cTRZS2paRWJZbmZSMG5ONTZvUT09](https://us06web.zoom.us/j/89885357745?pwd=Y3d1cTRZS2paRWJZbmZSMG5ONTZvUT09) + + +Meeting ID: 898 8535 7745 +Passcode: 980979 + + + +* We will use the Project Week discord for tech support and discussion during the workshop. Please sign up here: https://discord.gg/d5Q6b5ug8u +* Please use the same email for discord that you used to sign up for the workshop and also please use your first and last names and organization in your discord name. +* The workshop will be recorded and made available. +* There will be about one hour of presentation and one hour of hands-on. +* Connection info for access to your cloud 3D Slicer instance will be emailed to you before/during the workshop and you'll get instructions on how to connect. + * The AWS team has worked to provide you with a server in your geographic location, but if you experience latency it may be possible to assign you to a different region for better performance. + * Your cloud instance will be available from the time you receive the email. It may take a minute or two to start when you first connect. 
+ * It will remain active for about 1 hour after the workshop ends if you want to work on it more. + +*Big thanks in advance to the AWS, NVIDIA and Slicer teams for providing this activity!* + +[Document from Rudolf](https://docs.google.com/document/d/1azFpJutBVJEW9W_riYZlXzrXac58ToCEzNTAwkzNf2c/edit) on setting up a Windows machine to run MONAI Label. + +## After the event +* Please help us improve future events by filling out this brief survey: https://survey.immersionday.com/p4K9-Zqng +* The workshop will be recorded and a link will be circulated diff --git a/PW37_2022_Virtual/Projects/AMASSS_CBCT/README.md b/PW37_2022_Virtual/Projects/AMASSS_CBCT/README.md new file mode 100644 index 000000000..054558764 --- /dev/null +++ b/PW37_2022_Virtual/Projects/AMASSS_CBCT/README.md @@ -0,0 +1,121 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Automatic multi-anatomical skull structure segmentation of cone-beam computed tomography scans using 3D UNETR + +![Segmentation](https://user-images.githubusercontent.com/46842010/172177602-8cbfc188-9715-488a-ad2e-abb8d219536d.png) + +## Key Investigators + + +- Maxime Gillot (UoM) +- Baptiste Baquero (UoM) +- Celia Le (UoM) +- Romain Deleat-Besson (UoM) +- Jonas Bianchi (UoM, UoP) +- Antonio Ruellas (UoM) +- Marcela Gurge (UoM) +- Marilia Yatabe (UoM) +- Najla Al Turkestani (UoM) +- Kayvan Najarian (UoM) +- Reza Soroushmehr (UoM) +- Steve Pieper (ISOMICS) +- Ron Kikinis (Harvard Medical School) +- Beatriz Paniagua (Kitware) +- Jonathan Gryak (UoM) +- Marcos Ioshida (UoM) +- Camila Massaro (UoM) +- Liliane Gomes (UoM) +- Heesoo Oh (UoP) +- Karine Evangelista (UoM) +- Cauby Chaves Jr +- Daniela Garib +- F ́abio Costa (UoM) +- Erika Benavides (UoM) +- Fabiana Soki (UoM) +- Jean-Christophe Fillion-Robin (Kitware) +- Hina Joshi (UoNC) +- Lucia Cevidanes (UoM) +- Juan Prieto (UoNC) + + +# Project Description + +The segmentation of medical and dental images is a fundamental step in automated clinical decision support 
systems. +It supports the entire clinical workflow from diagnosis, therapy planning, intervention, and follow-up. +In this paper, we propose a novel tool to accurately process a full-face segmentation in about 5 minutes +that would otherwise require an average of 7h of manual work by experienced clinicians. +This work focuses on the integration of the state-of-the-art UNEt TRansformers (UNETR) +of the Medical Open Network for Artificial Intelligence (MONAI) framework. +We trained and tested our models using 618 de-identified Cone-Beam Computed Tomography (CBCT) volumetric images of the head +acquired with several parameters from different centers for a generalized clinical application. Our results on a 5-fold cross-validation +showed high accuracy and robustness with a Dice up to 0.962 ± 0.02. + +## Objective + + + +1. Create only one model for multiple structures. +2. Create a slicer module for the algorithm +3. Add new structure to segment +4. Deploy the AMASSS tool with the updated trained models + +## Approach and Plan + + + +1. Get the data merged by the clinicians for the skull. +1. Use the beginning of a slicer module to create a new one for AMASSS. +1. Use new dataset to train new HD models. + +## Progress and Next Steps + + + +1. An algorithm has already been made to run segmentation out of slicer as a docker to implement in the DSCI +1. We collected data to generate segmentation model using the MONAI library +1. For large field of view : +- A model has been trained to generate a segmentation of 5 skull structures (mandible, maxilla, cranial base, cervical vertebra and upper airway) +- Another to segment the skin. + +1. For small field of view : +- A model for upper and lower root canal has been trained as well as HD mandible and maxilla +- We still need data to train networks for crown and mandible canal segmentation + +1. To be more user friendly, the development of an AMASSS module for Slicer has been started in March. +1. 
The UI of a slicer module was already started before project week and has now been updated. +1. We linked the UI with a CLI module to run the prediction/segmentation directly on the user computer through Slicer 5's python 3.9 +1. The module has been tested locally with clinicians and is ready to be deployed as a Slicer module as a part of the slicer CMF extension +( The code is available at https://github.com/Maxlo24/Slicer_Automatic_Tools ) + +1. We collaborated with [Slicer Batch Anonymize](Projects/SlicerBatchAnonymize/README.md) (Hina Shah, Juan Carlos Prieto) to use AMASSS as a first step to perform defacing of patients' scans during the batch anonymisation process. ( Figure 3 Mask for defacing ) + + +# Illustrations + +## 1. Different process to perform a CBCT segmentation +- Contrast correction and rescaling to the trained model spacing +- Use the UNETR classifier network through the scan to perform a first raw segmentation +- Post process steps to clean and smooth the segmentation +- Upscale to the original images size + +![prediction](https://user-images.githubusercontent.com/46842010/172177605-b2e5d91c-3e10-4608-9c2d-1e5f2dfcc261.png) + +## 2. Screen of the slicer module during a segmentation +- Selection of the different parameters and which structure to segment +- Use of a dialog progress bar to show/cancel the progress of the segmentation in real time (top right end corner). +- On the 3D view, result of one of the segmentations with the generated VTK files + +- A prediction takes from 120s to 300s for one patient depending on the local computer GPU capacity ( 15GB down to 3GB) + +![Screen slicer](https://user-images.githubusercontent.com/46842010/176789535-b7473878-fbeb-494d-988a-5ee1afa7d4fa.png) + +## 3. 
Use of AMASSS to generate mask for a defacing tool +- The scan intensity in the pink region ( mainly nose, lips and eyes ) will be set to 0 to make it impossible to identify the patient +- The bones segmentations are used to make sure we don't remove important information during the process + +![mask for defacing](https://user-images.githubusercontent.com/46842010/176813614-f9ec9123-4c34-4f8c-828f-ed4a84d30132.jpeg) + + +# Background and References + + diff --git a/PW37_2022_Virtual/Projects/AutomaticLandmarkIdentification/README.md b/PW37_2022_Virtual/Projects/AutomaticLandmarkIdentification/README.md new file mode 100644 index 000000000..75323a146 --- /dev/null +++ b/PW37_2022_Virtual/Projects/AutomaticLandmarkIdentification/README.md @@ -0,0 +1,71 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Automatic Landmark Identification in IOS and Cranio Facial CBCT + +## Key Investigators + +- Maxime Gillot (UoM) +- Baptiste Baquero (UoM) +- Jonas Bianchi (UoM, UoP) +- Marcela Gurge (UoM) +- Najla Al Turkestani (UoM) +- Marilia Yatabe (UoM) +- Lucia Cevidanes (UoM) +- Juan Prieto (UoNC) + + +# Project Description + +For CBCT : +We propose a novel approach that reformulates anatomical landmark detection as a classification problem through a virtual agent placed +inside a 3D Cone-Beam Computed Tomography (CBCT) scan. This agent is trained to +navigate in a multi-scale volumetric space to reach the estimated landmark position. The +agent's movement decisions rely on a combination of Densely Connected Convolutional +Networks (DCCN) and fully connected layers. + +For IOS : + + +## Objective + + + +1. Combine the old ALI CBCT of the first project week and the new ALI IOS in a new module +2. Add new landmarks in the available list +3. Deploy the tool in a Slicer module + +## Approach and Plan + + + +1. Use the module we worked on during project week 36 +1. Get new data to train on with more landmarks + + +## Progress and Next Steps + + + +1. 
We have models for landmark identification in CBCT and IOS +1. We have the beginning of a UI in Slicer +1. The next steps are : +1. Link the UI with both ALI IOS and ALI CBCT algorithms +1. Train new models for more landmarks +1. Deploy the tool as a Slicer module in the Slicer CMF extension + +---- + +1. I haven't been able to make any progress on this module during this week as I've been working on [AMASSS CBCT](Projects/AMASSS_CBCT/README.md) (Maxime Gillot, Baptiste Baquero, Lucia Cevidanes, Juan Prieto) and [Slicer Batch Anonymize](Projects/SlicerBatchAnonymize/README.md) (Hina Shah, Juan Carlos Prieto) +1. I learned how to use CLI modules that will make the development/deployment of ALI faster in the near future. +1. New agents have been trained to reach a total of 120 landmarks that can be automatically identified + +# Illustrations +![Slicer screen](https://user-images.githubusercontent.com/46842010/174138265-66ab080e-e885-4f76-a150-7e4da3869aa0.png) + +results + + + +# Background and References + + diff --git a/PW37_2022_Virtual/Projects/AutomaticQuantitative3DCephalometrics/README.md b/PW37_2022_Virtual/Projects/AutomaticQuantitative3DCephalometrics/README.md new file mode 100644 index 000000000..335dacca2 --- /dev/null +++ b/PW37_2022_Virtual/Projects/AutomaticQuantitative3DCephalometrics/README.md @@ -0,0 +1,73 @@ +Back to [Projects List](../../README.md#ProjectsList) + +Automatic Quantification 3D Components + +## Key Investigators +- Baptiste Baquero (University of Michigan) +- Maxime Gillot (University of Michigan) +- Lucia Cevidanes (University of Michigan) +- David Allemang (Kitware Inc) +- Jean-Christophe Fillion-Robin (Kitware Inc) + +# Project Description +The Automatic Quantification 3D Components (AQ3DC) aims to provide a user-friendly tool that decreases user time for extraction of quantitative image analysis features. 
AQ3DC is a Slicer extension to automatically compute lists of measurements selected by users for a single case or a whole study sample, at one or more time points. The current implementation is aimed at automatic computation of 3D components like distances (AP, RL and SI) between points, points to line, midpoint between two points or angles (Pitch, Roll and Yaw), which can be further extended to any type of desired computation/quantitative image analysis. The design of the user interface is currently aimed at quantification of craniofacial dental, skeletal and soft tissue structures. + + +## Objective + +- A. Develop a Slicer extension that will automatically perform angular or linear measurements between landmarks in one or more time points for all patients in a study folder. +- B. Automatically generate the "clinical meaning" of the directionality for the numbers and signs obtained after the computation (skeletal directions: AP,SI,RL; dental directions: Mesio-Distal,Bucco-Lingual,Extrusion-Intrusion, 3D angles: Pitch, Roll and Yaw). +- C. Generate the mid point between two points. + +## Approach and Plan + +1. May 5, 2022 - Met with David Allemang and J-Christophe to plan design of AQ3DC and not replicate Markups or Q3DC functionalities. +2. May 12, 2022 - David A. provided information regarding the markups module and its documentation. After the May 5th meeting, David agreed that the markups module would not be suitable for this proposed work, as each type of markup must be contained in a different MRML node. For example, you can't have a plane and a line defined by the same markups node. This is the role that DependantMarkups fills. +3. Work on writing the code in the branch https://github.com/baptistebaquero/Q3DCExtension/tree/add-AQ3DC-module of Q3DC extension, focusing on the steps described below and with feedback of project team members. +4. Pre-selection of the points needed. +5. 
Import/export excel file with all the measurement needed. +6. Creation of interactive table with the different measurement needed. +7. Computation of linear measurements for one time point and exporting of the data in an excel file. + + +## Progress and Next Steps + +# Progress: +1. Computation of linear measurements for two time points and exporting of the data in an excel file +2. Computation of angles and distances with sign meaning depending on the type of points used +3. Met with David and J-Christophe to discuss overall infrastructure and discuss how to update the code to streamline future maintenance + +# Next steps : +1. Computation of mid points. +2. Finalize branch https://github.com/baptistebaquero/Q3DCExtension.git and create a pull request. +3. Update SlicerCMF workflow to document and integrate with AQ3DC. + + +# Illustrations + + +# 1. Slicer Interface +![Screenshot from 2022-06-30 18-31-37](https://user-images.githubusercontent.com/83285614/176789715-f90c3ea5-faf6-4e49-bdf3-2683b18ce375.png) + +# 2. List of measurements exported. +![Screenshot from 2022-06-30 18-29-01](https://user-images.githubusercontent.com/83285614/176789814-29e76874-1060-4681-bbe3-a4853975f510.png) + +# 3. Results of the computation for all the list of measurement for a sample of patient. +![Screenshot from 2022-06-30 19-01-23](https://user-images.githubusercontent.com/83285614/176792428-d5c3cb6f-4e56-45c0-95e2-fb24798453a8.png) + +# 4. Skeletal measurements signs meaning. +![skeletal_measurement](https://user-images.githubusercontent.com/83285614/176794349-fa99dcc8-bdf7-4518-ba8e-01451ebf05d8.jpeg) + +# 5. Linear measurements signs meaning. +![linear_measurement](https://user-images.githubusercontent.com/83285614/176794371-c87e7cba-8242-4149-bbda-5e67e28859cc.jpeg) + +# 6. Angular measurements signs meaning. 
+![angular_measurement](https://user-images.githubusercontent.com/83285614/176794405-c1e283e6-bad2-4da5-b777-991e93c419ce.jpeg) + + +# Background and References + + diff --git a/PW37_2022_Virtual/Projects/CinematicRenderingVTK/README.md b/PW37_2022_Virtual/Projects/CinematicRenderingVTK/README.md new file mode 100644 index 000000000..893bc7328 --- /dev/null +++ b/PW37_2022_Virtual/Projects/CinematicRenderingVTK/README.md @@ -0,0 +1,117 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Cinematic rendering in Slicer leveraging VTK Physically Based Rendering (PBR) + +## Key Investigators + +- Shreeraj Jadhav, Kitware, USA +- Jiayi Xu, Kitware, USA +- Jean-Christophe Fillion-Robin Kitware, USA +- Andras Lasso (PerkLab, Queen's University, Canada) + +# Project Description + + +- The goal of this project is to identify and evaluate Kitware's previous efforts on cinematic rendering and how these can be leveraged in Slicer. +- Slicer internally uses VTK as its rendering engine to display the 2D and 3D viewports in its interface. Since release 9.0, VTK has more enhanced rendering capabilities such as the Physically Based Rendering (PBR) lighting model and the integration of ray tracing backends such as the Nvidia OptiX and intel's OSPRay engine. These capabilities are already being leveraged in Paraview as discussed in the following blog posts [here](https://www.kitware.com/vtk-pbr/) and [here](https://www.kitware.com/physically-based-rendering-improvements-in-paraview/). + +- Primary focus will be on VTK's PBR capabilities and ray tracing backends (OptiX, OSPRay). Since Slicer already uses VTK rendering pipeline, and it will be significantly easier to incorporate/leverage these capabilities from within Slicer. + + +## Objective + + + + + +## Approach and Plan + + +1. Review accessible offerings on cinematic rendering (VTK PBR, VTK backends: OSPRay and OptiX, omniverse, etc) that can be utilized inside Slicer. +2. 
Develop prototypes showing how these can be enabled and used in Slicer. + - Start with surface rendering (PBR, ambient occlusion, global illumination) + - Investigate what capabilities can be enabled for volume rendering (perhaps OSPRay). + - Tradeoffs: Performance vs Image Quality without degrading user experience. + - Changes to user interface, parameter tuning, simplification. +3. Review existing Slicer modules (such as [Light Module](https://discourse.slicer.org/t/new-module-to-customize-lighting-in-3d-view/8804)) that enhance Slicer's rendering capabilities and evaluate how these can be included in the current effort. +4. Evaluate how integrating these in Slicer will affect other modules such as LookingGlass, OpenXR, etc. + +## Progress and Next Steps + + +Use of `vtkSSAOPass` class to generate ambient occlusion (AO) for volumes: + - Volume mapper cannot directly work when AO pass is enabled, need further investigations to understand how this could be done. + - Initial attempt encountered OpenGL State errors (see error dump below) triggered by [this](https://gitlab.kitware.com/vtk/vtk/-/blob/master/Rendering/OpenGL2/vtkOpenGLState.cxx#L1755). + - To reproduce this error: use the GitHub [vtk-examples branch](https://github.com/jadh4v/vtk-examples/tree/ENH-SSAO-for-volumes) and build/run the [SSAOVolume example](https://github.com/jadh4v/vtk-examples/blob/ENH-SSAO-for-volumes/src/Cxx/Rendering/SSAOVolume.cxx). 
+ ``` + Generic Warning: In C:\D\slicer-d1\VTK\Rendering\OpenGL2\vtkOpenGLState.cxx, line 1069 +Error glEnable/Disable1 OpenGL errors detected + 0 : (1282) Invalid operation + + with stack trace of + at vtksys::SystemInformationImplementation::GetProgramStack in C:\D\slicer-d1\VTK\Utilities\KWSys\vtksys\SystemInformation.cxx line 3979 + at vtksys::SystemInformation::GetProgramStack in C:\D\slicer-d1\VTK\Utilities\KWSys\vtksys\SystemInformation.cxx line 829 + at `anonymous namespace'::reportOpenGLErrors in C:\D\slicer-d1\VTK\Rendering\OpenGL2\vtkOpenGLState.cxx line 294 + at vtkOpenGLState::SetEnumState in C:\D\slicer-d1\VTK\Rendering\OpenGL2\vtkOpenGLState.cxx line 1069 + at vtkOpenGLState::ScopedglEnableDisable::~ScopedglEnableDisable in C:\D\slicer-d1\VTK\Rendering\OpenGL2\vtkOpenGLState.h line 299 + at vtkOpenGLState::vtkglBlitFramebuffer in C:\D\slicer-d1\VTK\Rendering\OpenGL2\vtkOpenGLState.cxx line 1758 + at vtkOpenGLGPUVolumeRayCastMapper::vtkInternal::CaptureDepthTexture in C:\D\slicer-d1\VTK\Rendering\VolumeOpenGL2\vtkOpenGLGPUVolumeRayCastMapper.cxx line 817 + at vtkOpenGLGPUVolumeRayCastMapper::GPURender in C:\D\slicer-d1\VTK\Rendering\VolumeOpenGL2\vtkOpenGLGPUVolumeRayCastMapper.cxx line 3102 + at vtkGPUVolumeRayCastMapper::Render in C:\D\slicer-d1\VTK\Rendering\Volume\vtkGPUVolumeRayCastMapper.cxx line 171 + at vtkVolume::RenderVolumetricGeometry in C:\D\slicer-d1\VTK\Rendering\Core\vtkVolume.cxx line 380 + at vtkProp::RenderFilteredVolumetricGeometry in C:\D\slicer-d1\VTK\Rendering\Core\vtkProp.cxx line 324 + at vtkDefaultPass::RenderFilteredVolumetricGeometry in C:\D\slicer-d1\VTK\Rendering\OpenGL2\vtkDefaultPass.cxx line 167 + at vtkVolumetricPass::Render in C:\D\slicer-d1\VTK\Rendering\OpenGL2\vtkVolumetricPass.cxx line 44 + at vtkSequencePass::Render in C:\D\slicer-d1\VTK\Rendering\OpenGL2\vtkSequencePass.cxx line 71 + at vtkCameraPass::Render in C:\D\slicer-d1\VTK\Rendering\OpenGL2\vtkCameraPass.cxx line 145 + at vtkRenderStepsPass::Render in 
C:\D\slicer-d1\VTK\Rendering\OpenGL2\vtkRenderStepsPass.cxx line 207 + at vtkSSAOPass::RenderDelegate in C:\D\slicer-d1\VTK\Rendering\OpenGL2\vtkSSAOPass.cxx line 240 + at vtkSSAOPass::Render in C:\D\slicer-d1\VTK\Rendering\OpenGL2\vtkSSAOPass.cxx line 499 + at vtkOpenGLRenderer::DeviceRender in C:\D\slicer-d1\VTK\Rendering\OpenGL2\vtkOpenGLRenderer.cxx line 285 + at vtkRenderer::Render in C:\D\slicer-d1\VTK\Rendering\Core\vtkRenderer.cxx line 385 + at vtkRendererCollection::Render in C:\D\slicer-d1\VTK\Rendering\Core\vtkRendererCollection.cxx line 53 + at vtkRenderWindow::DoStereoRender in C:\D\slicer-d1\VTK\Rendering\Core\vtkRenderWindow.cxx line 347 + at vtkRenderWindow::Render in C:\D\slicer-d1\VTK\Rendering\Core\vtkRenderWindow.cxx line 306 + at vtkOpenGLRenderWindow::Render in C:\D\slicer-d1\VTK\Rendering\OpenGL2\vtkOpenGLRenderWindow.cxx line 2345 + at main in C:\D\Shadows\Shadows.cxx line 132 + ``` + +Adapt `vtkSSAOPass` to create local ambient occlusion (LAO) implementation for volumes: + - `ComputeKernel()` in vtkSSAOPass can be modified to adapted for LAO of volumes [Jiayi] + - Possibly, only compute an occlusion volume once (pre-compute), use this for shading in raycaster + +Through discussions with the VTK team (Timothee Chabat, Mathieu Westphal), we identified another feature which could help improve shading in volume rendering: + - https://gitlab.kitware.com/vtk/vtk/-/merge_requests/9231 + - an accurate ambient occlusion effect can be achieved with the current VTK master, setting the `GlobalIllumationReach` to `0` and setting `VolumetricScatteringBlending` to something `>= 1.0` +![](https://gitlab.kitware.com/vtk/vtk/uploads/397286f8f4fc59281174e51ad639fae7/demo_shadows.gif) +3 + +4 + +_Image Courtesy: Gaspard Thevenon_ + +# Illustrations + + + +# Background and References + +Related modules: +- Models: In recent Slicer versions, PBR interpolation can be selected and PBR material properties can be edited. 
+- Lights (in [Sandbox extension](https://github.com/PerkLab/SlicerSandbox#lights)): it can configure lighting, PBR, image based lighting, ambient shading (SSAO) + +![](https://camo.githubusercontent.com/69b7b0e1828a78bd1e19bacfec1d4ecb22a0070e035284ce75c30be60753cb8c/68747470733a2f2f617773312e646973636f757273652d63646e2e636f6d2f7374616e6461726431372f75706c6f6164732f736c696365722f6f7074696d697a65642f32582f642f643362626532316637636435393339346366396264303065366262353133626136666261333065305f325f31303335783632382e6a706567) + + +Slicer Discourse References: +1. [https://discourse.slicer.org/t/how-to-perform-3d-cinematic-rendering/7313](https://discourse.slicer.org/t/how-to-perform-3d-cinematic-rendering/7313) +1. [https://discourse.slicer.org/t/is-there-interest-in-higher-quality-rendering-for-slicer/6862/5](https://discourse.slicer.org/t/is-there-interest-in-higher-quality-rendering-for-slicer/6862/5) +1. [https://discourse.slicer.org/t/2021-01-19-hangout/15585/2](https://discourse.slicer.org/t/2021-01-19-hangout/15585/2) + +VTK References: +1. VTK PBR [https://www.kitware.com/vtk-pbr/](https://www.kitware.com/vtk-pbr/) +1. PBR integration in Paraview [https://www.kitware.com/physically-based-rendering-improvements-in-paraview/](https://www.kitware.com/physically-based-rendering-improvements-in-paraview/) +1. Related merge request for VTK [https://gitlab.kitware.com/vtk/vtk/-/merge_requests/5584](https://gitlab.kitware.com/vtk/vtk/-/merge_requests/5584) diff --git a/PW37_2022_Virtual/Projects/EquivalentRotationSliders/README.md b/PW37_2022_Virtual/Projects/EquivalentRotationSliders/README.md new file mode 100644 index 000000000..a49050930 --- /dev/null +++ b/PW37_2022_Virtual/Projects/EquivalentRotationSliders/README.md @@ -0,0 +1,42 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Equivalent Rotation Sliders + +## Key Investigators + +- Mauro I. 
Dominguez +- Theodore Aptekarev +- Csaba Pinter +- Andras Lasso + +# Project Description + +Add a new feature to Slicer to describe/edit/interact with rotation transforms + +## Objective + +Getting the correct angular descriptions of transforms to sliders, and from sliders back to a matrix, independently of the order of the slider changes + +## Approach and Plan + +There is a rudimentary implementation here: +https://gist.github.com/mauigna06/627596274a2b3412989bae81cb060fed + +## Progress and Next Steps + + + +1. Confirm the math needed for the transforms, implement some tests in Python. +1. ... +1. ... + +# Illustrations + + + +# Background and References + +https://discourse.slicer.org/t/slicer-doesnt-show-rotation-slider-value-corresponding-to-a-transform-after-rotation-axis-is-not-cannonical/23943/12 diff --git a/PW37_2022_Virtual/Projects/IDCProstateSegmentation/README.md b/PW37_2022_Virtual/Projects/IDCProstateSegmentation/README.md new file mode 100644 index 000000000..0cf1c4cbf --- /dev/null +++ b/PW37_2022_Virtual/Projects/IDCProstateSegmentation/README.md @@ -0,0 +1,57 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# nnUnet - Prostate segmentation on Imaging Data Commons (IDC) data + +## Key Investigators + +- Cosmin Ciausu (Brigham and Women's Hospital) +- Andrey Fedorov (Brigham and Women's Hospital) + +# Project Description + +Inference on prostate IDC data using [nnUnet](https://github.com/MIC-DKFZ/nnUNet) segmentation framework. +This framework provides an end-to-end segmentation pipeline, from pre-processing, data augmentation, hyper-parameter selection to post-processing, with variants of a Unet segmentation model. +The Imaging Data Commons platform provides both labelled and unlabelled prostate scan collections. + +## Objective + +1. Augment existing and applicable IDC collections with prostate segmentations obtained from nnUnet pre-trained models. +2.
Investigate/analyze nnUnet framework generalizability to IDC data. + +## Approach and Plan + +1. Use nnUnet models pre-trained on prostate data to get prediction results on labelled IDC collections first, [PROSTATEx](https://portal.imaging.datacommons.cancer.gov/explore/filters/?access=Public&collection_id=Community&collection_id=prostatex) or [qin-prostate-repeatability](https://portal.imaging.datacommons.cancer.gov/explore/filters/?access=Public&collection_id=QIN&collection_id=qin_prostate_repeatability) for example. +2. Evaluate performance on labelled data, followed by inference on unlabelled IDC collections. + +## Progress and Next Steps + +1. Verification of nnUnet claimed results on prostate decathlon data, used for available pre-trained models. +2. Inference on Qin-Prostate-Repeatability collection using a pre-trained nnUnet model on task05 imaging decathlon data : + + * 3d full-res model, T2 and ADC modalities, so there is a need to resample the input. + +3. Inference on Qin-Prostate-Repeatability collection using a different pre-trained nnUnet model, task 24 promise. + + * Easier to corner the resampling problem since this pre-trained model has only one input modality -- T2 + +4. Obtained good Dice score results on the 15 PatientIDs divided into two studies each using this model. +5. Dealt with the Resampling/Converting issue - slice spacing incorrect -- use of simpleITK instead of plastimatch + +# Illustrations +Slicer visualisation of ground truth and predicted whole prostate segmentation mask, on PatientID01. +Red is ground truth and green is prediction from nnUnet.
+ +![Slicer demo](slicer_idc_prostate_seg.gif) + + + +# Background and References + +* [Ipynb link](https://colab.research.google.com/drive/1len4_C1mzDi5kDqg120avexJ9g7sEiM9?usp=sharing) +* [Google slides link](https://docs.google.com/presentation/d/10A1zjISq8pcal4enwX48TTj3jgUvvzuboCShGGiI4FA/edit?usp=sharing) +* [nnUnet pre-trained models for download](https://zenodo.org/record/4003545#.Yr7DA-zMIrk) +* [nnUnet paper](https://www.nature.com/articles/s41592-020-01008-z) + diff --git a/PW37_2022_Virtual/Projects/IDCProstateSegmentation/slicer_idc_prostate_seg.gif b/PW37_2022_Virtual/Projects/IDCProstateSegmentation/slicer_idc_prostate_seg.gif new file mode 100644 index 000000000..b0c5b4df7 Binary files /dev/null and b/PW37_2022_Virtual/Projects/IDCProstateSegmentation/slicer_idc_prostate_seg.gif differ diff --git a/PW37_2022_Virtual/Projects/ImagingDataCommons/README.md b/PW37_2022_Virtual/Projects/ImagingDataCommons/README.md new file mode 100644 index 000000000..af963a630 --- /dev/null +++ b/PW37_2022_Virtual/Projects/ImagingDataCommons/README.md @@ -0,0 +1,55 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# NCI Imaging Data Commons + +## Key Investigators + +- Andrey Fedorov (BWH) +- Deepa Krishnaswamy (BWH) +- Dennis Bontempi (AIM Lab/HMS/Maastro) +- Cosmin Ciausu (BWH) +- Steve Pieper (Isomics Inc) +- Ron Kikinis (BWH) + + +# Project Description + +[NCI Imaging Data Commons (IDC)](https://imaging.datacommons.cancer.gov) is a cloud-based repository of publicly available cancer imaging data co-located with the analysis and exploration tools and resources. IDC is a node within the broader NCI Cancer Research Data Commons (CRDC) infrastructure that provides secure access to a large, comprehensive, and expanding collection of cancer research data. + +## Objective + + + +1. Introduce IDC to the group, demonstrate main capabilities, and latest developments. +2. Support users experiencing issues using IDC or completing tutorials. +3. 
Collect feedback for the future development. + +## Approach and Plan + + + +1. Organize tutorial/breakout session on how to get started with IDC. + +## Progress and Next Steps + + + +1. Tutorial/breakout session took place on Thursday. Document with the agenda and all the materials presented (slides, Colab notebooks) are here: [https://bit.ly/3NzJNNM](https://bit.ly/3NzJNNM). +2. Follow-up troubleshooting session same day, through discussions with Simon Drouin, identified and fixed gaps in the documentation and notebooks +3. Through a [conversation on IDC discourse](https://discourse.canceridc.dev/t/storing-definitions-of-data-collections-as-dicom-entities/286) that started from a question on PW discord, learned about a new kind of DICOM object that might be suitable for storing cohorts. + +IDC Office Hours are open to everyone on Google Meet at [https://meet.google.com/xyt-vody-tvb](https://meet.google.com/xyt-vody-tvb) every Tuesday 16:30 – 17:30 (New York) and Wednesday 10:30-11:30 (New York). Please drop by to ask any questions related to IDC and troubleshoot +issues! 
+ +# Illustrations + + + + +# Background and References + +* [NCI Imaging Data Commons](https://imaging.datacommons.cancer.gov) + diff --git a/PW37_2022_Virtual/Projects/LNQ/README.md b/PW37_2022_Virtual/Projects/LNQ/README.md new file mode 100644 index 000000000..a43395bcd --- /dev/null +++ b/PW37_2022_Virtual/Projects/LNQ/README.md @@ -0,0 +1,38 @@ +back to [Projects List](../../README.md#ProjectsList) + +# MONAI Label App for AI-assisted Interactive Lymph Node Segmentation in CT + +## Key Investigators +- Roya Khajavi (Department of Radiology, Brigham and Women’s Hospital, Boston, MA) +- Erik Ziegler (Novometrics LLC, Lexington, MA) +- Steve Pieper (Isomics Inc, Cambridge, MA) +- Ron Kikinis (Department of Radiology, Brigham and Women’s Hospital, Boston, MA) + +## Project Description +We have designed, developed, and validated deep learning methods for mediastinal lymph node segmentation using 3D U-Net and Tensorflow. +In this project we aim to investigate and build a MONAI Label APP to interactively segment, train, infer, and employ active learning strategies for mediastinal lymph node segmentation in CT scans. + +## Objective + +To create an end-to-end pipeline for interactive AI-assisted lymph node annotation using MONAI Label and 3D Slicer. + +## Approach and Plan + +We will use the mediastinal subset of TCIA CT Lymph Node as data for development and performing experiments. Below is our plan during the course of project week: +1. Download TCIA data, convert to nifti, and organize per requirements of MONAI Label. +1. Set up MONAILabelAPP including network definition +2. Set up MONAI Label training pipeline: including validation split, transformations, and data augmentations. +3. Set up MONAI Label inference pipeline: set type of inferers and inference transforms. +4. Set up MONAI Label active learning strategy +5. Set up MONAI Label server on Google Cloud Platform to efficiently train models on GPUs. 
+ +## Progress +- We completed data gathering and data organization based on MONAI Label requirements. +- Discussed required transformation for lymph node annotation with MONAI label team, specifically ways to handle scarcity of foreground label in training data. +- Added RandCropByPosNegLabel and CropForeground transforms to the training pipeline. +- Started setting up Google Cloud Platform for interactive annotation, training, and inference using MONAI Label. + +## Next steps +- Finish setting up MONAI Label on GCP. +- Testing the training pipeline with the added RandCropByPosNegLabel and CropForeground transforms and make sure we can handle class imbalance during training. +- Implement active learning strategy. diff --git a/PW37_2022_Virtual/Projects/LowCostUltrasoundTraining/README.md b/PW37_2022_Virtual/Projects/LowCostUltrasoundTraining/README.md new file mode 100644 index 000000000..5714b2616 --- /dev/null +++ b/PW37_2022_Virtual/Projects/LowCostUltrasoundTraining/README.md @@ -0,0 +1,74 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Low-Cost Ultrasound Training + +## Key Investigators + +- David García Mato (Ebatinca S.L., Las Palmas de Gran Canaria, Spain) +- Csaba Pinter (Ebatinca S.L., Las Palmas de Gran Canaria, Spain) +- Rebecca Hisey (Queen’s University, Kingston, ON, Canada) +- Matthew Holden (Carleton University, Ottawa, ON, Canada) + +# Project Description +[**Ebatinca S.L.**](https://ebatinca.com/) is currently developing a **low-cost training platform for ultrasound imaging and ultrasound-guided procedures** in low- and middle-income countries. We are developing a 3D Slicer based application to perform training exercises and evaluate participants. The app is called [**TrainUS**](https://github.com/EBATINCA/TrainUS) and it is available with open-source license. + +Some basic features have already been developed: participant/recording management, hardware connection, selection of training exercises,... 
Currently, we are working on the development of basic exercises to train basic ultrasound skills. The app should be able to evaluate recordings made by users and to provide feedback about their performance. + +## Objective + + +Develop a specific module to train in-plane needle insertions. The module should display instructions on how to perform the exercise correctly and should enable the recording of ultrasound-guided needle insertions. Then, recordings must be evaluated using the [PerkTutor extension](http://perktutor.github.io/), getting overall performance metrics (elapsed time, needle path length, rotations, translations,...). Performance metrics must be displayed in plots and tables to the users, providing specific feedback to users about their performance. + +Once integrated, this module could serve as a reference/example for the development of future exercises in TrainUS. + +## Approach and Plan + + + +1. Determine best way to manage recordings of ultrasound images and tracking data. Currently, we are saving the entire sequence browser node as .sqbr files which can then be easily imported into Slicer using the PerkTutor extension. +2. Define best methodology to display exercise instructions to users: images vs video +3. Discuss the usefulness of assessing performance metric values in "real-time" during sequence playback. Example: distance from needle tip to US plane. Real-time vs overall metrics. +4. Integrate exercise into TrainUS app. +5. Discuss best strategy to provide specific feedback to users based on recorded data from experts. Deep learning? + +## Progress and Next Steps + + + +1. Saving recordings into a single .sqbr file seems the best option for easy import/export. Current infrastructure enables saving the entire vtkMRMLSequenceBrowserNode into a .sqbr file.
Custom app should include updated versions of Slicer and PerkTutor to include the fixes related to the following issues which prevented a correct management of .sqbr files: [#6429](https://github.com/Slicer/Slicer/issues/6429) and [#6435](https://github.com/Slicer/Slicer/issues/6435) +2. It has been proposed that it may be a good idea to show the instructions (images or videos) while the exercise is being performed. This could be integrated as a possible configuration for the exercises. Currently, instructions can be shown as images or videos using slice views integrated into the Slicer layout. +3. The new feature to measure and evaluate performance metrics in real-time is considered really useful by the community. This is specially useful to identify specific parts of the recordings where performance drops significantly. +4. Feedback for basic skills exercises can be computed with current methodologies integrated into PerkTutor extension. No deep learning is needed unless complexity of the exercise/procedure increases. +5. Automatic segmentation (deep learning) could be integrated into specific exercise to compute useful data/metrics from the US image. Example: position of a vessel/mass on the image, position of the needle,... +6. Exercise integrated into TrainUS custom app. + +# Illustrations + +Instructions slides displayed to the user before starting the exercise: + +drawing + +Video with instructions displayed to the user before starting the exercise: + +drawing + +Plot showing real-time metric values during recording playback: + +drawing + +Table showing overall performance metrics computed using PerkTutor extension: + +drawing + + + + +# Background and References +- Previous [Low-Cost Ultrasound Training](https://github.com/NA-MIC/ProjectWeek/blob/master/PW36_2022_Virtual/Projects/LowCostUltrasoundTraining/README.md) during 36th Project Week held virtually on January 17-21, 2022. 
+- **TrainUS** GitHub repository: [TrainUS app](https://github.com/EBATINCA/TrainUS) +- **PerkTutor** GitHub repository: [PerkTutor extension](https://github.com/PerkTutor/PerkTutor) + + diff --git a/PW37_2022_Virtual/Projects/LumbarSpineSegmentation/README.md b/PW37_2022_Virtual/Projects/LumbarSpineSegmentation/README.md new file mode 100644 index 000000000..9815517ad --- /dev/null +++ b/PW37_2022_Virtual/Projects/LumbarSpineSegmentation/README.md @@ -0,0 +1,150 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Lumbar Spine Segmentation using MONAI Label + +## Key Investigators + +- Nayra Pumar (Ebatinca S.L., Las Palmas de Gran Canaria, Spain) +- María Rosa Rodriguez (Universidad de Las Palmas de Gran Canaria (ULPGC), Spain) +- David García Mato (Ebatinca S.L., Las Palmas de Gran Canaria, Spain) + +# Project Description + +Our goal is to have a trained model in MONAI that is able to segment five lumbar vertebrae and four intervertebral discs, working with the 3D volumes of a public dataset. + +## Objective + +Train and deploy neural network to segment lumbar verteral bodies and intervertebral discs from MRI 3D volumes of the lumbar region. + + + +1. Objective A. Prepare the public dataset of choice, using the T2. +1. Objective B. Train MONAI to segment the 5 vertebrae (from L1 to L5) and the 4 intervertebral corresponding discs. +1. Objective C. Deploy the trained model to another computer and be able to run it to perform segmentations without being connected to a server. + +## Approach and Plan + + +1. Choose a public dataset +1. Get the files ready for MONAI. +1. Install the MONAI server locally +1. Train the model + +## Next Steps + + + +1. The chosen dataset is [Multi-scanner and multi-modal lumbar vertebral body and intervertebral disc segmentation database](https://www.nature.com/articles/s41597-022-01222-8): consisting of 39 patients. From all the images provided, we selected 51 of them, the ones with a T2 volume. 
+5. Change the file format to .nii for the selected t2 images. +6. Unify the 9 segments into a single file (5 segments for vertebrae and 4 for discs) + +# Past issues + +* MONAI not being able to read the .nii images when they been resized with nibabel. +The command we used for resize was: +`result1 = skTrans.resize(im, (newSize,newSize,numSlices), order=0, preserve_range=True)` +* Resized masks no longer in B/W, but having some grayscale pixels: solved with conditional value replacing using numpy (replacing values <0.5 to 0 and the remaining ones to 1) +* Missing segmentations in the dataset + +# Illustrations +T2 volume with the mask file, showing the 9 segments: + + + +The segmentation, with the corresponding labels, as seen in 3DSlicer + + + +# Progress +Using the public dataset, once all segments had been placed in the same .nii file, and with the right segment IDs: 1 to 5 for the vertebrae (named from top to bottom) and 6 to 9 for the discs (named from top to bottom as well), we trained the model using different setups. + +The tags were configured like this in the segmentation.py file: + + + +These were commented or left as this, depending on the number of tags to use on each training attempt. +One issue we found is that tags should be always be numbered starting at 1; if we tried to run the model using tags from 6 to 9 we would get an error and the model wouldn't train. This issue will be solved soon in the code. + +## Al Khalil dataset + +### 2000 epochs and 9 tags + + + +The accuracy was too low (23%), and the results weren't good. +Note also that we had only 32 images for training and 8 for validation. So we went for less tags. + +### 2000 epochs and 5 tags + +This time we are only segmentating bone: tags 1 to 5 corresponding to the vertebrae. + + + +Visual results are better, but the accuracy is still too low (51%). 
+ +### 2000 epochs and 2 tags + +Trying to determine if the poor results were caused by the lack of an appropriate number of images, the model was trained for only two tags: vertebrae L1 and L2. + + + +Accuracy 52%, almost the same as when aiming for 5 tags. + +A possible approach then would be to train five different networks, each for two elements. Then use it to segment volumes from another dataset, put all of these segmentations into a single file (like what we did when we prepared the Al Khalil dataset) and have a larger segmented dataset with the 9 tags. This idea was put on hold, to go for further training with different approaches. + +## CHU dataset + +We tried with another public dataset, called CHU for short, [Annotated T2-weighted MR images of the Lower Spine](https://zenodo.org/record/22304#.Yr7nSXZ_paY). This one only has bone, and all seven vertebrae (the 2 last thoracic and 5 lumbar) are all in the same tag. + +The dataset comprises 23 images, and we hand processed them, separating the 7 vertebrae into 7 segments (numbered from top to bottom). + +### 2000 epochs and 5 tags + + + +Good accuracy (92%), acceptable visual results, but there is an evident confusion in one of the vertebrae, where tags are mixed. + +We tried one of the images from the Al Khalil database with this model and got this result, because of the different ways the x,y,z coordinates are oriented on each dataset: + + + +### 2000 epochs and 1 tag + +This is using a single tag with all the vertebrae together. (The image shows only the last segment, but it segmented the 7 vertebrae) + + + +Accuracy 94%. + +## DeepEdit + +Andres Diaz Pinto suggested to train using the deep edit module, instead of the segmentation we had been using until the moment. + +The computer we have been using for this has an RTX 3070, and we tailored the memory usage for a 128x128x128 train image size: + + + +### 200 epochs and 9 tags + +It took 22 hours to train. Again, we had 32+8 images and 9 tags.
+ + + +Taking into account the low number of images used for training, the results are good. But not good enough to start segmenting other images. Accuracy is 80%. + +# Work in progress + +* Separate the Al Khalil dataset into two different ones: vertebrae (5 tags, 1 to 5) and discs (4 tags, 1 to 4) and train separately +* Put the CHU dataset in the same coordinate system as Al Khalil and use the L1 to L5 segments to increase the number of images available for training the vertebrae segmentation: 32 from Al Khalil + 23 from CHU = 55 volumes. +* Train the model for the vertebrae with the 55 images for 5 tags, using the segmentation and deep edit and compare results. +* Train the model for the discs with the 32 images for 4 tags, using the segmentation and deep edit and compare results. + +# Background and References + + +* Al Khalil dataset: [Multi-scanner and multi-modal lumbar vertebral body and intervertebral disc segmentation database](https://www.nature.com/articles/s41597-022-01222-8) +* CHU dataset: [Annotated T2-weighted MR images of the Lower Spine](https://zenodo.org/record/22304#.Yr7nSXZ_paY) + +# Acknowledgments + +Many thanks to all those who stopped by the Discord channel to contribute their knowledge. And especially thanks to Andrés Diaz-Pinto for his availability and patience in helping us to configure the models.
diff --git a/PW37_2022_Virtual/Projects/MONAILabelAndDeploy/README.md b/PW37_2022_Virtual/Projects/MONAILabelAndDeploy/README.md new file mode 100644 index 000000000..3ed83ced2 --- /dev/null +++ b/PW37_2022_Virtual/Projects/MONAILabelAndDeploy/README.md @@ -0,0 +1,53 @@ +back to [Projects List](../../README.md#ProjectsList) + +# MONAI Label + Deploy SDK Repository Setup Example + +## Key Investigators +- Erik Ziegler (Novometrics LLC, Lexington, MA) +- Roya Khajavi (Department of Radiology, Brigham and Women’s Hospital, Boston, MA) +- Steve Pieper (Isomics Inc, Cambridge, MA) +- Ron Kikinis (Department of Radiology, Brigham and Women’s Hospital, Boston, MA) +- Andres Diaz-Pinto (Nvidia) + +## Project Description +We have designed, developed, and validated deep learning methods for mediastinal lymph node segmentation. We want to use active learning and MONAI Label to transfer this to additional areas of the body (e.g. abdominal or retroperitoneal lymph nodes). We want to use MONAI Deploy SDK as the framework for pre/post-processing in our production inference pipeline. It's currently unclear how a MONAI Label App relates to a MONAI Deploy SDK Application. They feel largely disconnected at the moment, despite sharing the same project name. It's not possible to build a repository of custom Operators in MONAI Deploy that are shared across Applications without publishing a public Python package first. + +## Objective + +To work with the MONAI Deploy SDK and MONAI Label teams to identify how to structure a repository for use with both toolkits. + +## Approach and Plan + +We will use the mediastinal subset of TCIA CT Lymph Node as data for development and performing experiments. Below is our plan during the course of project week: +1. Setup MONAI Label approach for lymph node segmentation workflow (see [LNQ Project](../LNQ/README.md) for more details) +2. Determine how to wrap / reorganize this application to share code with a MONAI Deploy SDK Application +3. 
Publish an example repository for this setup +4. Work with MONAI Deploy SDK team to identify how custom Operators can be shared across Applications + +## Progress and Next Steps +Not much progress, got pulled into other things all week. +- Had a good meeting about MONAI Label and discussion with Andres and Khaled. +- Plan to meet with MONAI Deploy SDK team in the near future to discuss how users are expected to build custom applications +- Khaled and Andres pointed out the MONAI Bundle format which is used in the MONAI Model Zoo (https://github.com/Project-MONAI/model-zoo) + +The end goal is a directory structure something like this, but it's still a bit unclear to us how it will work. + +``` +/src + /shared python code + - custom operators or pre/postprocessing steps + - custom models + /monai-label-apps + (These should be able to use MONAI Bundles. Each Application may have custom input from the user which may be different than e.g. deep edit or deep grow. The MONAI Label applications are expected to be applicable to already-curated (e.g. cropped) datasets.) + - /app1 (e.g. https://github.com/Project-MONAI/MONAILabel/blob/main/sample-apps/radiology/main.py, which runs in MONAI Label Server) + - /app2 + - /app3 + /monai-deploy-apps + (These set up the pre/postprocessing pipelines prior to running the model which are required for integration at the application level (e.g. cropping to lungs). Each App should be able to be packaged into Docker containers individually. + - /app1 (e.g. https://github.com/Project-MONAI/monai-deploy-app-sdk/blob/main/examples/apps/ai_unetr_seg_app/app.py) + - /app2 + /monai-bundles + (These include the actual model, the network definition, the training and inference pre/postprocessing steps (e.g. NormalizeIntensityd). Unclear how this would pull in private pre/postprocessing steps or models. + - model bundle 1 (e.g. 
https://github.com/Project-MONAI/model-zoo/tree/dev/models/brats_mri_segmentation/configs) + - model bundle 2 +``` diff --git a/PW37_2022_Virtual/Projects/MONAILabelLung/MONAILabel_Installation.md b/PW37_2022_Virtual/Projects/MONAILabelLung/MONAILabel_Installation.md new file mode 100644 index 000000000..e47e2439b --- /dev/null +++ b/PW37_2022_Virtual/Projects/MONAILabelLung/MONAILabel_Installation.md @@ -0,0 +1,140 @@ +## MONAI Label installation + +_as prepared for the Virtual MONAI Label workshop_ +_June 22nd, 2022_ +_Rudolf Bumm (KSGR)  and Andres Diaz-Pinto (NVIDIA)_ + +The [Monailabel GitHub page is here.](https://github.com/Project-MONAI/MONAILabel) + +To run MONAI Label locally, you should have a computer with a medium/high-end NVIDIA GPU (16-24 GB totally available video RAM)  and CUDA available.  +MONAI Label can also be run on CPU, but the performance will lack.   + +GPU compatibility: + +[https://developer.nvidia.com/cuda-gpus](https://developer.nvidia.com/cuda-gpus) + +## Installation in Windows 11 step by step: + +Install Python 3.9 from Windows Store + +[Enable long path names in Windows 11](https://thegeekpage.com/make-windows-11-accept-file-paths-over-260-characters/) + +Use an elevated Powershell (admin mode)  +change to (cd) user directory (important, start in a directory with full read/write access)  + +``` +python -m pip install --upgrade pip setuptools wheel +``` + +Install the latest stable version for PyTorch + +``` +pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113 +``` + +Check if cuda enabled + +``` +python -c "import torch; print(torch.cuda.is_available())" +``` + +True + +_\# if false troubleshoot_ + +[Pytorch get started page](https://pytorch.org/get-started/locally/) + +![image](https://github.com/NA-MIC/ProjectWeek/assets/18140094/cbb0c881-f25b-40e2-8748-3e7aa485e68d) + + +Install latest monailabel version from Github + +``` +git clone 
https://github.com/Project-MONAI/MONAILabel +``` + +``` +pip install -r MONAILabel/requirements.txt +``` + +Set MONAILabel script paths + +``` +$Env:PATH += ";C:\Users\yourname\MONAILabel\monailabel\scripts" +``` + +Download sample apps + +``` +monailabel apps # List sample apps +monailabel apps --download --name radiology --output apps +``` + +Download MSD Datasets + +``` +monailabel datasets # List sample datasets +monailabel datasets --download --name Task06_Lung --output datasets +``` + +Run Segmentation Model. +Options can be (deepedit|deepgrow|segmentation|segmentation\_spleen|all) in case of radiology app. +You can also pass comma separate models like --conf models deepedit,segmentation + +``` +monailabel start_server --app apps/radiology --studies datasets/Task06_Lung/imagesTr --conf models segmentation +``` + +Once you start the MONAI Label Server, by default it will be up and serving at [http://127.0.0.1:8000/](http://127.0.0.1:8000/). Open the serving URL in browser. It will provide you the list of Rest APIs available. + +**Known problems:** + +**Monai model-zoo API request fails** + +IF the git api request for a model fails due to an improperly formatted URL + +(for example https://api.github.com/repos/Project-MONAI\model-zoo/releases 1) + +THEN + +Step 1: The model “.zip” file can be downloaded (via web browser, for example) with a properly formatted version of the URL (replace '' with ‘/’) and then expanded into a folder on your PC. + +Step 2: Set values for two environment variable the server will use as an override: + +$Env:MONAI\_ZOO\_SOURCE = ‘local’ +$Env:MONAI\_ZOO\_REPO = \< folder name > + +Step 3: Start monailabel server + +## Docker-based step by step installation: + +Running MonaiLabel through [docker conterization environment](https://docs.docker.com/get-started/overview/) will simply installation steps considerably. 
This is particularly true for GPU support, since Docker will require you to install only the NVIDIA GPU driver and you don't have to worry about the CUDA environment setup. However, for docker to function you need to have admin (sudo) privileges on the computer. Instructions here are primarily tested on a Linux environment, but should be applicable to Windows docker as well. + +1. **[Start from here and find if your OS is supported](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html)**. Follow the rest of the instructions in the Nvidia documentation. The basic steps are: + +* Confirming/installing NVIDIA GPU driver +* Confirming/installing Docker engine and runtime libraries +* Installing NVIDIA Docker runtime libraries +* Testing that NVIDIA docker runtime environment is working correctly by running the command `sudo docker run --rm --gpus all nvidia/cuda:11.0.3-base-ubuntu20.04 nvidia-smi`. If this command doesn't work correctly, you won't be able to run MonaiLabel with GPU support. + +Once you confirmed that NVIDIA docker runtime environment is working correctly for nvidia-smi command above, follow the instructions for [MonaiLabel Docker installation](https://github.com/Project-MONAI/MONAILabel#docker). + +To test the dockerized MonaiLabel server, open a browser and connect to the http://127.0.0.1:8000. If everything is working correctly, this will provide the list of the REST APIs available. Note that this assumes the docker engine is running on the same computer where you are using the web browser. If that's not the case, replace the http://127.0.0.1 with the IP address of the docker host. + +### Docker specific considerations + +Changes in docker environments are ephemeral, meaning if you restart your docker session all the changes you made to it will be lost. This would be true for any training session you might have done, any new segmentations you might have pushed to the MonaiLabel server.
Therefore, after following the instructions above and confirming that your MonaiLabel docker session is working correctly, you need to modify the docker command to use persistent storage volumes so that changes can be retained. For example, create a Monai folder on your desktop, via command + +``` +mkdir $USER/Desktop/Monai +``` + +then you can modify your docker command such that this folder is mapped inside the Docker environment via -v option: + +`sudo docker run -it --rm --gpus all --ipc=host --net=host -v $USER/Desktop/Monai/:/workspace/ projectmonai/monailabel:latest bash` + +In this case, contents of $USER/Desktop/Monai will be visible under /workspace folder in docker enviroment. Next, you modify the MonaiLabel server startup scripts to make use of this persistent folder: + +``` +monailabel start_server --app /workspace/apps/radiology --studies /workspace/datasets/Task06_Lung/imagesTr --conf models segmentation +``` diff --git a/PW37_2022_Virtual/Projects/MONAILabelLung/README.md b/PW37_2022_Virtual/Projects/MONAILabelLung/README.md new file mode 100644 index 000000000..c3bdf0d02 --- /dev/null +++ b/PW37_2022_Virtual/Projects/MONAILabelLung/README.md @@ -0,0 +1,76 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# MONAI Label lung and airway segmentation + +## Key Investigators + +* Rudolf Bumm, MD (KSGR) +* Andres Diaz-Pinto (Nvidia) + +# Project Description + +MONAI Label is a server-client system that facilitates interactive medical image annotation by using AI. It is an open-source and easy-to-install ecosystem that can run locally on a machine with single or multiple GPUs. Both server and client work on the same/different machine. It shares the same principles with MONAI. 
+ +The aim of the project is to set up, train and evaluate a lung and airway server model in MONAI Label + +## Objective + +* set up MONAI Label on a PC with moderate to high-end Nvidea GPU +* load MONAI Label apps and datasets +* use Lung CT Segmenter for rapid creation of detailed CT Lung labels in MONAI Label for + * right lung + * left lung + * airways +* do training with the server model  +* evaluate the AI´s auto-segmentation performance + +## Approach and Plan + +fine tune the MONAI Label server +provide links + +## How to set up a MONAI Label in Windows 11 + +* [Please refer to this document](./MONAILabel_Installation.md)  + +## Dataset + +This is the dataset we have been using: + +Decathlon lung dataset (Task06\_lung) 63 cases with lung tumors [http://medicaldecathlon.com/](http://medicaldecathlon.com/)  + +It is available for download (8 GB) after installation of MONAI Label and running this command in a powershell or bash: (edited) + +`monailabel datasets --download --name Task06_Lung --output datasets`  + +## Progress and Next Steps + +* Demonstration of the current workflow at the MONAI Label Workshop June 22nd 2022 +* Youtube Video: [https://www.youtube.com/watch?v=wtiEe_jiUzg](https://www.youtube.com/watch?v=wtiEe_jiUzg)  + +# Illustrations / Results + +![](https://lh4.googleusercontent.com/qDgKazWsVFylsaoVOcR87y2OwPsTuMRULtLIZ5dDpppktTaG5rKrFUpC3PQj0Js7Ow2TPMa1ixEP2J8qnKFrzCrY2Nv99W4g9Q33omjdvfxT7jeCysN_wGN_rxLgSLzfQLGWgixZsm8yC9aN5r-img) + +Fig 1: MONAI Label inference after providing 2 high quality samples and training (50 epochs): Not usable + +![](https://lh3.googleusercontent.com/DmJb1FLEcoDjGLF0VkVvT7JIicjt10KYGdRbE1NSpvoXFH-CANWPuboDzpTehbe48iKEl9AQITmrd7XuwrQpefu7QeqbM4Q5soPRKyK8V6ZouS3js62eUNZ4BxIzhXgI5BPWHVI2cUBrQtI-ENNvBg) + +Fig 2: Status after providing 5 more high-quality labels and  training 1000 epochs /  5 iterations (1 h with RTX 3070 Ti), "deepedit" model:   +ML is able to divide right and left lungs as well as airways, but 
resolution is low.    + +![](https://lh5.googleusercontent.com/MJwUyGBtI15UYL2OPc6LLyCUpKNpk_0G9GddXcovVYWKD_EXOlIWuWXthbkE-n4FPC-Ay_F-bNZ1EtWz5o9bR3Wzjf7OoUgMJZnejxoLejLW46gvxpUzCgDyx8nIEl3aI4U3T_biYB0Vm4tT7Mq0fQ) + +Fig 3: Status after labelling 17 more datasets, training another 1000 epochs /  22 iterations (6 h with RTX 3070 Ti), "segmentation" model:  +Much better resolution.   + +# ![](https://lh5.googleusercontent.com/kN_jvl7i-Osv662Yhh69wRg5nMS4PzdYQarTBGYe6gTyq6-1A-xAcxkUSdIlFiSdyr3WXxk_WQGfQKAuwCp2OAiHcN2irQfeW1-DsWDgx31aRzVDy6KwIQo1Yf955Dh3k4K0YuLEVfwNkOG9kPkjPQ) + +Fig 4: Autosegmentation after label correction, 500 epochs / 22 iterations training (1.5h RTX 3070 Ti): +Good result!  + +# Background and References + +https://github.com/Project-MONAI/MONAILabel + +https://github.com/rbumm/SlicerLungCTAnalyzer diff --git a/PW37_2022_Virtual/Projects/MarkupConstraints/README.md b/PW37_2022_Virtual/Projects/MarkupConstraints/README.md new file mode 100644 index 000000000..1245739fb --- /dev/null +++ b/PW37_2022_Virtual/Projects/MarkupConstraints/README.md @@ -0,0 +1,71 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# MarkupConstraints Slicer Module + +## Key Investigators + +- David Allemang (Kitware Inc.) +- Jean-Christophe Fillion-Robin (Kitware Inc.) +- Lucia Cevidanes (University of Michigan) +- Maxime Gillot (University of Michigan) +- Baptiste Baquero (University of Michigan) + +## Acknowledgements + +- Kyle Sunderland (PerkLab) + +# Project Description + +MarkupConstraints is a Slicer module intended for Slicer extension developers to constrain +and synchronize markups and control points of different nodes. + +The module has been developed to support Q3DCExtension, however I intend to expand it +further for reuse in other interactive tools. + +## Objective + +1. 
Robust constraints between control points in different vtkMRMLMarkupsNode instances + * Support for constraining points between, for example, a line and a fiducial markup is + very limited +2. Detect dependency cycles and provide meaningful error messages + * Slicer freezes indefinitely when updating constraints with cycles +3. Robust constraints from control points to other node types + * Project a point to the surface of a model +4. Support saving/loading a markup node or MRML scene while preserving constraints +5. Determine the best mode of distribution and publish + * Likely will be published on the Extension Index in "Developer Tools" +6. Create a simple interactive UI for debugging/testing purposes + * Should allow viewing or editing existing constraints in the scene + +## Progress + +* Created a Slicer module and logic with appropriate observers for applying constraints +* Began refining API to support arbitrary constraints and dependencies (as a means to + resolve #1) +* Fully resolved #1 with a robust API to define arbitrary constraints. +* Created [architecture documentation][arch] and [API documentation][api] for build constraint functions. +* Created unittest suite for primary features. + +[arch]: https://github.com/KitwareMedical/SlicerMarkupConstraints/blob/main/Docs/Architecture.md +[api]: https://github.com/KitwareMedical/SlicerMarkupConstraints/blob/main/Docs/API.md + +Thanks to Kyle Sunderland (PerkLab) for his advice and inspiration! + +## Next Steps + +* Implement more adaptors to enable constraints against arbitrary node types, not just control points. +* Use adaptors and node parameters/references to enable serialization with scene. +* Implement graph analysis to detect cycles and deadlocks. +* Expand constraint interface using abstract class for more extensibility. 
+ +# Illustrations + +![Projection and Distance constraints](./length-chain.gif) +![Chain of distance constraints](./project-anchor.gif) +![Constraint Demo](./project-axis.gif) + +# Background and References + +Source code is hosted in [SlicerMarkupConstraints][repo]. + +[repo]: https://github.com/KitwareMedical/SlicerMarkupConstraints diff --git a/PW37_2022_Virtual/Projects/MarkupConstraints/length-chain.gif b/PW37_2022_Virtual/Projects/MarkupConstraints/length-chain.gif new file mode 100644 index 000000000..2063042a3 Binary files /dev/null and b/PW37_2022_Virtual/Projects/MarkupConstraints/length-chain.gif differ diff --git a/PW37_2022_Virtual/Projects/MarkupConstraints/project-anchor.gif b/PW37_2022_Virtual/Projects/MarkupConstraints/project-anchor.gif new file mode 100644 index 000000000..f31f3f206 Binary files /dev/null and b/PW37_2022_Virtual/Projects/MarkupConstraints/project-anchor.gif differ diff --git a/PW37_2022_Virtual/Projects/MarkupConstraints/project-axis.gif b/PW37_2022_Virtual/Projects/MarkupConstraints/project-axis.gif new file mode 100644 index 000000000..ec111d2ed Binary files /dev/null and b/PW37_2022_Virtual/Projects/MarkupConstraints/project-axis.gif differ diff --git a/PW37_2022_Virtual/Projects/MultistageTeethSegmentation/PreliminaryTeethSegmentation_1.PNG b/PW37_2022_Virtual/Projects/MultistageTeethSegmentation/PreliminaryTeethSegmentation_1.PNG new file mode 100644 index 000000000..67fddc3c1 Binary files /dev/null and b/PW37_2022_Virtual/Projects/MultistageTeethSegmentation/PreliminaryTeethSegmentation_1.PNG differ diff --git a/PW37_2022_Virtual/Projects/MultistageTeethSegmentation/PreliminaryTeethSegmentation_2.PNG b/PW37_2022_Virtual/Projects/MultistageTeethSegmentation/PreliminaryTeethSegmentation_2.PNG new file mode 100644 index 000000000..2eda9f4b6 Binary files /dev/null and b/PW37_2022_Virtual/Projects/MultistageTeethSegmentation/PreliminaryTeethSegmentation_2.PNG differ diff --git 
a/PW37_2022_Virtual/Projects/MultistageTeethSegmentation/PreliminaryTeethSegmentation_3_Bad.PNG b/PW37_2022_Virtual/Projects/MultistageTeethSegmentation/PreliminaryTeethSegmentation_3_Bad.PNG new file mode 100644 index 000000000..7a45c5723 Binary files /dev/null and b/PW37_2022_Virtual/Projects/MultistageTeethSegmentation/PreliminaryTeethSegmentation_3_Bad.PNG differ diff --git a/PW37_2022_Virtual/Projects/MultistageTeethSegmentation/PreliminaryTeethSegmentation_4.PNG b/PW37_2022_Virtual/Projects/MultistageTeethSegmentation/PreliminaryTeethSegmentation_4.PNG new file mode 100644 index 000000000..0ce5573b1 Binary files /dev/null and b/PW37_2022_Virtual/Projects/MultistageTeethSegmentation/PreliminaryTeethSegmentation_4.PNG differ diff --git a/PW37_2022_Virtual/Projects/MultistageTeethSegmentation/PreliminaryTeethSegmentation_5_Bad.PNG b/PW37_2022_Virtual/Projects/MultistageTeethSegmentation/PreliminaryTeethSegmentation_5_Bad.PNG new file mode 100644 index 000000000..c49f01b98 Binary files /dev/null and b/PW37_2022_Virtual/Projects/MultistageTeethSegmentation/PreliminaryTeethSegmentation_5_Bad.PNG differ diff --git a/PW37_2022_Virtual/Projects/MultistageTeethSegmentation/README.md b/PW37_2022_Virtual/Projects/MultistageTeethSegmentation/README.md new file mode 100644 index 000000000..f1db4e914 --- /dev/null +++ b/PW37_2022_Virtual/Projects/MultistageTeethSegmentation/README.md @@ -0,0 +1,75 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Multi-stage deep learning segmentation of teeth + +## Key Investigators + +- Daniel Palkovics (Semmelweis Medical University) +- Csaba Pinter (Ebatinca) +- David Garcia Mato (Ebatinca) +- Andres Diaz-Pinto (NVidia) + +# Project Description + + + +Segmenting and identifying the teeth in a mandible or maxilla is a difficult task, especially due to the high number of structures and their similarity. Recent results suggest that multi-stage segmentation may yield more accurate segmentation in these scenarios. 
+ +## Objective + + + +The idea is to create a simple two-stage approach in MONAILabel where the first stage detects the teeth centre and the second stage accurately segments the teeth themselves. + +## Approach and Plan + + + +1. Discuss with Andrés the details about multi-stage segmentation in MONAILabel +1. Design the changes to be made + +## Progress and Next Steps + + + +1. Discussion with Andrés about multi-stage deep learning approach + - Multistage approach is more robust because the complexity is separated (robustness is the main advantage) + - Paper (see below) has several models: ROI, Centroid/Skeleton (numbers OR images), Multi-task tooth segmentation, Tooth ID classification, Cascaded bone segmentation + - Baseline data for centroid model are just the centroids, that can be calculated from the baseline segmentation. Same with the centerline one + - Implementing generic multi-stage approach in MONAILabel is a bit of a work + - In MONAI core this is easier to set up + - Why one model? Having one upper and one lower is OK for us + - Even lower + upper + bone + implant/tooth separation is a possibility + - Advantage of one multi-stage model is that we have only one system + - If we have both upper and lower then we need more data + - Clinically we'll only have either upper or lower + - How do we connect the stages? + - Centroid/skeleton using numbers (not images) is regression (not segmentation) + - Concatenate input numbers on the "bottom of the UNet" where we have a huge array of numbers after downsampling + - MONAI files + - .pt: model (need to define network first etc.) + - .ts: torch script that contains preprocessing and the inference too +2. Proposal + - Simple multi-stage model implementation using MONAI + - Initial: ROI definition -> Centroids -> Tooth segmentation + - Later still possible to add for example tooth identification, centerline, implant segmentation, etc. 
+ +# Illustrations + +There are some promising preliminary results + +![Good result](PreliminaryTeethSegmentation_1.PNG) +![Good result](PreliminaryTeethSegmentation_2.PNG) +![Good result](PreliminaryTeethSegmentation_4.PNG) + +but there is room for improvement! + +![Bad result](PreliminaryTeethSegmentation_3_Bad.PNG) +![Bad result](PreliminaryTeethSegmentation_5_Bad.PNG) + +# Background and References + + + +- [PW36 project](https://github.com/NA-MIC/ProjectWeek/tree/master/PW36_2022_Virtual/Projects/AutomaticSegmentationofTeethandAlveolarBone) +- [Cui2022 paper](https://www.nature.com/articles/s41467-022-29637-2) diff --git a/PW37_2022_Virtual/Projects/OHIFviewSR/README.md b/PW37_2022_Virtual/Projects/OHIFviewSR/README.md new file mode 100644 index 000000000..5643a1cde --- /dev/null +++ b/PW37_2022_Virtual/Projects/OHIFviewSR/README.md @@ -0,0 +1,55 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# View slice level annotations in OHIF using Structured Reports + +## Key Investigators + +- Deepa Krishnaswamy (Brigham and Women's Hospital) +- Davide Punzo (Radical Imaging) +- Markus Herrmann (MGH) +- Chris Bridge (MGH) +- Andrey Fedorov (Brigham and Women's Hospital) + +# Project Description + +In the last project week, we worked on an [approach](https://projectweek.na-mic.org/PW36_2022_Virtual/Projects/IDCBodyPartRegression/) for enriching the DICOM metadata, specifically the body part examined. +We showed that we could classify each CT axial slice in a volume as a particular body region -- head, neck, chest, abdomen, pelvis and legs. (using [this approach](https://arxiv.org/abs/2110.09148)) +In order to visualize the results of body region per slice, we used DICOM SEG objects. Though good for visualization, this is not exactly a good use of the DICOM SEG. +A better approach would be to use Structured Reports (SR), and view an annotation for each slice as you scroll. 
This functionality has been made available in the latest release of OHIF, and we would like to adapt our body part regression results to this format. + +## Objective + + + +1. Objective A. View slice level annotations using a Structured Report in the OHIF viewer. +1. Objective B. Visualize the body part regression results using the above approach + +## Approach and Plan + + + +1. A colab notebook has been written by Andrey using [highdicom](https://github.com/herrmannlab/highdicom) library to create instances of DICOM Structured Reports that instantiate [TID 1500 reporting template](https://dicom.nema.org/medical/dicom/current/output/chtml/part16/chapter_A.html#sect_TID_1500) and include slice-level qualitative (key:value) evaluations. +2. OHIF Viewer v2 was extended by Davide to enable visualization of slice-level annotations (see [https://github.com/OHIF/Viewers/issues/2797](https://github.com/OHIF/Viewers/issues/2797)) +3. Deepa will adapt this notebook to create the appropriate SR for the body part regression task. + +## Progress and Next Steps + + + +1. We have successfully created the SR for the body part regression task and can view the CT and SR in our own instance of the OHIF viewer. 
+ +# Illustrations + + + +![ohif_sr_mpr](https://user-images.githubusercontent.com/59979551/176761836-a36a8fbf-e29b-4fb7-8ebb-3c60fc54b9db.JPG) + +![ohif_sr_scroll](https://user-images.githubusercontent.com/59979551/176781588-50285b10-c341-4f95-bf34-1c771f800c6c.gif) + + +# Background and References + + diff --git a/PW37_2022_Virtual/Projects/OHIFviewSR/ohif_sr_scroll.mp4 b/PW37_2022_Virtual/Projects/OHIFviewSR/ohif_sr_scroll.mp4 new file mode 100644 index 000000000..70dcaca87 Binary files /dev/null and b/PW37_2022_Virtual/Projects/OHIFviewSR/ohif_sr_scroll.mp4 differ diff --git a/PW37_2022_Virtual/Projects/OutlierDetectionLargeScaleMammograms/README.md b/PW37_2022_Virtual/Projects/OutlierDetectionLargeScaleMammograms/README.md new file mode 100644 index 000000000..6af624e99 --- /dev/null +++ b/PW37_2022_Virtual/Projects/OutlierDetectionLargeScaleMammograms/README.md @@ -0,0 +1,38 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Streamlined Outlier Detection for Large Scale Mammogram Data + +## Key Investigators + +- Pablo Bendiksen (University of Massachusetts Boston) +- Neha Goyal (University of Massachusetts Boston) +- Ryan Zurrin (University of Massachusetts Boston) +- Kendrick Kheav (University of Massachusetts Boston) +- Daniel Haehn (University of Massachusetts Boston) + +# Project Description +Modern deep learning systems can detect breast cancer early when trained with large amounts of data. As part of our mission to create the world's largest publicly-available annotated mammography dataset with ground truth labels, we care to curate a final collection of 70,000 breast cancer scans (from a dataset of > 190,000 images) comprised of both 3D Digital Breast Tomosynthesis (DBT) and 2D Digital Mammography (DM) studies. To this end we must automate the effective detection of atypical scans across approximately 250,000 images. 
+ +## Objective + +To contribute translations of the user interface of 3D Slicer and its corresponding tutorials + +## Approach and Plan + +1. Become familiarized with the 3D Slicer Language Tools Module and Weblate's Glossary/String Search Functionality. + +## Progress and Next Steps + + +1. Continue to provide translation aid of 3D Slicer GUI components (all strings from the Python source code), and corresponding tutorials, from English to Spanish on a module-by-module basis. + +# Illustrations +[![Weblate Interface](./weblate.png)](./weblate.png) + +[![Welcome_Module](./welcome_module.png)](./welcome_module.png) + +[![Models_Module](./models.png)](./models.png) + +# Background and References +
See Weblate 3D Slicer and Glossary components [here](https://hosted.weblate.org/projects/3d-slicer/)
+3D Slicer extension for translation incorporation [here](https://github.com/Slicer/SlicerLanguagePacks) diff --git a/PW37_2022_Virtual/Projects/OutlierDetectionLargeScaleMammograms/image_histograms_with_scores.png b/PW37_2022_Virtual/Projects/OutlierDetectionLargeScaleMammograms/image_histograms_with_scores.png new file mode 100644 index 000000000..13fcd273b Binary files /dev/null and b/PW37_2022_Virtual/Projects/OutlierDetectionLargeScaleMammograms/image_histograms_with_scores.png differ diff --git a/PW37_2022_Virtual/Projects/OutlierDetectionLargeScaleMammograms/images_with_scores.png b/PW37_2022_Virtual/Projects/OutlierDetectionLargeScaleMammograms/images_with_scores.png new file mode 100644 index 000000000..792db454b Binary files /dev/null and b/PW37_2022_Virtual/Projects/OutlierDetectionLargeScaleMammograms/images_with_scores.png differ diff --git a/PW37_2022_Virtual/Projects/OutlierDetectionLargeScaleMammograms/models.png b/PW37_2022_Virtual/Projects/OutlierDetectionLargeScaleMammograms/models.png new file mode 100644 index 000000000..30d31ba73 Binary files /dev/null and b/PW37_2022_Virtual/Projects/OutlierDetectionLargeScaleMammograms/models.png differ diff --git a/PW37_2022_Virtual/Projects/OutlierDetectionLargeScaleMammograms/weblate.png b/PW37_2022_Virtual/Projects/OutlierDetectionLargeScaleMammograms/weblate.png new file mode 100644 index 000000000..8ae41b7a8 Binary files /dev/null and b/PW37_2022_Virtual/Projects/OutlierDetectionLargeScaleMammograms/weblate.png differ diff --git a/PW37_2022_Virtual/Projects/OutlierDetectionLargeScaleMammograms/welcome_module.png b/PW37_2022_Virtual/Projects/OutlierDetectionLargeScaleMammograms/welcome_module.png new file mode 100644 index 000000000..60ed3156b Binary files /dev/null and b/PW37_2022_Virtual/Projects/OutlierDetectionLargeScaleMammograms/welcome_module.png differ diff --git a/PW37_2022_Virtual/Projects/PRISMRendering/README.md b/PW37_2022_Virtual/Projects/PRISMRendering/README.md new file 
mode 100644 index 000000000..75202f237 --- /dev/null +++ b/PW37_2022_Virtual/Projects/PRISMRendering/README.md @@ -0,0 +1,60 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# PRISM Volume Rendering + +## Key Investigators + +- Simon Drouin, ETS Montreal +- Steve Pieper, Isomics, Cambridge MA, USA +- Andrey Titov, ETS Montreal +- Rafael Palomar, Oslo University Hospital / NTNU, Norway +- Mauro Dominguez +- Samuelle St-Onge, ETS Montreal + +# Project Description + +The goal of this project is to enable the development of advanced 3D rendering techniques in Slicer. The goal is to facilitate access to GPU shaders and enable GPU-based filtering in Slicer by improving shader access multipass rendering in VTK and Slicer. The [PRISM Module](https://github.com/ETS-vis-interactive/SlicerPRISMRendering) in Slicer will serve as a test environment for the new capabilities. + +## Long-term Objective + +1. Facilitate the development and debugging of GPU shaders for Slicer (see [this branch](https://gitlab.kitware.com/drouin-simon/vtk/-/tree/volume-shader-readability) of VTK that improves shader readability) +2. Extend the principles introduced in the PRISM module to surface rendering and other types of rendering (Adapt [DisplayableManagers](https://github.com/Slicer/Slicer/tree/main/Modules/Loadable/VolumeRendering/MRMLDM) like in the volume rendering module?) +4. Integrate GPU filters in Slicer and connect them with volume rendering in such a way that filtered volumes do not have to be transfered back to CPU memory before rendering. See work by Kyle Sunderland on VTK GPU image filters (branch [here](https://github.com/Sunderlandkyl/VTK/commits/vtkGPUImageFilter3)). +5. Explore custom rendering to simplify integration with the vtk render process. 
Prior work includes: + * Python scripted Actor/Mappers: https://www.slicer.org/wiki/Slicer3:Python:ScriptedActor + * SimpleMapper: https://github.com/IbisNeuronav/Ibis/tree/master/IbisVTK/vtkExtensions + +## PW37 Objective + +1. Finish updating the PRISMRendering module to work with Slicer 5. +2. Enable opening shaders with tags in a text editor while running Slicer + * Previous efforts by Simon Drouin were made to facilitate shader debugging by leaving tags in the shader code. Code is available in [this branch](https://gitlab.kitware.com/drouin-simon/vtk/-/tree/volume-shader-readability). + * In vtkShaderProgram class, debug functionality is available by setting the string variable FileNamePrefixForDebugging, which loads a shader from a file before rendering or dumps the shader to a file if it doesn't already exist. However, this functionality is private. Mappers should have public functions to enable this debugging mechanism. +3. Generalize the mechanism that allows the VolumeRendering module to store vtkShaderProperties in the display node to obtain the same behavior with the Models module. + +## Progress +* PRISM module code cleanup: removed underused and buggy shader editing gui +* Started adaptation of shader gui to use new Markup system in Slicer +* Discussion with Kitware: tools to debug shaders in VTK (access to full shader code and improved code readability) +* Planned a follow-up meeting on the future of rendering in Slicer after PW. + +## Next Steps +* Use more appropriate types of markups when needed (e.g. plane intersection could use lines instead of points) +* Improve architecture of the module +* Support for more shader parameter types +* Support new VTK rendering functionality: Physically-based shading model, ambient occlusion and shadowing. + +# Illustrations + + +# Background and References + + +- PRISM Module [GitHub repository](https://github.com/ETS-vis-interactive/SlicerPRISMRendering). 
+- Incomplete [documentation](https://vtk.org/Wiki/VTK/ProgrammableMultiVolumeRendering) for the Volume Rendering shader used by vtkOpenGLVolumeRaycastMapper +- [Original article](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0193636) about the PRISM framework that served as a basis to develop the PRISM module in Slicer +- Previous project weeks + - [PW36](https://projectweek.na-mic.org/PW36_2022_Virtual/Projects/PRISMRendering/) + - [PW35](https://projectweek.na-mic.org/PW35_2021_Virtual/Projects/PRISM_volume_rendering/) + - [PW30](https://projectweek.na-mic.org/PW30_2019_GranCanaria/Projects/GLSLShaders/) + - [PW28](https://projectweek.na-mic.org/PW28_2018_GranCanaria/Projects/MultiVolumeRendering/) diff --git a/PW37_2022_Virtual/Projects/PercPlan/README.md b/PW37_2022_Virtual/Projects/PercPlan/README.md new file mode 100644 index 000000000..c68acc133 --- /dev/null +++ b/PW37_2022_Virtual/Projects/PercPlan/README.md @@ -0,0 +1,36 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Percutaneous Planning + +## Key Investigators + +- Nirav Patel (IIT Madras, India) +# Project Description + +The aim of this project is to create a Slicer extension/module that would streamline the percutaneous trajectory planning. +## Objective + + + +1. Objective A. Streamline percutaneous planning interface by integrating some of the modules such as fiducial markers, models, transform, volume reslice driver etc + +## Approach and Plan + + + +1. 
We already have created a preliminary module and we would like to get feedback from the IGT community and see if this could be helpful for integration with the low-cost navigation system NousNav + +## Progress and Next Steps + + + + +# Illustrations + + +![Userinterface and planned trajectory](Slicer_screenshot.png) + + +# Background and References + + diff --git a/PW37_2022_Virtual/Projects/PercPlan/Slicer_screenshot.png b/PW37_2022_Virtual/Projects/PercPlan/Slicer_screenshot.png new file mode 100644 index 000000000..5ae459e32 Binary files /dev/null and b/PW37_2022_Virtual/Projects/PercPlan/Slicer_screenshot.png differ diff --git a/PW37_2022_Virtual/Projects/README.md b/PW37_2022_Virtual/Projects/README.md new file mode 100644 index 000000000..572aeb94b --- /dev/null +++ b/PW37_2022_Virtual/Projects/README.md @@ -0,0 +1,17 @@ +# How to create a new project + +- Post questions about the project idea and team on the [Project Week forum][forum], our communication mechanism as of PW28. +- When you are ready, add a new entry in the list of **Projects** by creating a new `README.md` file in subfolder in `Projects` folder, and copying contents of [project description template][project-description-template] file into it. Step-by-step instructions for this are: + +1. Open [project description template][project-description-template] and copy its full content to the clipboard +1. Go back to [Projects](https://github.com/NA-MIC/ProjectWeek/tree/master/PW37_2022_Virtual/Projects) folder on GitHub +1. Click on "Create new file" button +1. Type `YourProjectName/README.md` +1. Paste the previously copied content of project template page into your new `README.md` +1. Update at least your project's __title, key investigators, project description sections__ +1. 
Add a link to your project to the [main project list](..#projects-how-to-add-a-new-project) + +Note: some steps above may require creating a [pull request](https://help.github.com/articles/creating-a-pull-request/) until your account is given write access. + +[forum]: https://discourse.slicer.org/c/community/project-week +[project-description-template]: https://raw.githubusercontent.com/NA-MIC/ProjectWeek/master/PW30_2019_GranCanaria/Projects/Template/README.md diff --git a/PW37_2022_Virtual/Projects/Slicer5Plus/README.md b/PW37_2022_Virtual/Projects/Slicer5Plus/README.md new file mode 100644 index 000000000..62cc390e8 --- /dev/null +++ b/PW37_2022_Virtual/Projects/Slicer5Plus/README.md @@ -0,0 +1,66 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Slicer 5++ : Roadmap forward for 3D Slicer + +## Key Investigators + +- Sam Horvath (Kitware Inc.) +- Andras Lasso (Queen's University, Kingston, Canada) +- Steve Pieper (Isomics Inc., Cambridge, MA, USA) +- Jean-Christophe Fillion-Robin (Kitware Inc.) + +# Project Description + +During this project week, we would like to discuss a *broad* roadmap for major features and changes to Slicer between now and Slicer 6 + +## Objective + + + +1. Develop that starting point of the Slicer 6 roadmap + +## Approach and Plan + + + +1. Discussion! + - Had breakout session Wed @ 2pm +1. Should Qt6 = Slicer 6? + +## Working Roadmap + + +1. Long term (Slicer 6 to be released in ~3 years) + 1. Modularization ("pip install slicer") + - Heavily dependent on improved VTK remote module support + - Exampe of current progress with VTK remote modules can be found in [SlicerLookingGlass](https://github.com/Kitware/LookingGlassVTKModule) + - This modularization will support 3D Slicer cloud projects as well as simplifying the build system + 1. 
Distributing a Slicer SDK + - Allowing development on C++ modules without building all of Slicer + - We have been working on making VTK SDKs [available](https://vtk.org/files/wheel-sdks/) + 1. Ability to update an installed Slicer version + 1. Laying the groundwork for hybrid desktop / web Slicer applications +1. Short term (Slicer minor releases in the next year) + 1. Using oriented image data support already available in VTK + 1. Extension auto updates (already available for preview) + 1. Expanding language support + 1. Improved volume display mechanism + - Volume behavior is inconsistent for show/hide compared to other data + 1. Qt 6 transition + - Qt 6 support already in VTK upstream + - ALso look at Pyside6 transition + 1. Investigate using libraries (openslide, etc) to support smooth viewing of larger images + +## Next steps + +1. Transition this roadmap to Slicer GitHub wiki +2. Begin hacking! + + +# Background and References + + + +- [Slicer 5 Roadmap](https://www.slicer.org/wiki/Documentation/Labs/Slicer5-roadmap) +- [3D Slicer Roadmap](https://github.com/Slicer/Slicer/wiki/Roadmap) +- [Slicer 6 Roadmap notes](https://docs.google.com/document/d/1X3Lv5yNBxViB-dMEYjEaw1g9825mlEVH-8R40jGSnJQ/edit?usp=sharing) diff --git a/PW37_2022_Virtual/Projects/SlicerBatchAnonymize/README.md b/PW37_2022_Virtual/Projects/SlicerBatchAnonymize/README.md new file mode 100644 index 000000000..36bc52863 --- /dev/null +++ b/PW37_2022_Virtual/Projects/SlicerBatchAnonymize/README.md @@ -0,0 +1,60 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Updates to Batch Anonymizer +SlicerBatchAnonymize is a Slicer Extension that strips off metadata from dicom files, and converts them to various file formats. +The work during project week will involve investigating and creating prototypes for defacing in medical images, support and single file dicom export. 
+ +## Key Investigators + +- Hina Shah (UNC Chapel Hill) +- Juan Carlos Prieto (UNC Chapel Hill) +- Maxime Gillot (UoM) + +# Project Description + + +The very first step to make any medical data available to the research community is its anonymization. [SlicerBatchAnonymize](https://github.com/hina-shah/SlicerBatchAnonymize) +is a 3D Slicer extension to anonymize a batch of DICOM images by stripping most of the metadata (image information stays intact). +The tool currently provides a user-friendly UI, supports export to several popular research formats including DICOM series, and also generates a crosswalk file for future use. + + +## Objective + + + +1. Support export to a single DICOM file. +2. Investigate existing defacing methods +3. Come up with a prototype for defacing of CBCT images. + +## Approach and Plan + + + +1. Use either DICOMlib or pydicom for export to DICOM as a single file. +2. Get guidance from the community on existing defacing methods, and look up existing tools/literature available for defacing +3. Decide and implement a prototype for defacing of CBCT images + +## Progress and Next Steps + + + +1. During this project week we implemented a functionality that will call [AMASS extension](../AMASSS_CBCT/README.md). This CBCT segmentation extension will return a mask for the skin ROI. Using this mask we zero out those voxels from the CBCT scan and save the resulting dicom without metadata and defaced. + +# Illustrations + + +![image](https://user-images.githubusercontent.com/22948571/149800624-b1468449-96a1-467c-ad49-7559e68fb74b.png) +image +![image](https://user-images.githubusercontent.com/22948571/176885382-1e115e85-3a17-4869-9c7a-9d7df5702b95.png) + + + + +# Background and References + + +1. [Source code in Github repository](https://github.com/hina-shah/SlicerBatchAnonymize) +2. 
[AMASS extension](https://github.com/Maxlo24/Slicer_Automatic_Tools) diff --git a/PW37_2022_Virtual/Projects/SlicerColoc-Z-Stats/README.md b/PW37_2022_Virtual/Projects/SlicerColoc-Z-Stats/README.md new file mode 100644 index 000000000..7d27fef1a --- /dev/null +++ b/PW37_2022_Virtual/Projects/SlicerColoc-Z-Stats/README.md @@ -0,0 +1,56 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Write full project title here + +## Key Investigators + +- Xiang Chen (Memorial University of Newfoundland) +- Oscar Meruvia-Pastor (Memorial University of Newfoundland) +- Touati Benoukraf (Memorial University of Newfoundland) + +# Project Description + +This is an extension for computing the percentage of colocalization(Spatial overlap between different channels) of Z-stack TIFF images, which developed for category 'Quantification'. + +## Objective + + + +1. As of now, the computation of my module is a bit slow(when the threshold range for each selected channel is very large), so I'm hoping to get help from slicer experts to make it faster. + +## Approach and Plan + +1. Collaborate with Slicer community members during this Project Week. + +## Updates and Next Steps + + + +1. Currently my extension has already implemented the calculation functionality and the current goal is to increase the calculation speed. +2. As shown below, The calculation time has been greatly reduced after removing all unnecessary code related to creating the closed surface representations for all segments. + +**Before:** +
+ +**Now (The calculation time has been shortened to less than 30s):** +
+ +When the threshold range is not that large, the calculation time will be shorter: +
+ +**Next Steps:** +1. Convert the volume corresponding to each channel in the ROI to a numpy array. +2. Apply thresholding to all numpy array of the volumes within the ROI. +3. Detect all intersections among all channels using numpy indexing. +4. Count the number of voxels resulting from step 3 and multiply by the volume of one voxel. + +# Illustrations + +Users can threshold the volume rendering of the input Z-stack image in the 3D view window, select the region of interest(ROI) by the bounding box, and get a Venn diagram that shows the critical metric of colocalization's percentage. +[Extension ScreenShots](https://github.com/ChenXiang96/SlicerColoc-Z-Stats/blob/main/Images/Screenshots.png) + + +# Background and References +[The link to the source code repository](https://github.com/ChenXiang96/SlicerColoc-Z-Stats) + +[Download links to sample image](https://drive.google.com/file/d/1IYlggsikgtQR7jXE83sSS2ZtMCuswsA0/view) diff --git a/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/PET1.zip b/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/PET1.zip new file mode 100644 index 000000000..b842962df Binary files /dev/null and b/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/PET1.zip differ diff --git a/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/PET2.zip b/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/PET2.zip new file mode 100644 index 000000000..f72660ec6 Binary files /dev/null and b/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/PET2.zip differ diff --git a/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/PolarMap.svg b/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/PolarMap.svg new file mode 100644 index 000000000..63cd42679 --- /dev/null +++ b/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/PolarMap.svg @@ -0,0 +1,7501 @@ + + + + diff --git a/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/README.md b/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/README.md new file mode 100644 index 000000000..04d992540 --- /dev/null +++ 
b/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/README.md @@ -0,0 +1,91 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# SlicerHeart Polar Map Module + +## Key Investigators + +- Connor Haberl (University of Ottawa Heart Institute) +- Investigator 2 (Affiliation) +- Investigator 3 (Affiliation) + +# Project Description + + + +
+ +This project aims to add polar map funcitonality to SlicerHeart. Polar maps provide a standardized 2D representation of the 3D LV myocardium for consistent comparisons across different patients or over time, and allow the entire LV to be viewed at once in a single 2D image. +Polar maps are commonly used in: +-nuclear imaging (PET, SPECT) to show perfusion or other radiotracer uptake +-scar imaging (LGE MRI or LIE CT) to show contrast washout to identify regions of scar +-anatomical imaging (CT, MRI or Echo) to show myocardial wall motion or thickening, typically to identify regions or scar or hypomobility +This project looks to create a module within SlicerHeart to enable the creation of polar maps for each of these use-cases. + +## Objective + + +(Within Project Week) +1. Objective A. Create LV polar maps from surfaces (e.g. a 3D polydata surface representing scalar values at either the endo- or epi- cardial surface) +2. Objective B. Create LV polar maps from nuclear imaging volumes (e.g. perfusion mapping from PET scan) +3. Objective C. Allow user to interact (e.g. draw a segmentation) on the polar map, and translate those segmentations to the 3D scene. + +(Beyond Project Week) +4. Objective D. Create LV polar maps from CT/MR volumes showing washout (e.g. Late Gadolineum uptake in LGE MRI to identify regions of scar) +5. Objective E. Create LV polar maps from 4D CT/MR/Echo volumes showing wall motion & thickening + + +## Approach and Plan + + + +1. Create method for automatic LV long-axis detection (or require user to define using markers at apex and center of LV) +1. Create method for interactive plot of polar coordinates in Slicer (similar to matplotlib 'polar' plot type https://matplotlib.org/stable/gallery/pie_and_polar_charts/polar_demo.html) +1. Create method for apical sampling (or, eventually, allow user to select one of 3 common methods of apical sampling for polar maps) +1. 
Implement some level of semi-automatic LV myocardial segmentation (anything existing to leverage?) + +## Progress and Next Steps + + + +1. (Complete) Create initial extension with basic polar plot capabilities for surface data +1. (To Do) Try to solve objective C - interactive plots with connection to 3D +1. (To Do) Work with sample data to develop sampling method for nuclear imaging data to create polar map. + +# Illustrations + + +An example of 17 segment display of LV wall thickening: + + + + +# Background and References + + +1. Sample Data + +We have some PET and SPECT images to use for nuclear imaging polar map development. We also have 3D Electroanatomic Voltage Maps for converting a 3D surface into polar map. These will be linked prior to project week. +TBD - we have not yet found any LGE MRI or LIE CT images to use for testing yet, so we wll probably start by working on the pathways for which we have data to analyze. + +2. What is a polar map? + +Polar maps enable properties of the left ventricle, a 3D object, to be viewed entirely in 2D. Apical segments of the LV are shown in the center of the polar map, and basal segments are shown near the perimeter. +Polar maps are viewed as if looking at the myocardium from the apex with the anterior LV wall at the top and the septal wall to the left. They are commonly used to show perfusion or tissue health. + +3. What is the 17-segment heart model? + + +The 17 segment model is a standardized myocardial segmentation nomenclature used to discretize segments of the left ventricular myocardium. The model describes the left ventricle as 17 distinct segments - 6 basal segments, 6 mid-ventricular segments, and 5 apical segments. +Physicians may be familiar with looking at the heart in transaxial slices on CT, in cardiac planes, or literally through the chest wall (e.g. surgeon). The 17-segment model is very commonly used and can act as a common-ground for physicians with different backgrounds. 
+Assigning characteristics to each of these 17-segments is commonly used in cardiology, be it to describe the health of the tissue for the corresponding region, or to draw attention to an anatomical anomaly in a certain region. It is particularly helpful in considering bloodflow because each of the 3 main cardiac arteries source distinct segments of the 17-segment model. +More on the definition of the 17-segment model can be found in [this American Heart Association paper] (https://www.ahajournals.org/doi/pdf/10.1161/hc0402.102975) + +4. Apical Sampling Methods + + + +All polar maps sample the basal and mid segments the same, but there are 3 different convensions to how to sample the apex. These are described in more detail in + Lin, G. Sharat et al. “Automated Quantification of Myocardial Ischemia and Wall Motion Defects by Use of Cardiac SPECT Polar Mapping and 4-Dimensional Surface Rendering.” Journal of nuclear medicine technology 34.1 (2006): 3–17. Print. diff --git a/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/SPECT1.zip b/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/SPECT1.zip new file mode 100644 index 000000000..6a34dc142 Binary files /dev/null and b/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/SPECT1.zip differ diff --git a/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/SVGImport.py b/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/SVGImport.py new file mode 100644 index 000000000..7e8e3b27a --- /dev/null +++ b/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/SVGImport.py @@ -0,0 +1,29 @@ +import xml.etree.ElementTree as ET + +ns = {'inkscape': 'http://www.inkscape.org/namespaces/inkscape', + 'sodipodi': 'http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd', + 'ns': 'http://www.w3.org/2000/svg'} + +polarMapFP = "https://github.com/NA-MIC/ProjectWeek/blob/master/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/PolarMap.svg" + +tree = ET.parse(polarMapFP) +root = tree.getroot() + +for group in root.findall('ns:g', ns): + for path in 
group[0].findall('ns:path', ns): + # try setting each element from 0-10 degrees to white + if(path.attrib["{http://www.inkscape.org/namespaces/inkscape}label"] == "0-10"): + style = path.attrib["style"] + style = style[0:20] + "ffffff" + style[27:] + print(style) + #style[21:27] = "FFFFFF" + path.set('style', style) + +# Saving the changed SVG, though ideally this is unnecessary +outputFN = "C:\\Users\\haber\\Documents\\School\\VT Ablation Project\\3D Slicer\\Project Week 37\\testOutput.svg" +tree.write(outputFN) + +# Show the updated polar map in Slicer +imageWidget = qt.QLabel() +#!!! DO SOMETHING HERE TO SHOW THE SVG IN SLICER !!! +imageWidget.show() diff --git a/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/SurfaceMaps.zip b/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/SurfaceMaps.zip new file mode 100644 index 000000000..b430f500f Binary files /dev/null and b/PW37_2022_Virtual/Projects/SlicerHeartPolarMaps/SurfaceMaps.zip differ diff --git a/PW37_2022_Virtual/Projects/SlicerInternationalization/README.md b/PW37_2022_Virtual/Projects/SlicerInternationalization/README.md new file mode 100644 index 000000000..a5d642b25 --- /dev/null +++ b/PW37_2022_Virtual/Projects/SlicerInternationalization/README.md @@ -0,0 +1,64 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Slicer Internationalization + +## Key Investigators + +- Sonia Pujol, (Brigham and Women's Hospital, Harvard Medical School, Boston, MA, USA) +- Steve Pieper (Isomics Inc., Cambridge, MA, USA) +- Andras Lasso (Queen's University, Kingston, Canada) +- Mamadou Camara (Cheikh Anta Diop University, Dakar, Senegal) +- Adama Wade (Cheikh Anta Diop University, Dakar, Senegal) +- Luiz Otavio Murta Junior (University of Sao Paulo, Ribeirao Preto, Brazil) +- Vinicius Pavanelli Vianna (University of Sao Paulo, Ribeirao Preto, Brazil) +- Oumar Sy (Cheikh Anta Diop University, Dakar, Senegal) +- Mohamed Alalli BILAL (Ecole Supérieure polytechnique, Senegal) +- Adriana H. 
Vilchis González (Universidad Autónoma del Estado de México, Mexico) +- Mouhamed DIOP (Cheikh Anta Diop University, Dakar, Senegal) + +# Project Description + +The goal of the project is to develop a novel software infrastructure to enable the localization of 3D Slicer to multiple languages. The project is funded through an Essential Open Source Software for Science grant of the Chan Zuckerberg Initiative. + +## Objective + + + +1. Objective A. To identify members of the Slicer community interested in contributing translations in their native language. +1. Objective B. To connect Slicer Weblate to ReadTheDocs internationalization infrastructure + +## Approach and Plan + + + +1. Daily translation hackathons at 12 pm EST (Boston time) with members of the Slicer community +1. Use case of Weblate project translation + +## Progress and Next Steps + + + +Describe specific steps you **have actually done**. +1. We have made lots of progress on the translation of the DICOM, Models, Segmentation, and Volumes modules as well as the DICOM tutorial in french +2. Some progress has been made with the extraction of the translatable strings in the Python code +3. We set up test projects (main and translations) on ReadTheDocs to try out configurations and tools in order to determine the best fit for the documentation translation infrastructure, before applying those to the official Slicer documentation +4. We are experimenting with a file structure of one file per documentation section to ensure coherence in the order in which the sections are translated on Weblate +5. An infrastructure is being set up to automatically mark translatable strings in the source code +6. 
Tests are being made to come up with the best way to manage translations over different versions of Slicer + +# Illustrations + + +![3D Slicer's DICOM module translated to french](images/DICOM.png) +![3D Slicer's Models module translated to french (1)](images/Modules.png) +![3D Slicer's Models module translated to french (2)](images/Modules2.png) +![DICOM Tutorial translated to french (1)](images/Tutorial1.png) +![DICOM Tutorial translated to french (2)](images/Tutorial2.png) +![DICOM Tutorial translated to french (3)](images/Tutorial3.png) + +# Background and References + + diff --git a/PW37_2022_Virtual/Projects/SlicerInternationalization/images/DICOM.png b/PW37_2022_Virtual/Projects/SlicerInternationalization/images/DICOM.png new file mode 100644 index 000000000..ec2d33fe4 Binary files /dev/null and b/PW37_2022_Virtual/Projects/SlicerInternationalization/images/DICOM.png differ diff --git a/PW37_2022_Virtual/Projects/SlicerInternationalization/images/Modules.png b/PW37_2022_Virtual/Projects/SlicerInternationalization/images/Modules.png new file mode 100644 index 000000000..e16dcc56f Binary files /dev/null and b/PW37_2022_Virtual/Projects/SlicerInternationalization/images/Modules.png differ diff --git a/PW37_2022_Virtual/Projects/SlicerInternationalization/images/Modules2.png b/PW37_2022_Virtual/Projects/SlicerInternationalization/images/Modules2.png new file mode 100644 index 000000000..c1d4953a7 Binary files /dev/null and b/PW37_2022_Virtual/Projects/SlicerInternationalization/images/Modules2.png differ diff --git a/PW37_2022_Virtual/Projects/SlicerInternationalization/images/Tutorial1.png b/PW37_2022_Virtual/Projects/SlicerInternationalization/images/Tutorial1.png new file mode 100644 index 000000000..86a2be0e1 Binary files /dev/null and b/PW37_2022_Virtual/Projects/SlicerInternationalization/images/Tutorial1.png differ diff --git a/PW37_2022_Virtual/Projects/SlicerInternationalization/images/Tutorial2.png 
b/PW37_2022_Virtual/Projects/SlicerInternationalization/images/Tutorial2.png new file mode 100644 index 000000000..2bb2e33d7 Binary files /dev/null and b/PW37_2022_Virtual/Projects/SlicerInternationalization/images/Tutorial2.png differ diff --git a/PW37_2022_Virtual/Projects/SlicerInternationalization/images/Tutorial3.png b/PW37_2022_Virtual/Projects/SlicerInternationalization/images/Tutorial3.png new file mode 100644 index 000000000..ae5e58866 Binary files /dev/null and b/PW37_2022_Virtual/Projects/SlicerInternationalization/images/Tutorial3.png differ diff --git a/PW37_2022_Virtual/Projects/SlicerLiver/README.md b/PW37_2022_Virtual/Projects/SlicerLiver/README.md new file mode 100644 index 000000000..1515d7550 --- /dev/null +++ b/PW37_2022_Virtual/Projects/SlicerLiver/README.md @@ -0,0 +1,95 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Slicer-Liver + +## Key Investigators + +- Rafael Palomar (Oslo Unviersity Hospital/NTNU, Norway) +- Gabriella d'Albenzio (Oslo University Hospital, Norway) +- Ruoyan Meng (NTNU, Norway) +- Ole Vegard Solberg (SINTEF, Norway) +- Geir Arne Tangen (SINTEF, Norway) +- Javier Pérez de Frutos (SINTEF, Norway) + +# Project Description + +This project will continue the development of the *Slicer-Liver* extension +that will be developed through the [ALive project](https://alive-research.no). +The objective of the Slicer-Liver extension is to provide researchers +with tools to perform liver analytics towards planning of liver interventions +(resections, ablations). At this point in the project we need to port early +prototypes of our resection planning algorithms into 3D Slicer. + +## Objectives + + - Liver resection planning: + + 1. Integration of volumetric analysis for resections + 2. Improve the UI for managing resections + + - Computation of vascular territories: + + 1. Calculation of vascular liver-segments based on multiple vascular systems (hepatic arterial/vein, portal vein) + 2. 
Structuring of data for the liver module – implementing object hierarchy + 3. Storing and reloading of module data + 4. User-friendly GUI for liver module – optimize for clinical use. + +## Approach and Plan + +For this Project week we will build on the advances obtained in the las project +week. Some of the objectives are based on new functionality that has been tested +but not integrated yet, while some other objectives are refinement of +functionality previusly integrated in [Slicer-Liver +PW36](https://github.com/NA-MIC/ProjectWeek/tree/master/PW36_2022_Virtual/Projects/Slicer-Liver + +## Illustrations + + + + + + + + + + + + + + + + + + + +## Progress and Next Steps + +In this project week, we have changed the user interaction to use segmentations instead of models, which greatly simplifies the user interaction. + +There is a PR for adding this extension to the extension manager. This be effective when we prepare a tutorial video on the use of the extension. + +There are still standard features (e.g., volumetry computation) and new research features (e.g, risk maps visualization, new planning algorithms) that we would like to implement in future Project Weeks. + +# Background and References +1. [Slicer-Liver PW36](https://github.com/NA-MIC/ProjectWeek/tree/master/PW36_2022_Virtual/Projects/Slicer-Liver) (January 2022) +1. [Slicer-Liver PW35](https://github.com/NA-MIC/ProjectWeek/tree/master/PW35_2021_Virtual/Projects/Slicer-Liver) (June 2021) +1. [NorMIT-Plan at NA-MIC project week](https://projectweek.na-mic.org/PW33_2020_GranCanaria/Projects/NorMIT-Plan/) (january 2020) +1. [NorMIT-Plan at NA-MIC project week](https://projectweek.na-mic.org/PW34_2020_Virtual/Projects/SlicerLiverAnalysis/) (December 2020) +1. Palomar, Rafael, et al. "A novel method for planning liver resections using deformable Bézier surfaces and distance maps." Computer Methods and Programs in Biomedicine 144 (2017): 135-45. +1. 
Palomar, Rafael, et al. "Surface reconstruction for planning and navigation of liver resections." Computerized Medical Imaging and Graphics 53 (2016): 30-42. diff --git a/PW37_2022_Virtual/Projects/SlicerLiver/bezier_surface_markup.png b/PW37_2022_Virtual/Projects/SlicerLiver/bezier_surface_markup.png new file mode 100644 index 000000000..9d1fdb191 Binary files /dev/null and b/PW37_2022_Virtual/Projects/SlicerLiver/bezier_surface_markup.png differ diff --git a/PW37_2022_Virtual/Projects/SlicerLiver/resection_initialization.png b/PW37_2022_Virtual/Projects/SlicerLiver/resection_initialization.png new file mode 100644 index 000000000..dcd1f430f Binary files /dev/null and b/PW37_2022_Virtual/Projects/SlicerLiver/resection_initialization.png differ diff --git a/PW37_2022_Virtual/Projects/SlicerLiver/resection_planning.png b/PW37_2022_Virtual/Projects/SlicerLiver/resection_planning.png new file mode 100644 index 000000000..987f1d8f0 Binary files /dev/null and b/PW37_2022_Virtual/Projects/SlicerLiver/resection_planning.png differ diff --git a/PW37_2022_Virtual/Projects/SlicerLiver/screenshot.png b/PW37_2022_Virtual/Projects/SlicerLiver/screenshot.png new file mode 100644 index 000000000..5310aa739 Binary files /dev/null and b/PW37_2022_Virtual/Projects/SlicerLiver/screenshot.png differ diff --git a/PW37_2022_Virtual/Projects/SlicerPhotoGram/README.md b/PW37_2022_Virtual/Projects/SlicerPhotoGram/README.md new file mode 100644 index 000000000..2f3400e9c --- /dev/null +++ b/PW37_2022_Virtual/Projects/SlicerPhotoGram/README.md @@ -0,0 +1,94 @@ +# Slicer module for 3D stereophotogrammetry + +## Key Investigators + +- Chi Zhang (Seattle Children's Research Institute) +- A. 
Murat Maga (University of Washington and Seattle Children's Research Institute) +- Steve Pieper (Isomics, Inc) + +# Project Description + +3D stereophotogrammetry has become increasingly common in biomedical and clinical fields as an economic, fast, flexible and safe (non-invasive, no radiation) way to achieve accurate 3D surface models. In particular, photogrammetry can acquire realistic surface texture that allows researchers and clinicians to accurately assess traits and place landmarks. + +To further reduce the cost and facilitate the use of photogrammetry, we want to build an open-source pipeline for photogrammetry in 3D Slicer, from digital image post-processing to photogrammetric 3D mesh reconstruction and texturing, that incorporates open-source software and packages. Eventually, we will also create guidelines for clinicians and researchers, especially those using mobile devices (e.g., smart phone), for Slicer-based photogrammetry. + + +## Objective + +1. Build open-source software and packages associated with photogrammetry (e.g., WebODM, ExifTool, OpenCV, OpenSFM) into Slicer. Create GUI for adjusting parameters. +2. Loading multi-png textured model into Slicer +3. 3D model and texture building from point clouds created from photogrammetry software. +4. Import partial textured models into Slicer, clean them up, align and fuse. + +## Approach and Plan +1. Figure out what our issues and priorities are: + * how important is visual fidelity vs convenience (point clouds are more convenient but lower image quality compared to textured models) + * how much do we imagine users interactively fixing geometry at various levels of detail + * how much do we need to rely on third party code and what maintenance or licensing considerations come with it +3. Determine what parts of the process Slicer can be particularly useful for (i.e. should we bring in the raw images and estimate the camera parameters or assume that other software has already done that). +4. 
Work to define and describe the ideal interactive workflow leveraging existing Slicer functionality like markups and segmentations. + +## Progress and Next Steps +1. Adapt an online example to render an obj file with multiple resources through `vtkRenderer`. First build a `vtkOpenGLPolyDataMapper` to link to the mesh and store each texture image according to material names, and then add a `vtkOpenGLActor` to refer to the content of the mapper. Finally, pass the actor to the renderer. However, for our dataset, `vtkRenderer` crashed whenever I rendered the model with more than 14 texture images (on Windows), even though the model has 111 accompanying texture images. Steve confirmed the same issue on Mac. Below is an example of rendering with 10 texture images. +

+ +

+ +2. Andras suggested splitting the mesh according to texture images, with each part being mapped with a few texture images. However, the texture appears to have one dominant image; the rest just cover isolated areas. This may make mesh splitting complicated. Below is the model matched with the 1st & 10th texture images. +

+ + +

+ +3. I then followed Steve and Andras' suggestions to try using Blender to merge texture images. I merged the first 25 texture images into one using Blender and mapped it to the model using the Texture Modeler in Slicer. Slicer successfully rendered the model with the texture. The result is shown below. +

+ +

+ +* The resolution is pretty low. This is probably because of the scaling of each texture image after merging into one texture image. Shown below are the 1st texture image (the dominant one, left) and the merged one (right). In the merged one, the texture of the specimen is basically concentrated at the lower left corner. +

+ + +

+ +4. Future directions: +* Merge texture images properly: + * I did not merge all 111 texture images because Blender requires me to add the same texture mapping node for every image. It appears that we can do python scripting in Blender. Thus, it might be useful to explore looping through every image using python and later connect to Slicer. + * Find a proper way to merge texture to retain the resolution. I'm also checking with the ODM people to see if they can do it. +* Follwing Andras' suggestion, directly access vtkRenderer() in Slicer scene to have stable rendering. +* Steve suggested geometry accuracy is more important than visual fidelity at this moment and we can archiving images for adding more algorithms in the future, such as machine learning. For the near future, we can focus on first getting a pipeline based on ODM. In the long run, we should definitely consider adding machine/deep learning algorithms, for example, to image registration, which is the foundation of geometric & texture accuracy in structure-from-motion photogrammetry. This can also greatly improve the efficieny of photo taking. Currently, we have to take a lot photos carefully to ensure proper registration but it is still tricky. We will have more discussions with Murat. +* We will also discuss how much we can rely on Slicer & how much we have to use 3rd party software & packages. + + +# Illustrations +Example + +Example textured model (.obj) exported from WebODM viewed in MeshLab + + + +Example camera positions reconstructed by WebODM, viewed in WebODM + + + +Example point cloud exported from webODM and loaded in Slicer using [this code](https://gist.github.com/pieper/e4ca5e4c753c5ed6c61656d25b93402c). + + + + + + + +# Background and References + +1. The repository for SlicerPhotoGram: [https://github.com/SlicerMorph/PhotoGram](https://github.com/SlicerMorph/PhotoGram). + +2. 
Currently, we have created a script [output_cropped_images.py](https://github.com/SlicerMorph/PhotoGram/blob/main/output_cropped_images.py) for loading a digital image sequence as a volume, cropping each image using the ROI tool for reducing background noise, and exporting each cropped slice as a tiff image. + +3. WebODM for photogrammetry that relies on OpenCV and OpenSFM: [https://www.opendronemap.org/docs/](https://www.opendronemap.org/docs/) and [https://github.com/OpenDroneMap/WebODM](https://github.com/OpenDroneMap/WebODM). + +4. We have a script for loading WebODM point clouds in Slicer: [https://gist.github.com/pieper/e4ca5e4c753c5ed6c61656d25b93402c](https://gist.github.com/pieper/e4ca5e4c753c5ed6c61656d25b93402c) diff --git a/PW37_2022_Virtual/Projects/SlicerPoissonShapeAnalysis/README.md b/PW37_2022_Virtual/Projects/SlicerPoissonShapeAnalysis/README.md new file mode 100644 index 000000000..37e2439d4 --- /dev/null +++ b/PW37_2022_Virtual/Projects/SlicerPoissonShapeAnalysis/README.md @@ -0,0 +1,49 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Write full project title here + +## Key Investigators + +- Yi Gao (Shenzhen University) + +# Project Description + + + +Given two groups of shapes, represented as binary volumes, the Poisson shape analysis algorithm extracts the region of statistical difference on the shape. + +## Objective + + + +1. Objective A. Add the Poisson shape analysis module as an extension to Slicer. + +## 20220701 What's been done in the week +1. Study the qSlicerCLIModuleUIHelper class to see how to select multiple files. + +## Approach and Plan + + + +1. Make the ctkPathLineEdit able to select multiple file names. + +## Progress and Next Steps + + + +1. Currently the CLI can run from the command line and read multiple files. But from Slicer GUI, since the ctkPathLineEdit can only open a single file, it can't read multiple shape files for analysis. 
+ +# Illustrations + + + +# Background and References + + + +https://github.com/gaoyi/SlicerPoissonShapeAnalysis + +Gao Y, Bouix S. Statistical Shape Analysis using 3D Poisson Equation---A Quantitatively Validated Approach. Medical Image Analysis. 2016 Jan 15. diff --git a/PW37_2022_Virtual/Projects/SlicerScope/README.md b/PW37_2022_Virtual/Projects/SlicerScope/README.md new file mode 100644 index 000000000..01d29d0f2 --- /dev/null +++ b/PW37_2022_Virtual/Projects/SlicerScope/README.md @@ -0,0 +1,59 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# SlicerScope extension + +## Key Investigators + +- Yi Gao (Shenzhen University) +- Steve Pieper (Isomics) +- Andras Lasso (Queen's University) + +# Project Description + + +SlicerScope is a microscopy tool for operating on slides like histopathology (H&E) images. + +## Objective + + + +1. Objective A. Add the SlicerScope, an open platform for whole slide histopathology image as a Slicer extension. +1. Objective B. Add the nucleus segmentation module into the extension. + +## 20220701 What's been done in the week +1. With the help from Steve and Andras, the extension has been submitted to the extension manager. We will keep watching it's bugs and make improvements. +1. The extension has been renamed to "BigImage" to better reflect its nature. + +## Approach and Plan + + + +1. It's currently already on github. The extension s4ext file has been updated and the pull-request has been created. We will review the extension with the benevolent dictators. +1. Add the nucleus segmentation (non-deep learning version) to a module under this extension. + + +## Progress and Next Steps + + + + +1. 
The extension and the module have already been pushed to github + +# Illustrations + + + +![Large image viewing module panel](https://user-images.githubusercontent.com/920557/174559913-77ccaee3-5063-4fa5-b562-dd1ad3b24236.png) +![Large image viewing module: zoom out](https://user-images.githubusercontent.com/89077084/174545844-83a5f601-32ca-4d88-b328-b3a0cba0e922.png) +![Large image viewing module: zoom in](https://user-images.githubusercontent.com/89077084/174545870-063ae0a8-2e3d-49bd-8d61-08ca19c5dbb6.png) + +# Background and References + + + +See this webpage for more details. + +Xiaxia Yu, Bingshuai Zhao, Haofan Huang, Mu Tian, Sai Zhang, Hongping Song, Zengshan Li, Kun Huang, Yi Gao, "An Open Source Platform for Computational Histopathology," in IEEE Access, vol. 9, pp. 73651-73661, 2021, doi: 10.1109/ACCESS.2021.3080429. diff --git a/PW37_2022_Virtual/Projects/SlicerTMS/README.md b/PW37_2022_Virtual/Projects/SlicerTMS/README.md new file mode 100644 index 000000000..b1b5f1cbf --- /dev/null +++ b/PW37_2022_Virtual/Projects/SlicerTMS/README.md @@ -0,0 +1,84 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Real-time visualization for transcranial magnetic stimulation (TMS) + +## Key Investigators + +- Loraine Franke (University of Massachusetts Boston) +- Jax Luo (BWH & Harvard Medical School) +- Raymond Yang (University of Massachusetts Boston) +- Lipeng Ning (BWH & Harvard Medical School) +- Steve Pieper (Isomics, Inc.) +- Daniel Haehn (University of Massachusetts Boston) + +# Project Description + +Transcranial magnetic stimulation is a noninvasive procedure used for treating depression with magnetic and electric fields to stimulate nerve cells. +A TMS coil is slowly moved over the subject's head surface to target certain areas in the brain. +Our project aims to develop a deep-learning powered software for real-time E-Field prediction and a visualization of TMS within 3D Slicer. 
+ +## Objective + +Real-time visualization of an electric field (E-field) for transcranial magnetic stimulation (TMS) on the brain surface, visualization through an AR app (over browser). + +## Approach and Plan +- We created a TMS module in Slicer mapping NifTi file onto brain mesh with 3D TMS coil that can be moved by the user (done) -> TODO: create a bounding box for the Coil mesh to not turn it into brain, attach the coil to the skull/skin surface +- We use OpenIGTLinkIF used to transfer data (E-Field from TMS) into 3D Slicer (done) +- Next steps include connecting 3DSlicer to the web browser using our newly implemented WebSocket from https://github.com/liampaulhus/slicerWebWSS-WIP +- Connect a mobile device via WebXR: So far, we have started implementing face tracking for the TMS module with simple JavaScript. In the future, this might need to be implemented with WebXR to retrieve depth information for AR. + +## Progress and Next Steps + +1. Some bug fixes of observers in the TMS Module +2. Integration of deep learning model: Modified OpenIGTLink with pyigtl (https://github.com/lassoan/pyigtl) to send out transform message of the moving TMS coil (added nodes, enabled push on connect, modified python demon that accesses the outgoing data), in collaboration with the deep learning project [Slicer TMS Deep-Learning](../SlicerTMS_E-field) +3. Tested our new secure websocket connection to the browser (https://github.com/liampaulhus/slicerWebWSS-WIP) using a self-signed certificate for https. +4. 
Next steps: a) Attach the coil to the skull/skin surface in Slicer and b) Modify the browser-based WebXR controller so that it can be synchronized with slicer + + +## Illustrations + +#### Current Visualization of the TMS Module in 3DSlicer with Coil and mapping of E-field on brain: + +SlicerTMS Module with Efield mapped on brain + + +#### Coil moving and updating the electric field on the brain surface inside slicer: + +Brain surface coil moving + + +#### Started WebSocket Tests: we can interact with the red, green and yellow slices with the new websocket connection: + + + +#### So far, we tested some simple Javascript Face-Tracking via mobile Phone and redered the brain: + +Facetracking in javascript for mobile phone + + + +# Background and References + +This project is related to: ../SlicerTMS_E-field + +## Infos for running WebXR: + +Phones need a Depth sensor to run AR/VR. A list of supported devices can be found here: https://developers.google.com/ar/devices + +On an Android Phone via USB: +- PlayStore: Download Google VR Services and Google AR Services App +- Update Chrome/Camera apps etc. +- On the phone: Enable Developer tools (https://developer.android.com/studio/debug/dev-options) and USB debugging (description here: https://developer.chrome.com/docs/devtools/remote-debugging/) +- Run chrome://inspect#devices in the browser on your computer and it should detect USB connected devices + +For iPhone: +- Mozilla offers a WebXR Emulator that can be downloaded from the Apple Store for any iPhone and iPad: https://labs.mozilla.org/projects/webxr-viewer/ + +## For Slicer TMS Module (see previous project week [PW 36](https://github.com/NA-MIC/ProjectWeek/blob/master/PW36_2022_Virtual/Projects/SlicerTMS_Module/README.md)): + + + +1. Predicting the distribution of the E-field based on the location of the coil + + + +## Approach and Plan + + + +1. Read a affine transform matrix from the updated (rotated) coil. +2. 
Perform an affine transformation to the Coil data and resample it to the subject head model space. +3. Combine the Coil data and the head model to generate a new nifti file and pre-process it. +4. Predict the E-field using the generated nifti file and a pre-trained deep network. +5. Visualize the prediction result (.nii) + +## Progress and Next Steps + + + +1. Finished step 1-4. +2. Working on integrating the code into the visualization module. +3. Improving the speed of the prediction. + +# Illustrations + +Visualization of the predicted E-field using the developed interface. +![Visualization of the predicted E-field from another software](../SlicerTMS/tmscoil_on_brain_surface.png) + +--> + +# Background and References +This is the sister project of [Slicer TMS Deep-Learning](../SlicerTMS) diff --git a/PW37_2022_Virtual/Projects/SlicerVRInfrastructure/README.md b/PW37_2022_Virtual/Projects/SlicerVRInfrastructure/README.md new file mode 100644 index 000000000..3f9cc135e --- /dev/null +++ b/PW37_2022_Virtual/Projects/SlicerVRInfrastructure/README.md @@ -0,0 +1,77 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# SlicerVR infrastructure maintenance + +## Key Investigators + +- Csaba Pinter (EBATINCA) +- David Garcia Mato (EBATINCA) +- Adam Rankin (Robarts Research) +- Jean-Christophe Fillion-Robin (Kitware) + +# Project Description + + + +SlicerVR has many great features that allow an efficient visualization of medical 3D data. However, recent updates in the infrastructure (mainly that of VTK) broke certain key features, which make SlicerVR basically unusable in its current form. + +Recently there was also progress with the in-VR widget and its usage with the laser pointer that forms an integral part of the final SlicerVR infrastructure. The basic pieces are there, but need to be finalized and also see if the reimplemented widget in VTK works in VR (which before it did not). 
+ +At the same time, a new project (Kitware/Robarts) has started to upgrade the rendering backend of SlicerVR from OpenVR to OpenXR and making the XR features available via the SlicerVR extensions. + +Since this project needs a fully functioning SlicerVR including the widgets, this Slicer week project we can unite forces and make a push towards this goal. + + +## Objective + + + +The goal of the project is, with coordination with Kitware/Robarts, to reach the previous usability state of the SlicerVR extension, as well as make progress with the in-VR widget. + + +## Approach and Plan + + + +1. The OpenXR integration into SlicerVR seems to advance well, so + 1. Test SlicerVR with the OpenXR branch + 2. Fix controller events with HP controller that seem to happen with XR as well, and the errors that bog down the application while rendering +1. Fix volume rendering display in the virtual reality view. This may not be OpenVR-related, because the same thing happens using the Looking Glass extension, so would be a first candidate. +1. Try out the latest [branch for the in-VR widget](https://github.com/dgmato/SlicerVirtualReality/tree/gui-widgets-interactions) + 1. Try the reimplemented vtkQWidgetWidget class that now does not use the unstable shared context + 2. Make progress with the laser pointer + 3. Fix the texture update in the VR view (the update happens in regular 3D view, i.e. the checkbox is checked visibly, but not in the VR view) + +## Progress and Next Steps + + + +1. OpenXR has been added to the SlicerVR superbuild, see [branch](https://github.com/adamrankin/SlicerVirtualReality/tree/openxr) (Adam, Jc) +2. Flag has been added to SlicerVR to use either OpenVR or OpenXR, and if statements added in the code to use the selected one. The OpenXR control paths currently log not implemented messages. 
See [branch](https://github.com/cpinter/SlicerVirtualReality/tree/openxr) (Csaba) + +# Illustrations + + + +![In-VR widget example](VRWidget.gif) + +# Background and References + + + +Branches +* [GUI widgets interactions branch](https://github.com/dgmato/SlicerVirtualReality/tree/gui-widgets-interactions) +* SlicerVR OpenXR branch comes here + +Issues +* [SlicerVR ticket #91](https://github.com/KitwareMedical/SlicerVirtualReality/issues/91) +* [SlicerVR ticket #80](https://github.com/KitwareMedical/SlicerVirtualReality/issues/80) +* [SlicerVR ticket #88](https://github.com/KitwareMedical/SlicerVirtualReality/issues/88) +* [SlicerVR ticket #43](https://github.com/KitwareMedical/SlicerVirtualReality/issues/43) + +Past project week pages +* [Project week #35 page](https://projectweek.na-mic.org/PW35_2021_Virtual/Projects/SlicerVR/) +* [Project week #34 page](https://projectweek.na-mic.org/PW34_2020_Virtual/Projects/SlicerVR/) diff --git a/PW37_2022_Virtual/Projects/SlicerVRInfrastructure/VRWidget.gif b/PW37_2022_Virtual/Projects/SlicerVRInfrastructure/VRWidget.gif new file mode 100644 index 000000000..cb89f99d9 Binary files /dev/null and b/PW37_2022_Virtual/Projects/SlicerVRInfrastructure/VRWidget.gif differ diff --git a/PW37_2022_Virtual/Projects/SlicerVRTutorial/README.md b/PW37_2022_Virtual/Projects/SlicerVRTutorial/README.md new file mode 100644 index 000000000..98cead0a0 --- /dev/null +++ b/PW37_2022_Virtual/Projects/SlicerVRTutorial/README.md @@ -0,0 +1,76 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# SlicerVR Tutorial + +## Key Investigators + +- Mónica García-Sevilla (Universidad de Las Palmas de Gran Canaria, Spain) +- David García-Mato (EBATINCA, Spain) +- Csaba Pinter (EBATINCA, Spain) + +# Project Description + +SlicerVR is a nice tool for visualization of medical 3D data. It can be used in preoperative planning, surgical training and others. 
However, understanding how to interact with the images and models or getting used to move around the Slicer scene using the VR controllers can be difficult at the beginning. Therefore, a first steps tutorial could be useful for new users. + +## Objective + +The goal of this project is to develop a SlicerVR tutorial module to learn the basic actions needed to interact with objects and move around the 3D scene. + + +## Approach and Plan + +1. Define the actions to train (fly, grab, transform scene...) +1. Decide how to train those actions (define target positions or actions) +1. Decide the tutorial workflow (how to change which action is being trained) +1. Implement the module +1. Test with different headsets (HTC Vive, Oculus Rift...) + +## Progress and Next Steps + +1. A module has been created for the tutorial which connects to the VR hardware and shows a virtual scenario. +1. The user is assigned an avatar (head and hands). +1. Instructions on how to use the controllers for the most common devices are given. + +![controllersInstructions](https://user-images.githubusercontent.com/90038097/176795986-34bcddee-0b15-4882-a091-586b22280a25.gif) + +The tutorial includes a **first task** where users learn how to use the controllers to fly. The task consists on reaching a target (yellow cylinder shown in the scene). + +1. First, the module shows the instructions on how to use the controllers for that specific action and the task to perform. + +![Instructions](https://user-images.githubusercontent.com/90038097/176796116-4e752803-0fa2-4402-a3ae-d59f0f0d8813.png) + +1. Then, the user performs the task. + +![part1-fly](https://user-images.githubusercontent.com/90038097/176720297-e1098bcd-1c5f-4b84-ba09-055b7855993a.gif) + +1. When the target is reached, the user sees a message indicating “Success”. + +Started implementation of **two more tasks**: + +1. 
The user has to grab a model of a skull and put it in the scale + +![Move object](https://user-images.githubusercontent.com/90038097/176893946-de3241ca-018b-4373-a83b-1423384f1379.PNG) + +2. The user has to move the model of the femur to match the target position (shown as a transparent femur) + +![Move and rotate object](https://user-images.githubusercontent.com/90038097/176893988-a9f0cb25-73c3-4eef-8052-cd38fdacd795.PNG) + +__Next steps__: + +1. Finish the implementation of these tasks. +1. Include more tasks to cover all the possible actions performed with the controllers. +1. When the in-VR widget is ready, the instructions and messages will be shown there. + +# Illustrations +VR scenario + +![VR scenario](https://user-images.githubusercontent.com/90038097/175922432-08bccf28-2e82-4203-9b0b-c77b83cc5831.gif) + +Interaction with objects + +![Object Interaction](https://user-images.githubusercontent.com/90038097/175923480-92620ad5-d286-4b04-8ea9-fd05016ba54a.gif) + + +# Background and References + +Link to the source code repository: https://github.com/monicagsevilla/SlicerVRTutorial diff --git a/PW37_2022_Virtual/Projects/SlicerVirtualEndoscopy/README.md b/PW37_2022_Virtual/Projects/SlicerVirtualEndoscopy/README.md new file mode 100644 index 000000000..641925a82 --- /dev/null +++ b/PW37_2022_Virtual/Projects/SlicerVirtualEndoscopy/README.md @@ -0,0 +1,54 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Write full project title here + +## Key Investigators + +- Yi Gao (Shenzhen University) + +# Project Description + + + +Virtual endoscopy extracts the lumen from volumetric images and creates a fly-through visualization similar to the real endoscopy examinations. + +Slicer has an Endoscopy module, based on Delphine Nain's work: +https://dspace.mit.edu/handle/1721.1/87240 + +The current Endoscopy module requires the already extracted surface as input. 
The proposed module, however, starts from volumetric data and can perform vessel segmentation etc. + + +## Objective + + + +1. Objective A. Add the VirtualEndoscopy to Slicer extension. +1. Objective B. Contribute the vessel segmentation as a Segmentation Effect. + +## 20220701 What's been done in the week +1. Improve the code by using ui file to control the widgets. +1. Preparing the documents/figures for answering pull request questions of the ExtensionIndex. + +## Approach and Plan + + + +1. It's currently already on github. + +## Progress and Next Steps + + + +1. The extension and the module has already been pushed to github + +# Illustrations + + + + +# Background and References + + diff --git a/PW37_2022_Virtual/Projects/SlicerWGPU/README.md b/PW37_2022_Virtual/Projects/SlicerWGPU/README.md new file mode 100644 index 000000000..3de09ae50 --- /dev/null +++ b/PW37_2022_Virtual/Projects/SlicerWGPU/README.md @@ -0,0 +1,55 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# SlicerWGPU + +## Key Investigators + +- Steve Pieper, Isomics, Cambridge MA, USA +- others interested? + +# Project Description + +Explore the utility of using WebGPU from python in Slicer. + +A working prototype already exists here: https://github.com/pieper/SlicerWGPU + +## Objective + +1. Bounce the idea off people as a sanity check +2. Collect ideas for possible use cases +3. 
See if anyone wants to try implementing something practical + +## Progress and Next Steps +* Discussed the approach with several colleagues and confirmed interest in more experiments +* Presented, demoed, and discussed at the Slicer rendering breakout session + * VTK.js is already working with prototype WebGPU functionality + * It looks like VTK C++ will probably be going with WebGPU via Dawn in the coming years so there should be synergy and likely the ability to share shader code with anything developed via SlicerWGPU +* Improved the readme for the SlicerWGPU repository +* Collecting ideas for proof of concept experiments to test cross-platform performance + +# Illustrations + +image + +Figure 1: [Simple compute shader example](https://github.com/pieper/SlicerWGPU/blob/main/Experiments/slicer-compute.py) that inverts the values of a volume. + +``` +@group(0) @binding(0) +var data1: array; +@group(0) @binding(1) +var data2: array; +@stage(compute) +@workgroup_size(1) +fn main(@builtin(global_invocation_id) index: vec3) { + let i: u32 = index.x * @@SLICE_SIZE@@ + index.y * @@ROW_SIZE@@ + index.z; + data2[i] = -1 * data1[i]; +} +``` +Figure 2: Example WGSL compute shader code. + +image + +Figure 3: [Off-screen GPU render example](https://github.com/pieper/SlicerWGPU/blob/main/Experiments/slicer-render.py). + +# Background and References +See [the README](https://github.com/pieper/SlicerWGPU) for links and background. 
diff --git a/PW37_2022_Virtual/Projects/StreamlinedROIAnnotationTool/README.md b/PW37_2022_Virtual/Projects/StreamlinedROIAnnotationTool/README.md new file mode 100644 index 000000000..1ee897865 --- /dev/null +++ b/PW37_2022_Virtual/Projects/StreamlinedROIAnnotationTool/README.md @@ -0,0 +1,86 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Streamlined ROI annotation using Cornerstone.js for Mammograms + +## Key Investigators + +- Ryan Zurrin (University of Massachusetts Boston) +- Neha Goyal (University of Massachusetts Boston) +- Pablo Bendiksen (University of Massachusetts Boston) +- Kendrick Kheav (University of Massachusetts Boston) +- Daniel Haehn (University of Massachusetts Boston) + +# Project Description + +Fast and efficient means of loading mammogram's from server into a web based +viewer. Viewer is equipped with an ROI tool for generating bounding boxes (BB) +as simply as possible. It will allow users to create the BB, as well as +give it a score of likelihood that region is indeed cancer. Then, with the +press of a button, the roi and image id along with user info are saved back to +the server for later use. After being saved a new mammogram will then be served +up from the server into the viewer for them to again annotate. The saved +annotations from professionals will then be used later on in our research for +comparing classifier generated ROIs against professionally labeled ones. + +## Objective + +This tool is a subset of a much larger project, Oregon-Massachusetts Mammography +Database (OMAMA-DB), and has the sole purpose of giving professional labelers an +efficient way to label a massive amount of mammograms as fast and as simply +as possible. + +## Approach and Plan + + + +1. Learn and understand how to use Cornerstone.js which is a key component + in making this tool possible. (done) +2. 
Use Tornado webserver for backend because of its ability to make + asynchronous web requests, which will allow for multiple annotators to be working + simultaneously with our data. (done) +3. Using Tornado and Cornerstone, build a simple and fast method to serve up + mammograms from our dataset on server. (done) +4. Add ROI functionality. (done) +5. Add scoring functionality. +6. Add a method for users to mark if an image is defective. +7. Add a method to clear Previously added ROIs +8. Add a simple way to save data back to server. (done) + +## Progress + +1. Have been able to use Cornerstone.js and Tornado webserver to load + mammograms from server into the web based viewer. +2. Have been able to add the ROI functionality to viewer. +3. Have added the ability to press either *enter* or *space* key to save ROI + and image information back to server. + +## Next Steps + +1. Need to add scoring functionality (DONE) +2. Need to add way to mark image as defective (DONE) +3. Need to add way to clear Previously added ROIs (DONE w/help from erikz) +4. Change the format of the tooltip to make it look nicer and have it include the current image id (DONE) +5. Add means to Zoom, Pan, and adjust Window/Level (DONE) +6. Add a Help section at bottom, to let users know what buttons and actions do what (DONE) +7. optimize performance, if time is permitting. + + +# Illustrations + +

+ +

+ +Demo video from start of project: +![](SimplifiedROITool.gif) +![](roi-web-tool-example.gif) + +Demo video from end of project: +![](https://github.com/NA-MIC/ProjectWeek/releases/download/project-week-resources/PW37__StreamlinedROIAnnotationTool__FinalROITool_1.gif) + +# Background and References + + +[Massachusetts Life Sciences Center, Press Release](https://www.masslifesciences.com/news/two-umass-boston-researchers-awarded-mass-life-sciences-grants/)
+[Cornerstone.js](https://cornerstonejs.org/)
+[Tornado webserver](https://www.tornadoweb.org/)
diff --git a/PW37_2022_Virtual/Projects/StreamlinedROIAnnotationTool/roi-web-tool-example.gif b/PW37_2022_Virtual/Projects/StreamlinedROIAnnotationTool/roi-web-tool-example.gif new file mode 100644 index 000000000..d72647fec Binary files /dev/null and b/PW37_2022_Virtual/Projects/StreamlinedROIAnnotationTool/roi-web-tool-example.gif differ diff --git a/PW37_2022_Virtual/Projects/StreamlinedROIAnnotationTool/work_flow.png b/PW37_2022_Virtual/Projects/StreamlinedROIAnnotationTool/work_flow.png new file mode 100644 index 000000000..cb1c12ed7 Binary files /dev/null and b/PW37_2022_Virtual/Projects/StreamlinedROIAnnotationTool/work_flow.png differ diff --git a/PW37_2022_Virtual/Projects/SystoleOS/README.md b/PW37_2022_Virtual/Projects/SystoleOS/README.md new file mode 100644 index 000000000..de66b01fc --- /dev/null +++ b/PW37_2022_Virtual/Projects/SystoleOS/README.md @@ -0,0 +1,95 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Systole OS: an operating system for development/deployment of medical devices. + +## Key Investigators + +- Rafael Palomar (Oslo University Hospital and NTNU, Norway) +- Andras Lasso (Queen's University, Kingston, Canada) +- Steve Pieper (Isomics Inc., Cambridge, MA, USA) +- Jean-Christophe Fillion-Robin (Kitware Inc., Carrboro, NC, USA) +- Sam Horvath (Kitware Inc.) + +# Project Description + + + +For more than a decade, 3D Slicer has been enabling world-class biomedical +research. The great success of 3D Slicer is now pushing the boundaries of +research, making some research groups and companies regard 3D Slicer as a viable +software for building medical devices that not only could support regular +clinical workflows but also become commercial products. While the development of +3D Slicer has been tailored towards research, its modular architecture makes the +development of industrial prototypes possible. + +The vision of Systole OS is the integration of 3D Slicer and related software (e.g, +Plus Toolkit, MONAI Label and more!) 
in a free and open-source operating system +based on GNU/Linux, with the aim to support the development and deployment of +medical devices. + +![Systole](systole.png) + +Here are some of the features that we would like to leverage in +Systole OS: + +### Cutting-edge software + +Systole OS is based on Gentoo Linux, which follows a rolling-release model +providing up-to-date support out of the box. + +### Installable Slicer... + +Slicer, together with all the required dependencies, will be installable with a simple +command (e.g., `emerge sci-medical/slicer`). No SuperBuild! No Slicer-Launcher! + +### ...and Modular Slicer + +The base installation of 3D Slicer will include only the components needed to +run the application (e.g., `emerge slicer-modules/models` can be done +separately). + +### Source-based + +Systole OS is a **source-based** distribution, which means that all packages +will be built from source. Having the flexibility to make decisions at +compile time allows: + + - Tighter hardware integration + - Highly configurable packages (e.g., `flaggie sci-medical/slicer -python +opencl; emerge sci-libs/slicer` will install Slicer without python support and with opencl support) + - Portability to hardware architectures other than amd64 (e.g., arm, risc-v). + +### Extensible + +Systole OS works on the Gentoo overlay system which allows you to extend the +system with your own overlay or override packages provided by Systole. + +## Objective + +In PW37 we aim to take the first steps towards the creation of the Systole +overlay, which is a set of packages containing an installable version of +3D Slicer and its dependencies. + +## Approach and Plan + +1. Project discussion +1. Review of dependencies and `-DSlicer_SUPERBUILD=OFF` +1. Development of the packages for the dependencies and 3D Slicer +1. Development of the packages for the 3D Slicer default modules + +## Progress and Next Steps + +First full installation of a minimal 3D Slicer! 
+ +[![asciicast](https://asciinema.org/a/505170.svg)](https://asciinema.org/a/505170) + +- SystoleOS gentoo overlay has been made public at [https://github.com/SystoleOS/gentoo-overlay](https://github.com/SystoleOS/gentoo-overlay). + +- Simple 3D Slicer installation with a `-DSlicer_SUPERBUILD=OFF` approach (-python). There are still some glitches and workarounds even for the simplest version, but this shows the feasibility of the project. + +Systole OS, by nature, is a continuous project that needs to keep up with the developments of 3D Slicer and the developments of the underlying software packages itself. It is necessary to establish infrastructure for testing that can help us in the development process (next step). In addition, we need to establish infrastructure to support community documentation, containers, communication channels, etc. (next step). + +When it comes to the development of Systole OS itself, the first efforts will go in the direction of fixing bugs, enabling new configurations of 3D Slicer (e.g., +python). To engage more users, it is important that Systole OS shows utility. We are planning to integrate a Slicer custom application (to be decided) as a deployment prototype. 
+ +# Background and References + + diff --git a/PW37_2022_Virtual/Projects/SystoleOS/systole.png b/PW37_2022_Virtual/Projects/SystoleOS/systole.png new file mode 100644 index 000000000..3b9db3271 Binary files /dev/null and b/PW37_2022_Virtual/Projects/SystoleOS/systole.png differ diff --git a/PW37_2022_Virtual/Projects/ViPRE/README.md b/PW37_2022_Virtual/Projects/ViPRE/README.md new file mode 100644 index 000000000..21b500536 --- /dev/null +++ b/PW37_2022_Virtual/Projects/ViPRE/README.md @@ -0,0 +1,39 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Visualizing mass spectrometry data using Slicer + +## Key Investigators + +- Hanad Elmi (Queen’s University) +- Mackenzie Sharp (Queen’s University) +- Jessica Rodgers (Queen’s University) +- Amoon Jamzad (Queen’s University) +- Parvin Mousavi (Queen’s University) + +# Project Description + +Our project aims to develop a Slicer module, called ViPRE, an open-source software package for analysis of Mass Spectrometry imaging data. ViPRE allows users to input DESI (Desorption electrospray ionization) data and visualize it in PCA (Principal Components Analysis) or single ion imaging. It also enables you to upload histopathology images for correspondence. Currently, we want to allow users to visualize a plot of the mass spectrum for each pixel. We would also like to enable users to do image segmentation within the module. As of now, you need to open the image segmentation module to segment your tissue image. + +## Objective + +1. Objective A. Be able to plot a mass spectrum graph when clicking a pixel on the PCA image generated by the Slicer module +1. Objective B. Enable users to segment their PCA images within the module instead of having to switch to the image segmentation module in Slicer + +## Approach and Plan + +1. Create a function that plots the mass spectrum and displays it on Slicer + 1. The current issue is that the function is unable to access the peaks. This results in a plot with no bars +1. 
Create a function that takes cursor coordinates in real time and saves them if the user clicks on the pixel +1. Add the segmentation properties to the ViPRE module + + +## Progress and Next Steps + +1. I have already created a function that takes cursor coordinates in real time and saves them when the user presses the "." button on their keyboard + +# Illustrations + +Screen Shot 2022-06-26 at 2 41 55 PM + + +# Background and References diff --git a/PW37_2022_Virtual/Projects/mpReview/README.md b/PW37_2022_Virtual/Projects/mpReview/README.md new file mode 100644 index 000000000..d5db833cf --- /dev/null +++ b/PW37_2022_Virtual/Projects/mpReview/README.md @@ -0,0 +1,66 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# mpReview - 3DSlicer module for multiparametric annotation of the prostate + +## Key Investigators + +- Deepa Krishnaswamy (Brigham and Women's Hospital) +- Andrey Fedorov (Brigham and Women's Hospital) + +# Project Description + +The 3DSlicer module mpReview (part of the SlicerProstate extension) was previously developed to assist with manual annotation of the prostate and other related anatomical regions. +The current state of the module does not use the latest SegmentEditor, and requires that the data be organized and preprocessed in a specific manner on the user's local machine. +The overall goal is to update the module to be more streamlined, and later have the ability to be extended to other regions of the body. + +Currently, we have modified the module to allow data to be loaded from the local Slicer database, or from a remote GCP server. The DICOMweb client is used to retrieve studies/series/instances +of the user-selected GCP DICOM store. The resulting segments are saved as a DICOM SEG file (user-specific) and uploaded to the server. Using our own instance of the OHIF viewer, the data and the corresponding segmentations can be viewed. + +## Objective + + + +1. Objective A. 
Separate the location of the raw DICOM data and the DICOM SEG files. +1. Objective B. Brainstorm easier ways for multiple users to view their annotations. + +## Approach and Plan + + + +1. Implement the selection of two DICOM stores, where one will contain the raw DICOM data and the other the DICOM SEG files. +1. Discuss how to best view annotations from multiple users. + +## Progress and Next Steps + + + +1. The user can select to use the local DICOM database or the remote GCP server. +2. Annotations are saved as DICOM SEG files to the server. +3. We can create a second DICOM datastore to store only the segmentations, but cannot yet read the latest saved segmentation DICOM file from it. + +# Illustrations + + + +Select to use either the local Slicer database or a remote (GCP) server. + +![image](https://user-images.githubusercontent.com/59979551/173397241-97def393-7434-4d8d-978f-9ca695cf6efc.png) + +Create annotations of the prostate and other anatomical regions of interest, and upload the segmentation DICOM files to the server. + +![image](https://user-images.githubusercontent.com/59979551/173397664-c3a7f567-d5f2-4214-a366-7cef1344860c.png) + +Use our instance of the OHIF viewer to see the updated annotations. 
+ +![ohif_mpreview](https://user-images.githubusercontent.com/59979551/176763073-ac96d7cf-d490-4946-bb2a-4ed073e80b47.JPG) + + + +# Background and References + + + +[Code is here](https://github.com/deepakri201/mpReview/tree/seg_editor) diff --git a/PW37_2022_Virtual/Projects/sliCERR/README.md b/PW37_2022_Virtual/Projects/sliCERR/README.md new file mode 100644 index 000000000..0990a848d --- /dev/null +++ b/PW37_2022_Virtual/Projects/sliCERR/README.md @@ -0,0 +1,59 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# sliCERR + +## Key Investigators + +- Eve LoCastro (MSKCC) +- Aditya Apte (MSKCC) +- Aditi Iyer (MSKCC) +- Harini Veeraraghavan (MSKCC) + +# Project Description + +CERRx is an Octave/MATLAB-based software platform for developing and sharing research results using radiation therapy treatment planning and imaging informatics. "sliCERR" is being developed to facilitate the use of CERRx's radiotherapy and image analysis functionality. The extension we envision, "sliCERR", will provide scripted modules for data I/O operations, and will run analysis routines from CERRx. + +## Objective + + +"sliCERR" will be a scripted module for Slicer, written in Python. We are starting in Jupyter notebooks with 3D Slicer kernel for visualization and image processing. The cerr2mrml module handles the I/O operations of loading native CERR planC format files into the 3DSlicer MRML scene, including import of scan, dose and ROI contours. +1. Easy imaging data IO exchange between CERR and 3D Slicer platforms via module UI +2. Enable CERRx features for dosimetry and image analysis, ROE Radiotherapy Outcomes Estimator, semi-quantitative DCE features, DL-based image segmentation models + +## Approach and Plan + + + +1. Slicer-Jupyter notebooks to code the data import/export process and analysis as proof-of-concept +2. A user interface was developed for the 3DSlicer GUI to simplify the process of selecting CERR-format datasets for import in 3DSlicer. 
+ +## Progress and Next Steps + + + +1. Use of specialized functions in CERR such as Deep Learning-based image segmentation and radiomics texture mapping, demonstrated in Jupyter notebooks publicly available on GitHub. +2. GUI is in development +3. Expand wiki documentation for setup and usage + +# Illustrations + +Link to Jupyter Notebook demo: https://mskcc.box.com/s/eizbm2nc54uvddcomzmvotw2w8sl82h4 + + +sliCERR + + +planC with dose in Slicer + +import DICOM to planC + +# Background and References +* ROE: Radiotherapy Outcomes Estimator - An Open-Source Tool for Modeling Radiotherapy Outcomes https://www.aapm.org/meetings/2017am/PRAbs.asp?mid=127&aid=37270 +* Extension of CERR for computational radiomics: A comprehensive MATLAB platform for reproducible radiomics research https://pubmed.ncbi.nlm.nih.gov/29896896/ +* CERR GitHub Source Repo https://github.com/cerr/CERR +* CERR Wiki https://cerr.github.io/CERR/ +* sliCERR Repo https://github.com/cerr/sliCERR + diff --git a/PW37_2022_Virtual/Projects/sliCERR/sliCERR_dose_overlay.png b/PW37_2022_Virtual/Projects/sliCERR/sliCERR_dose_overlay.png new file mode 100644 index 000000000..377fbffb6 Binary files /dev/null and b/PW37_2022_Virtual/Projects/sliCERR/sliCERR_dose_overlay.png differ diff --git a/PW37_2022_Virtual/Projects/sliCERR/sliCERR_graphic.png b/PW37_2022_Virtual/Projects/sliCERR/sliCERR_graphic.png new file mode 100644 index 000000000..c4c202056 Binary files /dev/null and b/PW37_2022_Virtual/Projects/sliCERR/sliCERR_graphic.png differ diff --git a/PW37_2022_Virtual/Projects/sliCERR/slicer_notebook.JPG b/PW37_2022_Virtual/Projects/sliCERR/slicer_notebook.JPG new file mode 100644 index 000000000..aeb5a68a4 Binary files /dev/null and b/PW37_2022_Virtual/Projects/sliCERR/slicer_notebook.JPG differ diff --git a/PW37_2022_Virtual/README.md b/PW37_2022_Virtual/README.md new file mode 100644 index 000000000..937d9f460 --- /dev/null +++ b/PW37_2022_Virtual/README.md @@ -0,0 +1,235 @@ +## 
Welcome to the web page for the 37th Project Week! + +[This event](https://projectweek.na-mic.org/PW37_2022_Virtual/) took place from June 27 to July 1, 2022. + +If you have any questions, you can contact the [organizers](../README.md#who-to-contact). + +Please fill out the [post-Project Week](https://forms.gle/E4ETUPXkKt3iFxvJ7) survey to let us know what you thought of this edition. Hope to see you again for Project Week 38 in Las Palmas, Jan 30-Feb 3rd 2023! + +## Screenshots, Illustrations & Photos Album + +[Google Photos album](https://photos.app.goo.gl/PTJjQn5D33uShcLM9) + +## Before Project Week +1. Participants met at preparation meetings to present projects they intended to work on at PW, seeking collaborators or to join one of the projects proposed by others. +1. Participants joined the [Discord server](https://discord.gg/yQsNVdVpS3) and used it to communicate during Project Week. Go to [this page](../common/Discord.md) for more info on the use of Discord during PW. +2. A workshop on using MONAI Label with 3D Slicer was held June 22, 2022 from 9-11am EDT. See [this page](MONAILabel_Workshop.md) for more information. + +## During Project Week (All times US Eastern Daylight (Boston) Time) +* Initial **project presentations** started at **9am on Zoom**. Each team delegated a member to present their projects in no more than 2 minutes using no other visual support than the project page on GitHub (no time to switch screen sharing) +* Participants who did not have a project could find a project they were interested in and contacted team members through their Discord channel. +* Breakout sessions started every day at **9am on Zoom** (links in the calendar below) +* Work in **project teams** happened throughout the week with communication between team members taking place on **Discord**. +* The week ended with **project results presentation (9am on Friday)**. Again, each team delegated one member to present their results in a maximum of 2 minutes. 
The project page was used as a visual support for the presentation. + +## Agenda + +
+
+ + + + + + +[How to add this calendar to your own?](../common/Calendar.md) + +## Projects [(How to add a new project?)](Projects/README.md) + +### VR/AR and Rendering + +1. [PRISM Rendering](Projects/PRISMRendering/README.md) (Simon Drouin, Steve Pieper, Andrey Titov, Rafael Palomar) +1. [SlicerVR infrastructure](Projects/SlicerVRInfrastructure/README.md) (Csaba Pinter, Sankhesh Jhaveri, David Garcia Mato, Adam Rankin, Jean-Christophe Fillion-Robin) +1. [SlicerVR Tutorial](Projects/SlicerVRTutorial/README.md) (Monica Garcia-Sevilla, David Garcia-Mato, Csaba Pinter, ?) +1. [Slicer Photogrammetry](Projects/SlicerPhotoGram/README.md) (Chi Zhang, Murat Maga, Steve Pieper) +1. [Cinematic Rendering VTK PBR](Projects/CinematicRenderingVTK/README.md) (Shreeraj Jadhav, Jiayi Xu, Jean-Christophe Fillion-Robin) +1. [Slicer TMS Module](Projects/SlicerTMS/README.md) (Loraine Franke, Jax Luo, Raymond Yang, Steve Pieper, Lipeng Ning, Daniel Haehn) +1. [SlicerWGPU - WebGPU in Slicer](Projects/SlicerWGPU) (Steve Pieper) + +### Image-guided therapy (IGT) and low-cost systems + +1. [Slicer-Liver](Projects/SlicerLiver/README.md) (Rafael Palomar, Gabriella D'Albenzio, Ruoyan Meng, Ole V. Solberg, Geir A. Tangen, Javier Pérez de Frutos) +1. [Low-Cost Ultrasound Training](Projects/LowCostUltrasoundTraining/README.md) (David Garcia-Mato, Csaba Pinter, Rebecca Hisey, Matthew Holden,...) +1. [Virtual endoscopy](Projects/SlicerVirtualEndoscopy/README.md) (Yi Gao) +1. [SlicerTMS: Deep learning for E-field prediction](Projects/SlicerTMS_E-field/README.md) (Jax Luo, Loraine Franke, Raymond Yang, Steve Pieper, Daniel Haehn, Lipeng Ning) + +### Segmentation/Classification + +1. [Lung segmentation with MONAILabel](Projects/MONAILabelLung/README.md) (Rudolf Bumm, Andres Diaz-Pinto) +1. [Lumbar spine segmentation using MONAILabel](Projects/LumbarSpineSegmentation/README.md) (Nayra Pumar, María Rosa Rodriguez, David García Mato) +1. 
[Combined MONAI Label and MONAI Deploy SDK Applications](Projects/MONAILabelAndDeploy/README.md) (Erik Ziegler, Roya Khajavi, Steve Pieper, Ron Kikinis, Andres Diaz-Pinto) +1. [AMASSS CBCT](Projects/AMASSS_CBCT/README.md) (Maxime Gillot, Baptiste Baquero, Lucia Cevidanes, Juan Prieto) +1. [CT Lymph Node collection weakly annotated MONAI Label](Projects/LNQ/README.md) (Roya Khajavi, Erik Ziegler, Steve Pieper, Ron Kikinis) +1. [SlicerHeart polar maps](Projects/SlicerHeartPolarMaps/README.md) (Connor Haberl) +1. [IDC Prostate Segmentation](Projects/IDCProstateSegmentation/README.md) (Cosmin Ciausu, Andrey Fedorov) +1. [Outlier Detection of Large Scale Mammography Studies](Projects/OutlierDetectionLargeScaleMammograms/README.md) (Pablo Bendiksen, Ryan Zurrin, Neha Goyal, Daniel Haehn) +1. [Streamlined ROI Annotation Tool](Projects/StreamlinedROIAnnotationTool/README.md) (Ryan Zurrin, Neha Goyal, Pablo Bendiksen, Kendrick Kheav, Daniel Haehn) +1. [Mass spectrometry imaging using Slicer](Projects/ViPRE/README.md) (Parvin Mousavi, Amoon Jamzad, Jessica Rodgers, Mackenzie Sharp, Hanad Elmi) +1. [Multi-stage dental segmentation](Projects/MultistageTeethSegmentation/README.md) (Daniel Palkovics, Csaba Pinter, David Garcia Mato, Andres Diaz-Pinto) +1. [Automatic Landmark Identification](Projects/AutomaticLandmarkIdentification/README.md) (Maxime Gillot, Baptiste Baquero, Lucia Cevidanes, Juan Prieto) + +### Quantification + +1. [AQ3DC](Projects/AutomaticQuantitative3DCephalometrics/README.md) (Baptiste Baquero, Maxime Gillot, Lucia Cevidanes) +1. [Multichannel image colocalization statistics](Projects/SlicerColoc-Z-Stats/README.md) (Xiang Chen, Oscar Meruvia-Pastor, Touati Benoukraf) +1. [Poisson Shape Analysis](Projects/SlicerPoissonShapeAnalysis/README.md) (Yi Gao) +1. [SlicerScope - whole slide histopathology](Projects/SlicerScope/README.md) (Yi Gao, Steve Pieper) + +### Cloud + +1. 
[Imaging Data Commons](Projects/ImagingDataCommons/README.md) (Andrey Fedorov, Deepa Krishnaswamy, Dennis Bontempi, Cosmin Ciausu, Steve Pieper, Ron Kikinis) +1. [mpReview](Projects/mpReview/README.md) (Deepa Krishnaswamy, Andrey Fedorov) +1. [Slice level annotations in OHIF](Projects/OHIFviewSR/README.md) (Deepa Krishnaswamy, Davide Punzo, Markus Herrmann, Chris Bridge, Andrey Fedorov) + +### Infrastructure + +1. [SystoleOS: ](Projects/SystoleOS/README.md) (Rafael Palomar, Andras Lasso, Steve Pieper, Jean-Christophe Fillion-Robin, Sam Horvath) +1. [MarkupConstraints](Projects/MarkupConstraints/README.md) (David Allemang, Jean-Christophe Fillion-Robin) +1. [SlicerInternationalization](Projects/SlicerInternationalization/README.md) (Sonia Pujol, Steve Pieper, Andras Lasso) +1. [Slicer Batch Annonymize](Projects/SlicerBatchAnonymize/README.md)(Hina Shah, Juan Carolos Prieto) +1. [Slicer 5++: ](Projects/Slicer5Plus/README.md) (Sam Horvath, Andras Lasso, Steve Pieper, Jean-Christophe Fillion-Robin) +1. [EquivalentRotationSliders: ](Projects/EquivalentRotationSliders/README.md) (Mauro Dominguez, ...) + +## Registrants + +List of registered participants: + +1. Tina Kapur, Brigham and Women's Hospital and Harvard Medical School, USA +1. David Garcia-Mato, Ebatinca SL, Spain +1. Steve Pieper, Isomics, Inc., USA +1. Andrey Fedorov, Brigham and Women's Hospital, USA +1. Adama Rama WADE, Cheikh Anta Diop University, Senegal +1. Deepa Krishnaswamy, Brigham and Women's Hospital, USA +1. Simon Drouin, École de Technologie Supérieure, Canada +1. Roya Khajavibajestani, BWH, USA +1. RON KIKINIS, SPL, USA +1. Sonia Pujol, Brigham and Women's Hospital, Harvard Medical School, USA +1. Jax LUO, BWH, USA +1. Li Zhenzhu, Hwa Mei Hospital, China +1. Ole Vegard Solberg, SINTEF, Norway +1. Mauro Ignacio Dominguez, Independant, Argentina +1. Lucia Cevidanes, University of Michigan, USA +1. YAHYA TFEIL, UNIVERSITY NOUAKCHOTT ALAASRIYA, Mauritania +1. 
Rafael Palomar, Oslo University Hospital and NTNU, Norway +1. Pape Mady THIAO , École militaire de santé de Dakar, Senegal +1. Mónica García-Sevilla, Universidad de Las Palmas de Gran Canaria, Spain +1. Badiaa AIT AHMED , Universidad de Las Palmas de Gran Canaria, Spain +1. Cosmin Ciausu, Brigham and Women's Hospital, USA +1. Michael Dada, Federal University of Technology, Minna, Nigeria +1. Shiraz Yousif Abd Elsalam Yousif, University of Khartoum , Australia +1. Geir Arne Tangen, SINTEF, Norway +1. Marie NDIAYE, Université Assane Seck de Ziguinchor, Senegal +1. Ron Alkalay, Beth Israel Deaconess Medical Center, USA +1. Csaba Pintér, EBATINCA S.L., Spain +1. Samantha Horvath, Kitware, USA +1. Rebecca Hisey, Queen's University, Canada +1. Connor Haberl, University of Ottawa Heart Institute, Canada +1. Gang Fu, Amazon, USA +1. Shreeraj Jadhav, Kitware, Inc., USA +1. Eve LoCastro, Memorial Sloan Kettering, USA +1. Srivathsan Shanmuganathan, University of Alberta, Canada +1. David Allemang, Kitware Inc., USA +1. Michel David Raed, University of São Paulo, Brazil +1. Jiayi Xu, Kitware, USA +1. Gabriella d' Albezio, The Intervention Center (OUS), Norway +1. HINA SHAH, UNC Chapel Hill, USA +1. Sharanya Balachandran, University of Alberta, Canada +1. Muhammad zubair islam, Sejong University, Seoul, South Korea, South Korea +1. Daniel Haehn, University of Massachusetts Boston, USA +1. Sankhesh Jhaveri, Kitware, Inc., USA +1. li, hospital, China +1. Nayra Pumar, Ebatinca, Spain +1. Ruoyan Meng, Norwegian University of Science and Technology, Norway +1. Loraine Franke, University of Massachusetts Boston, USA +1. Javier, SINTEF, Norway +1. Chi Zhang, Seattle Children's Research Institute, USA +1. Nadya Shusharina, Massachusetts General Hospital, USA +1. Ahmedou Moulaye Idriss, Faculty of Medicine / University of Nouakchott Al Asriya , USA +1. Ryan Zurrin, University of Massachusetts Boston, USA +1. NANTENAINA Tina, ETS Montreal, Canada +1. 
Daniel Palkovics, Semmelweis University, Hungary +1. Matthew Holden, Carleton University, Canada +1. Pedro Moreira, Brigham and Women's Hospital, USA +1. Oumaima Saoud, ENIM, Morocco +1. Neha Goyal, University of Massachusetts Boston , USA +1. Ibtissam MEDARHRI, ENSMR, Morocco +1. Andras Lasso, PerkLab, Queen's University, Canada +1. Maxime Gillot, University of Michigan, USA +1. Baptiste Baquero , University of Michigan, USA +1. Gurnish Sidora, University of Toronto, Canada +1. Pablo Bendiksen, University of Massachusetts Boston, USA +1. Mariana Costa Bernardes Matias, Brigham and Women's Hospital, USA +1. Oumar SY, Cheikh Anta Diop University , Senegal +1. said, ENSMR, Morocco +1. Luiz Murta, University of São Paulo, Brazil +1. Mamadou Camara, Cheikh Anta Diop University of Dakar, Senegal +1. Mohamed Alalli BILAL, Ecole Supérieure polytechnique , Senegal +1. Patrick Remerscheid, Technical University of Munich, Germany +1. Khaled Younis, Philips, USA +1. Nicolas Yanez, Philips, USA +1. Renjie He, MDACC, USA +1. Hanad Elmi, Queen's University, Canada +1. Lukas Riedersberger, dev-threads, Germany +1. Johannes Pieger, dev-threads, Germany +1. Luca Boretto, University of Oslo, Norway +1. jiazeyu, None, China +1. Jean-Christophe Fillion-Robin, Kitware, USA +1. Yi Gao, Shenzhen University, China +1. Adriana H. Vilchis González, Universidad Autónoma del Estado de México, Mexico +1. Tamas Ungi, Queen's University, Canada +1. Kendrick Kheav, University of Massachusetts Boston, USA +1. Juan Carlos Avila Vilchis, Universidad Autónoma del Estado de México, Mexico +1. Xiang Chen, Memorial University of Newfoundland, Canada +1. Leah Groves, Queen's University , Canada +1. Lipeng Ning, Brigham and Women's Hospital, USA +1. Vianney Muñoz-Jiménez, Universidad Autónoma del Estado de México, Mexico +1. Mariana Alvarez-Carvajal, Universidad Autonoma del Estado de Mexico, Mexico +1. Michael Dada, Federal University of Technology, Minna, Nigeria +1. 
Joaquin Olivares, University of Cordoba, Spain +1. Étienne Léger, Brigham and Women's Hospital, USA +1. Samuelle St-Onge, École de technologie supérieure (ÉTS), Canada +1. Kyle Sunderland, Queen's University, Canada +1. NIrav Patel, Indian Institute of Technology Madras, India +1. Inés, Universidad Carlos III de Madrid, Spain +1. Dženan Zukić, Kitware, USA +1. María Rosa, Universidad de Las Palmas de Gran Canaria, Spain +1. Davit Aghayan, Oslo University Hospital, Norway +1. Kristof Moga, Perklab, Canada +1. Theodore Aptekarev, Slicer Community, Montenegro +1. Garth Macey, University of Wisconsin - Platteville, USA +1. Erik Ziegler, Open Health Imaging Foundation, Netherlands +1. Mouhamed DIOP, Cheikh Anta Diop University of Dakar, Senegal +1. Felicia Miranda, School of Dentistry, University of Michigan, USA +1. Christian Herz, Children's Hospital of Philadelphia, USA +1. Laura Connolly, Queen's University, Canada +1. Richard Doerer, Michigan State Univ, Yale Univ, USA +1. Mauro I. Dominguez, -, Argentina +1. Vinicius Pavanelli Vianna, University of Sao Paulo - CSIM, Brazil +1. Fatih, inonu university, Turkey +1. Matt McCormick, Kitware, USA +1. Adam Rankin, Robarts Research Institute, Canada +1. Pranjal Sahu, Stony Brook University, USA +1. Dada Michael, Federal University of Technology, Minna, Nigeria +1. Jasper van der Zee, University of Twente, Netherlands + +## Statistics +* 117 Registered attendees + * 41% first time attendees +* 20 countries +* 36 projects + +Attendees per country +Attendees per country +Attendees timezones + +## History +Please read about our experience in running these events since 2005: [Increasing the Impact of Medical Image Computing Using +Community-Based Open-Access Hackathons: the NA-MIC and 3D Slicer Experience](http://perk.cs.queensu.ca/sites/perkd7.cs.queensu.ca/files/Kapur2016.pdf). 
diff --git a/PW37_2022_Virtual/images/README.md b/PW37_2022_Virtual/images/README.md new file mode 100644 index 000000000..63663ca1e --- /dev/null +++ b/PW37_2022_Virtual/images/README.md @@ -0,0 +1 @@ +Upload here the images used on the main project week page. diff --git a/PW37_2022_Virtual/images/participants-map.png b/PW37_2022_Virtual/images/participants-map.png new file mode 100644 index 000000000..fab8d25bf Binary files /dev/null and b/PW37_2022_Virtual/images/participants-map.png differ diff --git a/PW37_2022_Virtual/images/participants-per-country.png b/PW37_2022_Virtual/images/participants-per-country.png new file mode 100644 index 000000000..9c4c7416c Binary files /dev/null and b/PW37_2022_Virtual/images/participants-per-country.png differ diff --git a/PW37_2022_Virtual/images/participants-per-timezone.png b/PW37_2022_Virtual/images/participants-per-timezone.png new file mode 100644 index 000000000..c780f53ae Binary files /dev/null and b/PW37_2022_Virtual/images/participants-per-timezone.png differ diff --git a/PW38_2023_GranCanaria/GranCanaria_TravelGuide.md b/PW38_2023_GranCanaria/GranCanaria_TravelGuide.md new file mode 100644 index 000000000..c26d7dae7 --- /dev/null +++ b/PW38_2023_GranCanaria/GranCanaria_TravelGuide.md @@ -0,0 +1,37 @@ +# Travel Guide: Las Palmas de Gran Canaria + +The city of Las Palmas de Gran Canaria has many cool places to discover. In this travel guide, you will find some tips from our team of local organizers. Contact us for more information. + +drawing + +## Map + +In this map, you will find different layers of information: event venue, restaurants, transport, sport, sights,... Check it out! + + + +## City Highlights + +Nice spots to go for a walk: + +- [Vegueta](https://goo.gl/maps/LbVtKHqAGfRahmoG7): The old town of Vegueta in Las Palmas de Gran Canaria is where all historical and architectural tales begin in the island. 
As such, it is a bustling district filled with sophisticated Spanish architecture, lively cobblestone streets, and surviving religious traditions. +- [From Las Canteras beach to El Confital](https://goo.gl/maps/XRJ6Mjx5zKKuN5ju8): Directly from Hotel Cristina (venue for Project Week), you can walk to the north along Las Canteras beach. After reaching the end of the beach, you can continue walking towards El Confital. This route offers great views of the whole city of Las Palmas de Gran Canaria and great spots to see the sunset. + +Rooftops: +- [Hotel Aloe Canteras](https://goo.gl/maps/BgBF8KmAT1v6pXMw5): great place to go for drinks with amazing views of Las Canteras beach. +- [Hotel LIVVO Lumm](https://goo.gl/maps/iAxBZUCReZCGnnRz6) +- [Hotel Santa Catalina](https://goo.gl/maps/z4Uz22SZ1d93XbTe9) + +Sport: +- [Beach volley](https://goo.gl/maps/wxCxpEpohU69ebAw5): Just in front of Hotel Cristina you will find the best spot to play beach volley in the island. Many people gather here everyday to enjoy beach volley. Even if you don't feel like joining for a game, it is still great to feel the atmosphere. This is the main meeting point for digital nomads living in Gran Canaria. +- [Surf](https://goo.gl/maps/PbRUcKjsHAkQ4nge7): Las Palmas de Gran Canaria is a great place to practice surf. The south end of Las Canteras beach (La Cicer) is the best spot in the city. Around that area you will find many stores where you can rent a surf board and a wetsuit. +- [Fitness](https://goo.gl/maps/Pqbf2MZxveHLMS9TA): There are several spots to calisthenics around the city. I recommend this [spot](https://goo.gl/maps/Pqbf2MZxveHLMS9TA) in the south end of Las Canteras beach. +- [Climbing](https://goo.gl/maps/mHAhoyXmKz37E3ou8): Indoors climbing gym in Las Palmas de Gran Canaria. +- [Basketball](https://goo.gl/maps/Dv7siSoZMttsNRNa8): Basketball courts near Las Canteras beach. 
+ + +## Contact info + +For additional recommendations, tips or questions about the city/island, please contact **David García Mato** via email (david.garcia@ebatinca.com) or in person during the Project Week. + +We are planning to create a dedicated Discord channel for travel tips and social activities. diff --git a/PW38_2023_GranCanaria/MONAILabel_Workshop.md b/PW38_2023_GranCanaria/MONAILabel_Workshop.md new file mode 100644 index 000000000..716a82ed4 --- /dev/null +++ b/PW38_2023_GranCanaria/MONAILabel_Workshop.md @@ -0,0 +1,60 @@ +# MONAI Label Workshop / Tutorial + +Workshop **was** held: **January 25, 2023 9-11am EST** + +**A recording of the workshop is here:** https://www.youtube.com/watch?v=KtPE8m0LvcQ + +Space will be limited so **sign up by January 22nd 2023** + +The goal of this workshop is to prepare users and developers to make use of MONAI and MONAI Label in their work at [Project Week 38](README.md). + +To request participation, please fill out [this registration form](https://docs.google.com/forms/d/1LmMofTzwlxNgOTgQanGddeKQKfNZwdoRr7l96-Ht1h8/edit). + +Each participant will be allocated a cloud-hosted GPU workstation provided by [AWS AppStream](https://aws.amazon.com/appstream2/faqs/). +Instances will be allocated in the US and EU based on the location of the participant. See [this information about bandwidth considerations](https://docs.aws.amazon.com/appstream2/latest/developerguide/bandwidth-recommendations-user-connections.html). + + + +## Background information + +Please refer to the information and watch the videos of the previous workshops. The current workshop will serve as a refresher and update of the previous ones. 
+* [January 12, 2022](https://discourse.slicer.org/t/monailabel-3d-slicer-for-cloud-computing-workshop-jan-12-2022-2-4-est/21152) ([video](https://youtu.be/PmD8umlcpF4)) +* [June 22, 2022](https://github.com/NA-MIC/ProjectWeek/blob/master/PW37_2022_Virtual/MONAILabel_Workshop.md) ([video](https://www.youtube.com/watch?v=wtiEe_jiUzg)) + +MONAILabel itself is described [in this paper](https://arxiv.org/abs/2203.12362). + + +## Agenda + +* Intro and welcome (Steve Pieper - 1 minute) +* Overview of MONAI Events: workshop and bootcamp (Michael Zeyphyr - 1 minute) +* Overview of MONAI Label (Andres Diaz-Pinto, 15 minutes) + * Intro for first time attendees / refresher for return attendees + * What's new in MONAI / MONAI Label since the last workshops? + * What's the overall architecture for MONAI model from training to deployment (e.g. considerations for someone doing project planning) +* [3D Slicer and MONAI on AWS](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/SlicerCloud/) (2 minutes, Rudolf Bumm / Steve Pieper) +* Preparation for Hands-on session on AWS (8 minutes, Qing Liu, Alex Lemm) +* What clinical/research applications are being developed with MONAI Label and what's the general status of them (10 minutes each) + * Lungs: Rudolf Bumm + * Stroke: Ken Butcher + * Cancer spines: Ron Alkalay +* Deeper dive into application example use cases (15 minutes, Andres Diaz-Pinto) + * data format and preparation + * python code needs to be edited + * what decisions need to be made + * how to organize the training, etc. +* Hands-on session with pre-trained models + Q&A discussions (remainder of the time) + +Note that right after this workshop starting at 11am EST, Nvidia is holding two longer MONAI Bootcamp sessions on the 25th and 26th of January: [see information here](https://events.nvidia.com/janmonaibootcamp). 
+ +## Event logistics + +* The zoom link info is: [https://us06web.zoom.us/s/85353009880](https://us06web.zoom.us/s/85353009880) + +* We will use the Project Week discord for tech support and discussion during the workshop. Please sign up here: https://discord.gg/d5Q6b5ug8u + +* Please use the same email for discord that you used to sign up for the workshop and also please use your first and last names and organization in your discord name. + +* The workshop will be recorded and made available. + +*Big thanks in advance to the AWS, NVIDIA, IDC, and Slicer teams for providing this activity!* diff --git a/PW38_2023_GranCanaria/Projects/3DSlicerInternationalization/PW38_SlicerInternationalization.png b/PW38_2023_GranCanaria/Projects/3DSlicerInternationalization/PW38_SlicerInternationalization.png new file mode 100644 index 000000000..b5427a599 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/3DSlicerInternationalization/PW38_SlicerInternationalization.png differ diff --git a/PW38_2023_GranCanaria/Projects/3DSlicerInternationalization/README.md b/PW38_2023_GranCanaria/Projects/3DSlicerInternationalization/README.md new file mode 100644 index 000000000..3861331b6 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/3DSlicerInternationalization/README.md @@ -0,0 +1,67 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# 3D Slicer Internationalization + +## Key Investigators + +- Sonia Pujol, (Brigham and Women's Hospital, Harvard Medical School, Boston, MA, USA) +- Steve Pieper (Isomics Inc., Cambridge, MA, USA) +- Andras Lasso (Queen's University, Kingston, Canada) +- Mamadou Camara (Cheikh Anta Diop University, Dakar, Senegal) +- Mouhamed DIOP (Cheikh Anta Diop University, Dakar, Senegal) +- Adama Wade (Cheikh Anta Diop University, Dakar, Senegal) +- Mohamed Alalli Bilal (Cheikh Anta Diop University, Dakar, Senegal) +- Ahmedou Moulaye Idriss (Faculty of Medicine of Nouakchott, Mauritania) +- Yahya Tfeil (Faculty of Medicine of Nouakchott, Mauritania) 
+- Adriana H. Vilchis González (Universidad Autónoma del Estado de México, Mexico) +- Luiz Otavio Murta Junior (University of Sao Paulo, Ribeirao Preto, Brazil) +- Attila Tanács (University of Szeged, Hungary) +- Attila Nagy (University of Szeged, Hungary) + +# Project Description + +The goal of the project is to develop a novel software infrastructure to enable the localization of 3D Slicer into multiple languages. +The project is funded through two Essential Open Source Software for Science awards of the Chan Zuckerberg Initiative. + +## Objective + + + +1. To identify members of the Slicer community interested in new Slicer activities in their language +2. To run daily translation hackathons in the languages represented at PW38 +3. To implement an infrastructure for the internationalization of CTK-based code in 3D Slicer +4. To automate Qt download in Slicer Language Packs Extension + +## Approach and Plan + + + + Daily Slicer internationalization session from 10 am to 11 am EST with members of the Slicer community + + Anyone is welcome to join the session that will be held online: + + * **Tuesday, Jan.31**: https://meet.google.com/umd-avcb-xom + * **Wednesday, Feb.1st**: same Zoom link as for the preparation meetings and breakout sessions + * **Thursday, Feb. 2nd**: same Zoom link as for the preparation meetings and breakout sessions + + New international members, please fill in the [PW38 Slicer internationalization form](https://forms.gle/iinkdKvN4ZG2vv2o6) prior to joining the session. 
+ +## Progress and Next Steps + + + +- Non-translatable text in Welcome module: fix submitted +- List of languages in the Weblate / Languages combobox is now populated by querying the server +- New translations to Spanish, Portuguese, Farsi and Hungarian + + +# Illustrations + + +![Translation of Slicer Welcome module to French, Spanish, Portuguese and Hungarian](PW38_SlicerInternationalization.png) + + + +# Background and References + + diff --git a/PW38_2023_GranCanaria/Projects/3DSlicerInternationalization/SlicerInternationalization.png b/PW38_2023_GranCanaria/Projects/3DSlicerInternationalization/SlicerInternationalization.png new file mode 100644 index 000000000..0700fbb70 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/3DSlicerInternationalization/SlicerInternationalization.png differ diff --git a/PW38_2023_GranCanaria/Projects/ALI_CBCT/README.md b/PW38_2023_GranCanaria/Projects/ALI_CBCT/README.md new file mode 100644 index 000000000..e00ee8619 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/ALI_CBCT/README.md @@ -0,0 +1,46 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Automatic Landmark Identification in Cranio-Facial CBCT + +## Key Investigators +- Luc Anchling (UoM) +- Nathan Hutin (UoM) +- Maxime Gillot (UoM) +- Baptiste Baquero (UoM) +- Jonas Bianchi (UoM, UoP) +- Marcela Gurgel (UoM) +- Najla Al Turkestani (UoM) +- Marilia Yatabe (UoM) +- Lucia Cevidanes (UoM) +- Juan Prieto (UoNC) + + +# Project Description + +We propose a novel approach that reformulates anatomical landmark detection as a classification problem through a virtual agent placed inside a 3D Cone-Beam Computed Tomography (CBCT) scan. This agent is trained to navigate in a multi-scale volumetric space to reach the estimated landmark position. The agent's movement decisions rely on a combination of Densely Connected Convolutional Networks (DCCN) and fully connected layers. + +## Objective + + + +1. Retrain the different models with new data +1. 
Do some maintenance on the previously made code + +## Approach and Plan + + + +1. Use the available code to train with additional patient data for each landmarks + +## Progress and Next Steps + + + +1. ALI models are currently being retrained with new data + +# Illustrations +![Slicer screen](https://user-images.githubusercontent.com/46842010/174138265-66ab080e-e885-4f76-a150-7e4da3869aa0.png) + +# Background and References + + diff --git a/PW38_2023_GranCanaria/Projects/AMASSS_CBCT/README.md b/PW38_2023_GranCanaria/Projects/AMASSS_CBCT/README.md new file mode 100644 index 000000000..e1734ecb4 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/AMASSS_CBCT/README.md @@ -0,0 +1,101 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Automatic multi-anatomical skull structure segmentation of cone-beam computed tomography scans using 3D UNETR + +![Segmentation](https://user-images.githubusercontent.com/46842010/172177602-8cbfc188-9715-488a-ad2e-abb8d219536d.png) + +## Key Investigators + +- Luc Anchling (UoM) +- Nathan Hutin (UoM) +- Maxime Gillot (UoM) +- Baptiste Baquero (UoM) +- Celia Le (UoM) +- Romain Deleat-Besson (UoM) +- Jonas Bianchi (UoM, UoP) +- Antonio Ruellas (UoM) +- Marcela Gurgel (UoM) +- Marilia Yatabe (UoM) +- Najla Al Turkestani (UoM) +- Kayvan Najarian (UoM) +- Reza Soroushmehr (UoM) +- Steve Pieper (ISOMICS) +- Ron Kikinis (Harvard Medical School) +- Beatriz Paniagua (Kitware) +- Jonathan Gryak (UoM) +- Marcos Ioshida (UoM) +- Camila Massaro (UoM) +- Liliane Gomes (UoM) +- Heesoo Oh (UoP) +- Karine Evangelista (UoM) +- Cauby Chaves Jr +- Daniela Garib +- F ́abio Costa (UoM) +- Erika Benavides (UoM) +- Fabiana Soki (UoM) +- Jean-Christophe Fillion-Robin (Kitware) +- Hina Joshi (UoNC) +- Lucia Cevidanes (UoM) +- Juan Prieto (UoNC) + + +# Project Description + +The segmentation of medical and dental images is a fundamental step in automated clinical decision support systems. 
+It supports the entire clinical workflow from diagnosis, therapy planning, intervention, and follow-up. +In this paper, we propose a novel tool to accurately process a full-face segmentation in about 5 minutes that would otherwise require an average of 7h of manual work by experienced clinicians. +This work focuses on the integration of the state-of-the-art UNEt TRansformers (UNETR) of the Medical Open Network for Artificial Intelligence (MONAI) framework. +We trained and tested our models using 618 de-identified Cone-Beam Computed Tomography (CBCT) volumetric images of the head +acquired with several parameters from different centers for a generalized clinical application. Our results on a 5-fold cross-validation showed high accuracy and robustness with an Dice up to 0.962 pm 0.02. + +## Objective + + + +1. Do some maintenance to the previously made code +1. Train new segmentations of stable regions of reference for image registration models (Cranial Base, Mandible, Maxilla) + +## Approach and Plan + + + +1. Use the previously made code to train a model for the segmentation of the masks structures + +## Progress and Next Steps + + + +1. New segmentation models have been trained and tested +1. An extension has been added to this module to take segmentation files as input to generate vtk files +1. Train models to detect bone defects and patients with alveolar and palatal cleft +1. Dicom File can be used as input + +# Illustrations + +## 1. Different process to perform a CBCT segmentation +- Contrast correction and rescaling to the trained model spacing +- Use the UNETR classifier network through the scan to perform a first raw segmentation +- Post process steps to clean and smooth the segmentation +- Upscale to the original images size + +![prediction](https://user-images.githubusercontent.com/46842010/172177605-b2e5d91c-3e10-4608-9c2d-1e5f2dfcc261.png) + +## 2. 
Screen of the slicer module during a segmentation +- Selection of the different parameters and which structure to segment +- Use of a dialog progress bar to show/cancel the progress of the segmentation in real time (top right end corner). +- One the 3D view, result of one of the segmentation with the generated VTK files + +- A prediction takes from 120s to 300s for one patient depending on the local computer GPU capacity ( 15GB down to 3GB) + +![Screen slicer](https://user-images.githubusercontent.com/46842010/176789535-b7473878-fbeb-494d-988a-5ee1afa7d4fa.png) + +## 3. Use of AMASSS to generate mask for a defacing tool +- The scan intensity in the pink region ( mainely nose, lips and eyes ) will be set to 0 to make it impossible to identify the patient +- The bones segmentations are used to make sure we dont remove important informations during the process + +![mask for defaceing](https://user-images.githubusercontent.com/46842010/176813614-f9ec9123-4c34-4f8c-828f-ed4a84d30132.jpeg) + + +# Background and References + + diff --git a/PW38_2023_GranCanaria/Projects/ARinSlicer/README.md b/PW38_2023_GranCanaria/Projects/ARinSlicer/README.md new file mode 100644 index 000000000..602933c99 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/ARinSlicer/README.md @@ -0,0 +1,70 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# AR in Slicer + +## Key Investigators + +- Alicia Pose Díez de la Lastra (Universidad Carlos III de Madrid, Madrid, Spain) - [Presenter] +- Javier Pascau (Universidad Carlos III de Madrid, Madrid, Spain) +- Gabor Fichtinger (PerkLab, Queen's University , Kingston , Canada) +- Andras Lasso (PerkLab, Queen's University , Kingston , Canada) +- Adam Rankin (Robarts Research Institute / Western University, Canada) +- Csaba Pinter (Ebatinca S.L., Las Palmas de Gran Canaria, Spain) +- Lucas Gandel (Kitware, France) +- Jean-Christophe Fillion-Robin (Kitware, USA) +- Simon Drouin (École de Technologie Supérieure , Montreal , Canada) + +## Project 
Description +Up to date, there has been a lack of software infrastructure to connect 3D Slicer to any augmented reality (AR) device. This project presents a novel connection approach using Microsoft HoloLens 2 and OpenIGTLink. +This project has been developed in collaboration of [Universidad Carlos III de Madrid](https://biig-igt.uc3m.es/augmented-reality/) (Madrid, Spain) and Perk Lab in Queen's University. +The current solution is implemented in a 3 elements system. It is composed by A Microsoft HoloLens 2 headset, the Unity software, and the 3D Slicer platform. + +## Objective +Create a universal module in 3D Slicer that sends all types of messages via OpenIGTLink. + + +## Approach and Plan +1. 3D Slicer creates an OpenIGTLink server. +2. Unity, containing the AR application, creates an OpenIGTLink client that connects to the server. +3. When the application is executed in the Unity editor, it starts sending and receiving messages from 3D Slicer. Simultaneously, it wirelessly streams the app to Microsoft HoloLens 2 using Holographic Remoting. + + + +## Progress and Next Steps +We have already developed an application that transfers geometrical transform and image messages between the platforms. +It displays CT reslices of a patient in the AR device. The user wearing the glasses can manipulate the CT plane to see different perspectives. +The application was build for pedicle screw placement planning. + + +![20221213_161232_HoloLens](https://user-images.githubusercontent.com/66890913/212931527-035baf4c-4799-4d83-9c60-b8a0f839547e.jpg) + + +The final version will be able to transfer any type of messages. +To do so, we have to create necessary scripts in 3D Slicer and also in Unity (C#). + + + +## Background and References +Check out our app in [this GitHub repository](https://github.com/BIIG-UC3M/HoloLens2and3DSlicer-PedicleScrewPlacementPlanning). +This repository contains all the resources and code needed to replicate our work in your computer. 
+ +Transfer of geometrical transforms from HoloLens 2 to 3D Slicer: + +![MovingSpine_GIF](https://user-images.githubusercontent.com/66890913/214097820-96b9f875-4651-4efd-879b-831eb88b7b07.gif) + +Transfer of images from 3D Slicer to HoloLens 2: + +![MovingCT_GIF](https://user-images.githubusercontent.com/66890913/214097469-17a1aa1a-2768-4f73-8c12-bb4ab7d393f0.gif) + + +## Outcomes +Use this system for multiple HL2 interaction: + +![Interaction2HL2_Simon_GIF](https://user-images.githubusercontent.com/66890913/216575916-e37b6a07-aab1-4710-b709-21ce56271eeb.gif) + +![Interaction2HL2_Leo_GIF](https://user-images.githubusercontent.com/66890913/216621686-27d7ec42-8ad3-400c-b90c-da59a2a92358.gif) + +## Acknowledgements +Research supported by projects PI122/00601 and AC20/00102  (Ministerio de Ciencia, Innovación y Universidades, Instituto de Salud Carlos III, Asociación Española Contra el Cáncer and European Regional Development Fund / EU “Una manera de hacer Europa”), project PerPlanRT (under the frame of ERA PerMed), TED2021-129392B-I00, TED2021-132200B-I00 and PID2023-149604OB-I00 (MCIU/AEI/10.13039/501100011033 and European Union “NextGenerationEU”/PRTR) and Comunidad de Madrid (MAGERIT-CM TEC-2024/COM-44 and Multiannual Agreement with UC3M SAFEDELIVERY-CM-UC3M). 
+ + diff --git a/PW38_2023_GranCanaria/Projects/ASO_CBCT/README.md b/PW38_2023_GranCanaria/Projects/ASO_CBCT/README.md new file mode 100644 index 000000000..6722576d2 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/ASO_CBCT/README.md @@ -0,0 +1,204 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Automated Standardized Orientation for Cone-Beam Computed Tomography (CBCT) + + + + +## Key Investigators + +- Luc Anchling (UoM) +- Nathan Hutin (UoM) +- Maxime Gillot (UoM) +- Baptiste Baquero (UoM) +- Jonas Bianchi (UoM, UoP) +- Antonio Ruellas (UoM) +- Felicia Miranda (UoM) +- Selene Barone (UoM) +- Marcela Gurgel (UoM) +- Marilia Yatabe (UoM) +- Najla Al Turkestani (UoM) +- Hina Joshi (UoNC) +- Lucia Cevidanes (UoM) +- Juan Prieto (UoNC) + + +# Project Description + + +To develop a standardized head orientation approach for medical and dental images is crucial to improve the reliability of automated image analysis towards clinical decision-making. Manual and user-dependent head orientation is time-consuming and prone to errors. For this reason, this study aims to automatically obtain the desired standardized orientation of Cone Beam Computed Tomography scans, regardless of the patient's positioning during the scan or any CT scanner initialization changes. + +The Automated Standardized Orientation (ASO) tool presented in this work automatically identifies landmarks on 3D volumes regardless of orientation, using a deep learning landmark identification algorithm that handles images with random orientation ([ALI_CBCT](../ALI_CBCT/README.md)). ASO uses a landmark-based registration approach to automatically orient a 3D volume to a common space. The method aligns the identified landmarks to a set of reference ones. The method starts by aligning 3 randomly chosen landmarks and refines their position using an Iterative Closest Point (ICP) transform. The tool also allows user-selected landmarks for precision purposes. 
All the transforms computed during this process are concatenated and the final transform is applied to the CBCT volume. + +To make ASO more robust, a pre-orientation algorithm has been developed. This part uses a deep learning algorithm to identify the head orientation and then rotates the volume to the desired orientation. This algorithm is currently being tested and will be implemented in the ASO module. The training has been realized with random rotations. + +## Objective + + + +1. Create a Slicer Module to use this algorithm with CBCT files +1. Make the algorithm more robust to different head orientations +1. Do some maintenance to the previously developed ASO module + +## Approach and Plan + + + +1. Develop in collaboration with Nathan Hutin ([ASO_IOS](../AutomaticStandardizeOrientation_IOS/README.md +)) a Slicer Module to make ASO work for both IOS and CBCT files +1. Implement the pre-orientation algorithm to this module +1. Use CLI version of previously developped code to make ASO FULLY-Automated (without any input from the user) + +## Progress and Next Steps + + + +1. Slicer Module has been developed: + - In a first step, only a SEMI-Automated version has been implemented (with scan and landmark files as inputs) + - In a second step, a FULLY-Automated version has been developed (with ONLY scan files as inputs and ALI module running in the background) +1. Pre-orientation algorithm, DenseNet169 from MONAI library, has been implemented in the ASO module +1. Receive input before deploying ASO to SlicerAutomatedDentalTool Extension + +ReadMe +# Automated Standardized Orientation (ASO) + +Automated Standerized Orientation (ASO) is an extension for **3D Slicer** to perform automatic orientation either on IOS or CBCT files. 
+ +## ASO Modules + +ASO module provide a convenient user interface allowing to orient different type of scans: +- **CBCT** scan +- **IOS** scan + +> To select the *Input Type* in the Extension just select between CBCT and IOS here: +> + +## How the module works? + +### 2 Modes Available (Semi or Fully Automated) +- **Semi-Automated** (to only run the landmark-based registration with landmark and scans as input) +- **Fully-Automated** (to perform Pre Orientation steps, landmark Identification and ASO with only scans as input) + +| Mode | Input | +| ----------- | ----------- | +| Semi-Automated | Scans, Landmark files | +| Fully-Automated | Scans, ALI Models, Pre ASO Models (for **CBCT** files), Segmentation Models (for **IOS** files) | + +> To select the *Mode* in the Extension just select between Semi and Fully Automated here: +> + +> The **Fully-Automated** Mode `Input` section is slightly different: +> + + +### Input file: + +| Input Type | Input Extension Type | +| ----------- | ----------- | +| **CBCT** | .nii, .nii.gz, .gipl.gz, .nrrd, .nrrd.gz | +| **IOS** | .vtk | + +> To select the *Input Folder* in the Extension just select your folder with Data here: +> + +The input has to be IOS with teeth's segmentation. +The teeth's segmentation can be automatically done using the [SlicerDentalModelSeg](https://github.com/DCBIA-OrthoLab/SlicerDentalModelSeg) extension. +The IOS files need to have in their name the type of jaw (Upper or Lower). + +**Test Files Available:** +You can either download them using the link or by using the `Download Test Files`. 
+| Module Selected | Download Link to Test Files | Information | +| ----------- | ----------- | ----------- | +| **Semi-CBCT** | [Test Files](https://github.com/lucanchling/ASO_CBCT/releases/download/TestFiles/Occlusal_Midsagittal_Test.zip) | Scan and Fiducial List for this [Reference](https://github.com/lucanchling/ASO_CBCT/releases/download/v01_goldmodels/Occlusal_Midsagittal_Plane.zip)| +| **Fully-CBCT** | [Test File](https://github.com/lucanchling/ASO_CBCT/releases/download/TestFiles/Test_File.nii.gz) | Only Scan| +| **Semi-IOS** | | Mesh and Fiducial List| +| **Fully-IOS** | | Only Mesh | + +### Reference: + +The user has to choose a folder containing a **Reference Gold File** with an oriented scan with landmarks. +You can either use your own files or download ours using the `Download Reference` button in the module `Input section`. +| Input Type | Reference Gold Files | +| ----------- | ----------- | +| **CBCT** | [CBCT Reference Files](https://github.com/lucanchling/ASO_CBCT/releases/tag/v01_goldmodels) | +| **IOS** | [IOS Reference Files](https://github.com/HUTIN1/ASO/releases/tag/v1.0.1) | + +> To select the *Reference Folder* in the Extension just select your folder with Reference Data here: +> + +### Landmark selection + +The user has to decide which **landmarks** he will use to run ASO. + +| Input Type | Landmarks Available | +| ----------- | ----------- | +| **CBCT** | Cranial Base, Lower Bones, Upper Bones, Lower and Upper Teeth | +| **IOS** | Upper and Lower Jaw | + +For IOS: The user has to indicate array name of labels in the vtk surface. By default the name is PredictedID. 
+ +> The landmark selection is handled in the `Option` Section: + +For IOS: + + + + +For CBCT: + + + +### Models Selection + +For the **Fully-Automated** Mode, models are required as input, use the `Download Models` Button or follow the following instructions: + +#### For CBCT ([Details](https://github.com/lucanchling/ASO#aso-cbct)): +A *Pre-Orientation* and *ALI_CBCT* models are needed + +> To add the *Pre-Orientation* models just download [PreASOModels.zip](https://github.com/lucanchling/ASO_CBCT/releases/download/v01_preASOmodels/PreASOModels.zip), unzip it and select it here: +> + +> To add the *ALI_CBCT* models go to this [link](https://github.com/Maxlo24/ALI_CBCT/releases/tag/v0.1-models), select the desired models, unzip them in a single folder and select it here: +> + + +#### For IOS: + +> INSERT YOUR BLABLA HERE To add the *Pre-Orientation* models just download [PreASOModels.zip](https://github.com/lucanchling/ASO_CBCT/releases/download/v01_preASOmodels/PreASOModels.zip), unzip it and select it here: +> + +### Outputs Options +> You can decide the *Extension* that the output files will have and the folder where they will go in here: +> + +### Let's Run it +> Now that everything is in order, just press the `Run` Button in this section: +> + + +## Algorithm +The implementation is based on iterative closest point's algorithm to execute a landmark-based registration. Some preprocessing steps are done to make the orientation works better (and are described respectively in **CBCT** and **IOS** part) + +### ASO CBCT +**Fully-Automated mode:** +1. a deep learning model is used to predict head orientation and correct it. +Models are available for download ([Pre ASO CBCT Models](https://github.com/lucanchling/ASO_CBCT/releases/tag/v01_preASOmodels)) + +1. 
a Landmark Identification Algorithm ([ALI CBCT](https://github.com/DCBIA-OrthoLab/ALI_CBCT)) is used to determine user-selected landmarks + +1. an ICP transform is used to match both of the reference and the input file + +For the **Semi-Automated** mode, only step **3** is used to match input landmarks with reference's ones. + +**Description of the tool:** +MethodASO + +### ASO IOS +# Acknowledgements +Nathan Hutin (University of Michigan), Luc Anchling (UoM), Felicia Miranda (UoM), Selene Barone (UoM), Marcela Gurgel (UoM), Najla Al Turkestani (UoM), Juan Carlos Prieto (UNC), Lucia Cevidanes (UoM) + + +# License +It is covered by the Apache License, Version 2.0: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/PW38_2023_GranCanaria/Projects/AnalyticRegistrationVerification/README.md b/PW38_2023_GranCanaria/Projects/AnalyticRegistrationVerification/README.md new file mode 100644 index 000000000..655eb6d81 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/AnalyticRegistrationVerification/README.md @@ -0,0 +1,49 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Analytic Registration Verification + +## Key Investigators + +- Gerry Gralton (University of Western Australia) +- Andy Huynh (University of Western Australia) +- Benjamin Zwick (University of Western Australia) + +# Project Description + +We are aiming to combine a few existing 3D Slicer modules into a single easy-to-use module that allows the verification of a given registration with the underlying, fully known displacement field. This will give researchers the ability to test the efficacy of different registration algorithms, different image resolutions and different gradient configurations. + +This will be achieved by analytically describing a displacement field and warping a set of images with that field using the Scattered Transform module. This pair of images can the be used to test and verify the accuracy of any registration algorithm within the Slicer environment. 
Accuracy of the registration could be reported globally, at specific points, or even on segmented structures within the images.
Stereotaxy Platform, https://institutducerveau-icm.org/en/cenir-stim/) +- Eric Bardinet (ICM, Paris Brain Institut. CENIR Platform. ) +- Severine Chandelier (ICM, Paris Brain Institut. Scientific Computing. IT Departement.) + +# Project Description + +The YEB Atlas is a Basal Ganglia atlas used in routine clinical practice at the Pitié Salpêtrière Hospital in DBS-related procedures, both preoperatively (planning/targeting) and postoperatively. +It is also widely used in multicenter research studies. We cannot share the atlas as a fully open free atlas because it is bound to Medtronic by a strict license agreement. That's why in 2021, we set up a web service for the YEB atlas that allows to get deformed atlas regions on the uploaded anatomical MRI in NIFTI format. [WEB PAGE](https://yeb-cenir.icm-institute.org/). + +Currently we are setting up a REST API on the same server. +The main idea is that this new extension will connect with the API to get a set of regions (we will give some choices) of the atlas in a anatomical brain MRI. +One of the advantages using 3D Slicer is that the user will be using other formats than nifti that is actually the only allowed on the web server platform. +A second advantage is to use the segmentation module of 3D Slicer. +Another utility should be to launch the process for a group of subjects. +This extension will also be used in a new module dedicated to assist the DBS targeting stage. + +## Objective + + + +1. The first objective is to create an initial prototype of the Extension with a curl request to de API and the import of the YEB segmentation to the Slicer Scene +2. + +## Approach and Plan + + + +1. To exchange with the 3D Slicer community about differents points lookup tables/ontologies and segmentations, requests, ... + + + +## Progress and Next Steps + + + +1. 
For this first prototype module, I launch the Atlas YEB registration from the Docker image (and not from the API) and create the segmentation objects from the results of the process.
The dataset builds a texture from the surface normals, and another texture showing a projection of the landmarks onto the surface. For each batch, the dataloader provides two surfaces: one with the normal texture and one with the landmark texture. We place cameras around the tooth, centered in 3D space, and each camera takes a picture. The images of the surface with the normal texture are given to the model. The loss function compares the output of the model with the images of the surface with the landmark texture, and we then backpropagate to improve the model.
a/PW38_2023_GranCanaria/Projects/AutomatedLandmarkingSupport/README.md b/PW38_2023_GranCanaria/Projects/AutomatedLandmarkingSupport/README.md new file mode 100644 index 000000000..a2aa12388 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/AutomatedLandmarkingSupport/README.md @@ -0,0 +1,65 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Automated Landmarking Support + +## Key Investigators + +- Sara Rolfe (SCRI) +- Chi Zhang (SCRI) +- Murat Maga (SCRI) +- Steve Pieper (Isomics) +- Andras Lasso (Perk Labs) + +# Project Description +We are developing features to support the use of large-scale landmark sets generated automaticallly by SlicerMorph modules. The key functions +needed by the SlicerMorph workflows are: +1. Linking two or more landmark sets with identical landmark definitions and operating on them jointly +2. Flexibly selecting and editing point sets in the 3D scene. +3. Placing a grid of landmarks, constraining to a surface, and adjusting the point poisitions by dragging the grid across the surface. +4. Improve performance when setting state of a large number of points (around 1000) + +The first function has not yet been attempted and we would like to develop a working prototype. Our second objective is implemented in the +[Markup Editor](https://github.com/SlicerMorph/Tutorials/tree/main/MarkupsEditor), a module in the SlicerMorph extension developed by Steve Pieper. Recently, this module has been failing for some cases. We would like to identify +when and why these bugs are occuring and come up with a plan to update the module. The third objective is partially implemented by the Surface Markups +extension. We would like to discuss future plans for the Surface Markups extensions and whether SlicerMorph user needs may be covered by the ongoing development. + + +## Objective + + + +1. Objective A. Prototype joint operations on linked landmark sets +2. Objective B. Troubleshoot the Markup Editor and identify fixes +3. 
Objective C. Identify overlap between Surface Markups development and SlicerMorph user needs. +4. Objective D. Document plan to improve Markups module to improve performance when working with large number of points. + +## Approach and Plan + + + +1. Meet with other heavy users and developers of Markups infrastructure (Csaba, Rafael, Davide...) :white_check_mark: +2. Debug MarkupEditor (Sara and Steve) :white_check_mark: +3. Many discussions about the future of Markups and integration with machine learning + +## Progress and Next Steps + + + +1. [Baseline prototype of python module for landmark linking complete](https://github.com/smrolfe/MarkupLinkTest). :white_check_mark: +2. Markup Editor fixes and updates committed to the SlicerMorph repository. :white_check_mark: +3. Follow up with SlicerHeart team regarding SurfaceMarkup prototype +4. Further develop plans based on productive work and conversations this week :wrench: + + +# Illustrations +MarkupEditorUpdate +NodeLinkPrototype +Automated landmarks on two specimens + + +# Background and References + + diff --git a/PW38_2023_GranCanaria/Projects/AutomatedLandmarkingSupport/SLM_image.gif b/PW38_2023_GranCanaria/Projects/AutomatedLandmarkingSupport/SLM_image.gif new file mode 100644 index 000000000..10c6e3de8 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/AutomatedLandmarkingSupport/SLM_image.gif differ diff --git a/PW38_2023_GranCanaria/Projects/AutomaticQuantitative3DCephalometrics/README.md b/PW38_2023_GranCanaria/Projects/AutomaticQuantitative3DCephalometrics/README.md new file mode 100644 index 000000000..4de94e79b --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/AutomaticQuantitative3DCephalometrics/README.md @@ -0,0 +1,107 @@ +Back to [Projects List](../../README.md#ProjectsList) + +Automatic Quantification 3D Components + +## Key Investigators +- Nathan Hutin (University of Michigan) +- Luc Anchling (University of Michigan) +- Baptiste Baquero (University of Michigan) +- Maxime 
Gillot (University of Michigan) +- Lucia Cevidanes (University of Michigan) +- David Allemang (Kitware Inc) +- Jean-Christophe Fillion-Robin (Kitware Inc) + +# Project Description +The Automatic Quantification 3D Components(AQ3DC) was developed during Namic-project 37 is now available in Slicer Q3DC extension. + +The Automatic Quantification 3D Components(AQ3DC) aims to provide a user-friendly automated tool that decrease user time for extraction of quantitative +image analysis features. +AQ3DC is a Slicer extension to automatically compute lists of measurements seleted by users for a single case or a whole +study sample, at one or more time points. +The current implementation is aimed at automatic computation of 3D components like distances (AP, RL and SI) +between points, points to line, midpoint between two points or angles (Pitch, Roll and Yaw), interpretation of directionality,which can be further extended to any type of desired computation/quantitative image analysis. The design of the user interface is currently aimed at quantification of craniofacial dental, skeletal and soft tissue structures. + +This project to to get input regarding : +1. refactoring of the code to maintainable and more robust +2. discuss updates that solve AQ3DC's issues +3. verify remaining duplicates and hard coded components +4. add tests to the module +5. add user documentation. + + + +- Project link : https://github.com/DCBIA-OrthoLab/Q3DCExtension +- Refactoring link : https://github.com/HUTIN1/Q3DCExtension + + + +## Objectives + +- A. Receive guidance on whether the new types help encapsulate certain components in the code. +- B. Receive guidance on how to correct overload of the Python protocols for Group_landmark, MyList, and MyDict. +- C. Receive input regarding how to improve other utilities like Line, Measure, Point, etc. +- D. 
Clarify new functionalities added to resolve issues or improve flexibility: + - Added Midpoints meaning (interpretation of direction); + - Added choice of complementary angle greater than 90 degrees + - Added new functionality that allows users to upload landmark legends as excel files. This modifies the currently deployed AQ3DC code that displays only specific craniofacial dental, skeletal and soft tissue structure landmarks that were hard coded, and any different landmarks would appear as "other" landmarks. + - Added report error to detect where in the computation or landmark list files an error occurred. This helps users to identify if for any patient or landmark their files have typos or missing data. +- E. Update Readme in GitHub DCBIA/OrthoLab +- F. Create documentation on SlicerCMF ( https://cmf.slicer.org/) + +## Approach and Plan + +1. Completed Users beta test of AQ3DC's refactoring +2. Review code's robustness and clarity +3. Pull requests the code. +4. Update SlicerCMF workflow to document and integrate with AQ3DC. + + +## Progress and Next Steps + +# Progress: +1. Refactoring codes +2. Resolved old issue +3. Weekly review of code clarity with David + +# Next steps : + +1. Add class test +1. Make the new types help encapsulate certain components in the code. +2. Correct overload of the Python protocols for Group_landmark, MyList, and MyDict. +3. Improve other utilities like Line, Measure, Point, etc. Update SlicerCMF workflow to document and integrate with AQ3DC. +4. Verify remaining duplicates and hard coded components +5. Add tests to the module +6. Add user documentation. +7. Update README +8. Pull request + + + +# Illustrations + + +# 1. Slicer Interface +![Screenshot from 2022-06-30 18-31-37](https://user-images.githubusercontent.com/83285614/176789715-f90c3ea5-faf6-4e49-bdf3-2683b18ce375.png) + +# 2. List of measurements exported. 
+![Screenshot from 2022-06-30 18-29-01](https://user-images.githubusercontent.com/83285614/176789814-29e76874-1060-4681-bbe3-a4853975f510.png) + +# 3. Results of the computation for all the list of measurement for a sample of patient. +![Screenshot from 2022-06-30 19-01-23](https://user-images.githubusercontent.com/83285614/176792428-d5c3cb6f-4e56-45c0-95e2-fb24798453a8.png) + +# 4. Skeletal measurements signs meaning. +![skeletal_measurement](https://user-images.githubusercontent.com/83285614/176794349-fa99dcc8-bdf7-4518-ba8e-01451ebf05d8.jpeg) + +# 5. Linear measurements signs meaning. +![linear_measurement](https://user-images.githubusercontent.com/83285614/176794371-c87e7cba-8242-4149-bbda-5e67e28859cc.jpeg) + +# 6. Angular measurements signs meaning. +![angular_measurement](https://user-images.githubusercontent.com/83285614/176794405-c1e283e6-bad2-4da5-b777-991e93c419ce.jpeg) + + +# Background and References + + diff --git a/PW38_2023_GranCanaria/Projects/AutomaticStandardizeOrientation_IOS/README.md b/PW38_2023_GranCanaria/Projects/AutomaticStandardizeOrientation_IOS/README.md new file mode 100644 index 000000000..6cc7b14d8 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/AutomaticStandardizeOrientation_IOS/README.md @@ -0,0 +1,66 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Automatic Standardize Orientation IOS + +## Key Investigators + +- Nathan Hutin (University of Michigan) +- Luc Anchling (University of Michigan) +- Marcela Gurgel (University of Michigan) +- Felicia Miranda (University of Michigan) +- Najla Al Turkestani (University of Michigan) +- Selene Barone (University of Michigan) +- Lucia Cevidanes (Univerisity of Michigan) +- Juan Prieto (University of North Carolina) + +# Project Description + + A correct relationship of the teeth in upper and lower dental arches of IntraOral Surface (IOS) scans depends on standardized spatial orientation +in an antero-posterior (yaw rotation), lateral (pitch rotation), and axial (roll 
rotation) planes induced by differences in acquisition of scans, growth and treatment. Serial IOSs have been used for evaluation and understanding of the changes resulting from +interactions of growth and treatment, as dental position and movement can be quantified by 3D linear and angular measurements based on homologous landmarks. + +Reliable and accurate measurements should be used for a precise diagnosis and for assessment of treatment outcomes. Inconsistency in the orientation of the IOS can lead to errors in measurements at different time points for the same patient and can adversely affect research conclusions and treatment plans. Up to now IOS orientation in Slicer has been manually performed using the Slicer Transforms module, but the manual orientation is time consuming and prone to inconsistencies. + + +This project develops a Slicer extension for Automated Standardized Orientation of IOS (ASOIOS). The Automated Standardized Orientation (ASO) tool presented in this project includes the following image processing steps: +1. Automatic identification of the centroid of the right and left first molar and the centroid of the buccal and lingual surfaces of one of the central incisors that determine perpendicular lines, and then determine a third line perpendicular to these first two. +2. Computation of the difference between the angles of these 3 lines and a new IOS and an IOS in a gold standard orientation. +3. Apply transform matrix to approximate the scans spatial orientation (pre-orientation) +4. Compute ICP between the centroid of the 3 reference teeth in each scan. +5. Automatically compute occlusal landmarks on IOS, using a deep learning landmark identification algorithm (ALIIOS). +6. ASOIOS then uses a landmark-based registration approach (ICP of the landmarks identified with ALIIOS) to automatically orient an IOS in a standardized spatial orientation. 
Available options include user choice of orienting each IOS separately or by pair of dental arches in occlusion; users can also choose which tooth to use to orient the scan. + +Link to GitHub repository https://github.com/lucanchling/ASO + +## Objective + + +1. Automatically orient IOS scans without failures +2. Receive feedback to improve my code and facilitate future maintenance +3. Display error window +4. Improve progress bar +5. Document the code and README file. +6. Include test cases + +## Approach and plan +1. Develop in collaboration with Luc Anchling [ASO_CBCT](../ASO_CBCT/README.md) a Slicer Module, ASO, that will be deployed as part of Slicer Automated Dental Tools. + + +## Progress and Next Steps + +### Progress +1. Add user documentation in [README](https://github.com/lucanchling/ASO/blob/main/README.md) + +### Next Step + +1. Receive feedback to improve my code and facilitate future maintenance +2. Beta Extension for internal users testing prior to deployment + +# Illustrations +### User interface +![aso_user_interface](https://user-images.githubusercontent.com/72212416/214947035-c955bbc0-4a6a-4687-9ed0-2ece672f8284.png) +### Oriented Output Example +![aso_demonstation](https://user-images.githubusercontent.com/72212416/214982300-d9174a64-5c28-41dd-b26c-9264bf3d852b.png) +Green : scan without orientation. +Red : Scan at the 5th step. +Yellow : Final Orientation. 
diff --git a/PW38_2023_GranCanaria/Projects/BreakoutSession-Atlases.md b/PW38_2023_GranCanaria/Projects/BreakoutSession-Atlases.md new file mode 100644 index 000000000..cd2c63c5f --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/BreakoutSession-Atlases.md @@ -0,0 +1,55 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Breakout Session: Anatomy Atlases and Atlas Development + +## Key Investigators + +- Michael Halle +- the atlas development community + +# Session Description + +This breakout session about atlases is designed to allow members of our community to +discuss the work they are doing to create atlases and use them in different applications. +We will also discuss new technologies in 3D Slicer that may be useful to atlas developers, +as well as challenges that the community may face. + +Please share the work that you are doing, and how you are using atlases! + +Atlases are datasets that serve as references, standards, examples, or teaching tools. +Recently, there has been an exciting growth in the development of new anatomic and +biomedical atlases. The availability of 3D Slicer has helped enable this growth. Today, +technologies using machine learning and automatic segmentation offer new opportunities +for atlas creation and use. The OpenAnatomy project at Brigham and Women's Hospital +is working to create a community dedicated to the creation of high quality +anatomical atlases. + +For this session, we are scheduled to be joined by Anatomist Paul Neumann, the current +editor of Netter's Atlas of Human Anatomy and a partner with the OpenAnatomy Project in the +creation of TA2Viewer (https://ta2viewer.openanatomy.org). + +We also hope to be joined by members of the UCLA Cardiac Arrhythmia Center and the [Amara Yad](https://www.uclahealth.org/medical-services/heart/arrhythmia/about-us/amara-yad-project) +project, who are developing open atlases based on a particularly noble cause. 
Their first +atlas, is an open access cardiac atlas: [Atlas of Cardiac Anatomy Vol. 1](https://drive.google.com/file/d/1yvaYaKXc517YzPWJL474dS_FlLH4_Cwe/view). + +## Rough schedule: + +- Community presentations about atlas development (please contribute!) +- New and in-development features in 3D Slicer + - Machine learning tools (TotalSegmentator, SynthSeg...) + - VTK SurfaceNets multi-object surface creation (in development) +- Shared Challenges + - Terminology (how to label atlas objects) + - Sharing and distribution of atlases - OpenAnatomy exporter + - Building a community of atlas creators and users + +# Illustrations + + + +# Background and References + + diff --git a/PW38_2023_GranCanaria/Projects/ClinicalPanel-BreakoutSession.md b/PW38_2023_GranCanaria/Projects/ClinicalPanel-BreakoutSession.md new file mode 100644 index 000000000..318d62df8 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/ClinicalPanel-BreakoutSession.md @@ -0,0 +1,32 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Panel: clinical uses of 3D Slicer + +## Key Investigators + +- Rudolf Bumm +- Alexandra Golby +- Babacar Diao +- Ron Alkalay +- Lucia Cevidanes +- Andras Lasso + +# Session Description + +This panel will gather clinicians to discuss how do they use 3D Slicer in their practice. The panel will be moderated by Rudolf Bumm. The goal is to provide feedback and answer questions from the community, both from engineers interested in the point of view of clinicians and from other clinians interested in the possibilities of Slicer. Here's a tentative list of suggested topics discussed during preparation meetings: +- How do clinicians anonymize, annotate and share data? (Rudolf) +- How to test the effectiveness of Slicer-based tools in the clinic? (Lucia) +- Collaborators in Africa use Slicer atlases for training, but would like to move to intervention. There is a big gap, how to fill it? 
(Gabor) +- Slicer can be used to 1) build very specific applications with lots of constraints or 2) by curious clinicians to start doing research based on annotating and measuring on imaging. (Alexandra G.) +- What can we do to make Slicer more conformant with existing commercial clinical and radiologic tools? (Andras) + +# Illustrations + + + +# Background and References + + diff --git a/PW38_2023_GranCanaria/Projects/CoursesMedicalImaging/README.md b/PW38_2023_GranCanaria/Projects/CoursesMedicalImaging/README.md new file mode 100644 index 000000000..1e5ec91d8 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/CoursesMedicalImaging/README.md @@ -0,0 +1,179 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Setting up University Courses on Computer Assisted Medical Imaging, Manufacturing and Interventions using Open Source Technologies and 3D Slicer + +## Key Investigators + +- Juan Ruiz (ULPGC) +- Idafen Santana (ULPGC) +- Mario Monzón (ULPGC) +- María Rosa Rodríguez (ULPGC) +- Marie Ndiaye y Sidi el Wafi +- Mamadou Samba Camara +- Adama Faye +- Idy Diop +- Mouahmed Diop +- Papa Alioune Cisse +- Youssou Faye +- Mame Diarra Sy +- Ahmed Dhahara Kane +- Attila Tanács (University of Szeged) +- Attila Nagy (University of Szeged) + +# Project Description + +In order to meet the demand for professionals in the field of computer assisted medical imaging, manufacturing and interventions, universities need to set up courses that provide students with the necessary knowledge and skills. Designing a university course on this topic requires careful consideration of the syllabus and design materials that will be used. This project will explore how universities can create a syllabus and design material for a course on computer assisted medical imaging, manufacturing and interventions using open source technology. + +## Objective + +1. Define the topics that should be covered in the syllabus +1. 
Select learning resources and bibliography to support the course contents +1. Design materials to be used in the lessons based on open source technologies +1. Develop common contents to facilitate exchange of personnel and students from different countries + +## Approach and Plan + +1. Explore the competences that are required in the professional market to define the course content +1. Discuss with the 3D Slicer community how to create materials based on open source technology and integrate them on a syllabus +1. Propose a biomedical engineering course program + +## Progress and Next Steps + +We have designed the following syllabus proposal: + +**Computer Assisted Medical Imaging and Interventions (60 hours – 6 ECTS)** + +An introductory course that uses 3D Slicer to demonstrate all the necessary concepts. + +- 40 hours (4 ECTS) to introduce all the main concepts along with practical demos using the well-known medical image computing open software ecosystem 3D Slicer split into 10 subjects, each one to be taught in two classes of two hours. + - Subject 1 provides a general introduction to medical images + - Subjects 2 trough 9 use Slicer Notebooks on Jupyter Lab + - Subject 10 develops an introductory application using Slicer + +- 20 hours (2 ECTS) to develop a practical use-case on 3D Slicer as a self-contained application. Projects offered: + - Creating a Virtual Reality application in Slicer + - Integration of a deep learning model to segment medical images + - Developing a module for image guided therapy (IGT) + +**Concepts (40 hours)** + +1. Introduction to medical imaging modalities +
    +
  1. Digital images (general intro, not specific to medical imaging)
  2. +
  3. Ultrasound
  4. +
  5. X-Ray, mammography, fluoroscopy
  6. +
  7. Digital volumetric images (general intro, not specific to medical imaging)
  8. +
  9. Computed Tomography (CT)
  10. +
  11. Magnetic Resonance Imaging (MRI)
  12. +
+ +2. Loading, storing & visualizing medical images +
    +
  1. File formats commonly used with images (general intro, not specific to medical imaging)
  2. +
  3. File formats for medical imaging research
  4. +
  5. An introduction to DICOM
  6. +
  7. The visualization model. Rendering scenes composed of multimodal data (general intro, not specific to medical imaging)
  8. +
  9. Digital volumetric images (general intro, not specific to medical imaging)
  10. +
  11. Surface vs volume rendering (general intro, not specific to medical imaging)
  12. +
  13. Putting it all together with 3D Slicer
  14. +
+ + 3. Segmentation of medical images (2D and 3D) +
    +
  1. Computer-assisted manual segmentation
  2. +
  3. A first introduction to automated segmentation using simple methods
  4. +
  5. Contour and surface extraction from image segments
  6. +
  7. Putting it all together with 3D Slicer
  8. +
+ + 4. Working with physical models in medical applications +
    +
  1. An introduction to 3D printing for medical applications
  2. +
  3. Building your own “phantoms” (homemade physical models)
  4. +
  5. Commercial manikins for clinical training
  6. +
  7. Virtualizing phantoms and manikins
  8. +
+ + 5. Building and rendering scenes with VR/AR in medical applications +
    +
  1. Importing virtual models of medical devices and clinical environments
  2. +
  3. Extracting anatomy surface models from 3D image data
  4. +
  5. Visualizing the scene on conventional, VR/AR and holographic displays
  6. +
  7. An introduction to collaborative VR
  8. +
  9. Putting it all together with 3D Slicer
  10. +
+ + 6. Registration of medical image data +
    +
  1. 2D and 3D image registration
  2. +
  3. Model (point cloud / surface) registration
  4. +
  5. Image-model registration
  6. +
  7. Putting it all together with 3D Slicer
  8. +
+ + 7. Quantitative analysis from medical image data +
    +
  1. Feature extraction workflow
  2. +
  3. Following-up variations through time (single individual)
  4. +
  5. Comparing individuals
  6. +
  7. Putting it all together with 3D Slicer
  8. +
+ +**ADVANCED TOPICS** + + 8. Advanced segmentation of medical images using neural networks +
    +
  1. An introduction to neural networks for image segmentation using Pytorch
  2. +
  3. An introduction to Monailabel for NN training with medical images
  4. +
  5. An introduction to inference with Pytorch and Monailabel
  6. +
  7. Putting it all together with 3D Slicer
  8. +
+ + 9. An introduction to image guided therapy (IGT) +
    +
  1. Geometrical transforms
  2. +
  3. Tracking systems
  4. +
  5. Intraoperative imaging
  6. +
  7. Putting it all together with 3D Slicer and the Plus Toolkit: building navigation systems
  8. +
+ +10. Building simple medical applications with 3D Slicer (IGT) +
    +
  1. An introduction to the SW architecture of 3D Slicer
  2. +
  3. The MRML scene description
  4. +
  5. An introduction to Qt and Qt designer
  6. +
  7. Using widgets in 3D Slicer
  8. +
  9. Taking advantage of the available logic in 3D Slicer
  10. +
  11. Putting it all together in a simple 3D Slicer application
  12. +
+ +**Use cases (20 hours)** + +The projects offered are based on the concepts studied in subjects 5, 8 and 9: + +1. Creating a Virtual Reality application in Slicer (Subject 5): +
    +
  1. Learn the basic actions needed to interact with objects and move around the Slicer scene using the VR controllers.
  2. +
  3. Develop a virtual reality system for medical applications which allows the user to share and interact in real time with data from medical images.
  4. +
+ + +2. Segmentation of medical images using neural networks (Subject 8): +
    +
  1. Develop a module in 3D Slicer to run the inference of a PyTorch model for the segmentation of medical images.
  2. +
+ +3. Developing a Python module for image guided therapy (IGT) (Subject 9): +
    +
  1. The objective is to create a module in 3D Slicer to load navigation data and perform fiducial-based registration.
  2. +
+ + +# Background and References + +Examples of material which could be used to supplement the course content: +* _Biomedical Signal and Image Processing_ by K. Najarian and R. Splinter +* _Principles of Medical Imaging for Engineers_ by M. Chappell +* [3D printing medical devices](https://formlabs.com/blog/3d-printing-medical-devices/) +* [Slicer Virtual Reality](https://github.com/KitwareMedical/SlicerVirtualReality): Extension for 3D slicer that enables user to interact with the 3D scene using virtual reality +* [Slicer3: Image Guided Therapy (IGT)](https://www.slicer.org/wiki/Slicer3:_Image_Guided_Therapy_(IGT)): Tools to enable research in image guided therapy diff --git a/PW38_2023_GranCanaria/Projects/DICOMSEG/README.md b/PW38_2023_GranCanaria/Projects/DICOMSEG/README.md new file mode 100644 index 000000000..862413aa0 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/DICOMSEG/README.md @@ -0,0 +1,202 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# DICOM Segmentation Optimization + +## Key Investigators + +- Steve Pieper (Isomics, Inc) +- Andrey Fedorov (BWH) +- Andras Lasso (Queens) +- Marco Nolden (DKFZ) +- Ralf Floca (DKFZ) +- Hans Meine (MeVis) +- Alireza Sedghi (Radical) +- Erik Ziegler (Yunu) +- Markus Hermann (Idependent) +- Chris Bridge (MGH) +- David Clunie (PixelMed Publishing) +- Sean Doyle (Independent) + +# Project Description + +Discuss our experiences and thoughts on the DICOM SEG standard. + +## Objective + +Compare notes, benchmarks, and experience with interoperability and performance of DICOM SEG instances across platforms. +Evaluate the extent to which any observed performance issues are inherent in the format or simply inefficient implementations. +Consider proposals to improve the standard to address any inherent issues. + +## Approach and Plan + +1. Collate experiences from any investigations and benchmarks to date +2. Meet at project week with those on site involving remote participants as possible +3. 
Add notes here about results and plans for any follow up proposals to add representations to the standard +4. Discuss if we should consider re-starting/re-thinking the [DICOM4QI initiative](https://dicom4qi.readthedocs.io/en/latest/) as a venue to openly promote DICOM interoperability testing and collect feedback from the community. + +## Progress and Next Steps + +Performed timings with various methods to load segmentations in Slicer + +* Quantitative Reporting: 4 minutes + * the underlying issue is due to saving each segment into separate file by dcmqi, this issue is being addressed in [https://github.com/QIICR/dcmqi/pull/464](https://github.com/QIICR/dcmqi/pull/464) +* pydicom-seg: 15 seconds +* seg.nrrd: .6 second + +We had several conversations about the importance of DICOM for organizing derived data from quantitative analysis, conversations which underlined the point of defining efficient implementations. + +In discussion with machine learning researchers, e.g. developers and users of tools like TotalSegmentator, the number of segments is set increase rapidly, perhaps doubling within months to 200 or more, and with over 1000 segments expected within a year. 
+ +# Illustrations + +![image](https://user-images.githubusercontent.com/126077/216361639-a7d4aa90-1742-4681-b6cd-e78f15dce4cd.png) + +Example code to load with `pydicom-seg` vi Slicer 5.2.1 python console: +``` +try: + import pydicom_seg +except ModuleNotFoundError: + pip_install("pydicom_seg") + +import pydicom +import pydicom_seg +import SimpleITK as sitk + +dcm = pydicom.dcmread('/Users/pieper/slicer/latest/pydicom-seg/ABD_LYMPH_008_SEG.dcm') # 19 seconds + +reader = pydicom_seg.MultiClassReader() +result = reader.read(dcm) + +image_data = result.data # directly available +image = result.image # lazy construction + +sitk.WriteImage(image, '/tmp/segmentation.nrrd', True) +seg = slicer.util.loadSegmentation('/tmp/segmentation.nrrd') + +for segmentID in seg.GetSegmentation().GetSegmentIDs(): + segmentIndex = int(segmentID.split("_")[1]) + description = result.segment_infos[segmentIndex].SegmentDescription + seg.GetSegmentation().GetSegment(segmentID).SetName(description) +``` + +# Background and References + +The DICOM SEG standard has been around for several years and has been implemented as part of several tools in various languages: +* [dcmqi](https://github.com/QIICR/dcmqi) in C++ uses [DCMTK]([url](https://dicom.offis.de/dcmtk.php.en)) and provides support for SEG read/write through [the Quantitative Reporting extension](https://github.com/QIICR/QuantitativeReporting) to 3D Slicer +* [dcmjs](https://github.com/dcmjs-org/dcmjs) supports read/write of SEG in javascript for use in [OHIF](https://ohif.org/) +* [highdicom](https://github.com/ImagingDataCommons/highdicom) supports read/write of SEG in python: + - Creation of segmentations from masks numpy arrays (optionally as label map), with metadata copied from source image(s) + - Support for 2D single frame images, image series in patient coordinate system (CT, MR, ...), and multiframe tiled images (whole slide images) in slide coordinate sysmte + - 
Support for images aligned with source images, and in arbitrary space + - Support for "reconstructing" segmentation masks from the stored frames given arbitrary subsets of frames and segments, including conversion to label map + - Note that [this branch](https://github.com/ImagingDataCommons/highdicom/tree/docs/seg_explanation) + (under construction) has a much better user guide for SEGs than the current + documentation. I.e. [this page](https://github.com/ImagingDataCommons/highdicom/blob/docs/seg_explanation/docs/seg.rst) +* [pydicom-seg](https://github.com/razorx89/pydicom-seg) is using pydicom and SimpleITK, writes single output file. Performance for the TotalSegmentator sample is very good! +* (others - please add to this list) + +### Known Issues + +##### 1. Performance + +While interoperability has generally been good, performance of these SEG +implementation has in general been orders of magnitude slower than research +formats (e.g. nii.gz, nrrd, or seg.nrrd) at supporting segmentation use cases +such as using segmentation data for machine learning. For example, [this +notebook](https://colab.research.google.com/drive/1ZLqJwDIO1XKnnjOzClkSq8RIawm3sp9M) +shows that decoding a TotalSegmentator result from DICOM SEG with approximately +100 segments can take several minutes and consume very large amounts of memory +for a segmentation that takes less than a second to read from a research +format. + +Poor performance is due to at least two factors: +1. Sub-optimal algorithms/implementations that do not scale. + - The currently released version of highdicom (as of 24th Jan 2023) has an + implementation that was never designed to scale. [This pull + request](https://github.com/ImagingDataCommons/highdicom/pull/208) should + make significant progress to address this. Some further improvements should + be possible. + - At the pydicom level, iterating through long sequences is slow. 
+ This limits the performance of the higher level highdicom because the + Per-Frame Functional Groups Sequence can get large in large segmentations. + There may be optimisations to make there. See [this + issue](https://github.com/pydicom/pydicom/issues/1728) +2. Lack of "label map" style encoding in the standard. This is an issue in its + own right (see below). + +We are interested in how the benefits of DICOM (standardized encoding, rich +metadata, coded concepts, etc) can coexist with efficient read-write +performance for real-world use cases. + +##### 2. Lack of "Label Map" Style Encoding + +A DICOM SEG may contain many segments (elsewhere known as "classes" or +"labels"). But these segments are each stored in separate frames in the +segmentation as multiple binary masks (0 or 1 everywhere). This is in contrast +to many other formats that use a "label map" style encoding in which a single +array contains many segments using pixel values to represent membership of a +segment (i.e. pixel value 1 for segment 1, pixel value 2 for segment 2). Using +separate frames does confer two important advantages over the label map +approach: + +1. Segmentations in which the segments overlap each other can be represented +2. Fractional segmentations for multiple segments can be represented + +However, this also comes at a steep cost for what is arguably the +overwhelmingly common use case of non-overlapping non-fractional multi-segment +segmentations. Especially in the case of a large number of segments (such as +the TotalSegmentator mentioned above), this can lead to a very large number of +frames and makes the memory/storage utilization much higher than would be +necessary with a "label map" style. When you imagine doing instance +segmentation of cells in a whole slide image, this becomes completely untenable. + +It has been proposed that this could be solved relatively simply by +adding a new Segmentation Type (e.g. 
"LABELED") in addition to the existing +"BINARY" and "FRACTIONAL". This is not a formal proposal at this stage. + +There is a [highdicom draft +implementation](https://github.com/ImagingDataCommons/highdicom/pull/184) of +what this could look like. + +One issue is that currently SEGs images are limited to 8 bits per pixel, which +would limit the number of segments representable in "LABELMAP" style to 255. +This may not be high enough for some applications (e.g. instance segmentation). +A proposal on "label map" encoding should consider whether this limitation +should be relaxed. + +##### 3. Limited precision for fractional segmentations + +Fractional segs are quantized and stored as integers. As mentioned above, the +bits allocated is limited to a maximum of 8 currently. This means that +fractional segmentations have limited precision and are quantized to 256 +values, which is a lower level of precision than users would generally expect. + +##### 4. Lack of compression in current implementations + +Even if it is encoded in labelmap representations, uncompressed data is inefficient for storing +segmentation data. A typical nii.gz or .seg.nrrd file is compressed with gzip and can +be 100 or more time smaller than the source data due to redundancy in the segmentation data (large +areas of uniform segmentation or repeating patterns that can be more efficiently represented +by short codes). DICOM currently offers some options for this like RLE, but as yet they have +not be widely supported in currently used open source tools. + +##### 5. Some interoperability concerns + +There are repeated reports of interoperability issues between segmentations +created with highdicom and viewed in OHIF. See [this +issue](https://github.com/OHIF/Viewers/issues/2833). + +##### 6. Expanding dimension organization methods + +Multiple users of highdicom have been asking for support for 2D+T files. 
This +is possible but not straightforward due to the need to create a dimension +organization methodology that includes time as a dimension. Due to time +limitations this has not been a priority for highdicom but remains an open +issue. See + +- [Higidicom issue 200](https://github.com/ImagingDataCommons/highdicom/issues/200) +- [Higidicom issue 174](https://github.com/ImagingDataCommons/highdicom/issues/174) +- Some related discussion [here](https://github.com/ImagingDataCommons/highdicom/issues/159) + +A broader issue is whether these would be understood by viewing software unless +the dimension organization method is standardized to some extent. diff --git a/PW38_2023_GranCanaria/Projects/Electrophysiological biosignals in 3D Slicer/Arduino-Conect-wih-3D-Hand_GIF.mp4 b/PW38_2023_GranCanaria/Projects/Electrophysiological biosignals in 3D Slicer/Arduino-Conect-wih-3D-Hand_GIF.mp4 new file mode 100644 index 000000000..f19c0f8c4 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/Electrophysiological biosignals in 3D Slicer/Arduino-Conect-wih-3D-Hand_GIF.mp4 differ diff --git a/PW38_2023_GranCanaria/Projects/Electrophysiological biosignals in 3D Slicer/README.md b/PW38_2023_GranCanaria/Projects/Electrophysiological biosignals in 3D Slicer/README.md new file mode 100644 index 000000000..944376537 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/Electrophysiological biosignals in 3D Slicer/README.md @@ -0,0 +1,77 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Electrophysiological biosignals in 3D Slicer: a case of EMG to control 3D models + +## Key Investigators + +- Jordan Ortega Rodríguez (Astrophysic Institute of Canary Islands - IACTEC) +- Gara Ramos (Astrophysic Institute of Canary Islands - IACTEC) + +# Project Description + +The aim of this project is to acquire and visualize electrophysiological biosignals in 3D Slicer through Arduino, +as these bio-signals can provide information on 
physiological data of the subject that can complement other applications +based on image analysis. In particular, the module "EMGArduino" allows to use the subject's electromyogram (EMG) +to control the movement of an anthropomorphic hand 3D model. + +## Objective + +Objective 1. Establish an interface between Slicer and Arduino to acquire electrophysiological signals. +Objective 2. Visualize electrophysiological biosignals in Slicer in the time domain in terms of voltage amplitude. +Objective 3. Send commands from EMG signals to Slicer to control movements of 3D models loaded into the 3D scene. + + +## Approach and Plan + +1. First of all, it is necessary to have an electronic PCB for the acquisition and conditioning of electrophysiological + signals whose output can be connected to an Arduino board. In this project, we used the MySignals SW eHealth + and Medical IoT Development Platform (Libelium) [2]. However, any electronic signal acquisition PCB that is + properly designed for the treatment of electrophysiological biosignals can be used. + +2. Develop the EMG signals acquisition IDE code for the Arduino board. + +3. There is a Slicer extension called ArduinoController and developed by Paolo Zaffino et al. (Department of Clinical + and Experimental Medicine, University “Magna Graecia” of Catanzaro) [1] that allows connecting and receiving/sending + data from/to Arduino boards. Based on this extension, it is possible to create a module that uses the Arduino to receive + data from a subject's electrophysiological signals (for example: EMG) and transfer them to 3D Slicer. + +4. Adapting the created module to load 3D models into the Slicer's 3D scenes. + +5. Interacting with the loaded 3D models and the Arduino output data as a function of the voltage amplitude variations + of the acquired subject's EMG biosignals. + + +## Progress and Next Steps + +1.
Once we have an electronic PCB for acquisition (Fig.1) and electrophysiological signal processing, + the corresponding IDE code was developed and loaded onto the Arduino board. This code also allows the visualisation + of the EMG signal on a TFT screen integrated in the acquisition PCB (Video 1) and + the data transmission to a Slicer module (Fig.2). + +2. The visualization of the EMG signal in the slicer's plot scene was made through the ArduinoController module [1] (Video 2). + +3. The next step will be focused in to create a module that allows to load 3D models to a Slicer's scene and control/set some of their parameter + (such as position or colour) directly in function of the subject´s EMG signal voltage variation. As an ilustrative example purpose, + we previously developed this application in LabVIEW (Video 3), where the subject wear a surface EMG PCB that allows + him to control a 3D hand model. The aim is to replicate this application in 3D Slicer. + + +# Illustrations + + + +![Video - emg](https://github.com/NA-MIC/ProjectWeek/blob/master/PW38_2023_GranCanaria/Projects/Electrophysiological%20biosignals%20in%203D%20Slicer/Arduino-Conect-wih-3D-Hand_GIF.mp4) + +![Fig.1](https://github.com/JordanOrt/EMG_Slicer/blob/1546b5817a2116dead5ebac659b9e32520a62fc6/MySiganIoT.jpg) +![Video 1 (EMG DAQ-Arduino)](https://github.com/JordanOrt/EMG_Slicer/blob/1546b5817a2116dead5ebac659b9e32520a62fc6/EMG_MySignals-Arduino.mp4) +![Fig.2](https://github.com/JordanOrt/EMG_Slicer/blob/1546b5817a2116dead5ebac659b9e32520a62fc6/EMG-Arduino%20Plot.png) +![Video 2 (Real time streamming of EMG data from Arduino to 3D Slicer)](https://github.com/JordanOrt/EMG_Slicer/blob/1546b5817a2116dead5ebac659b9e32520a62fc6/EMG_Slicer.mp4) +![Video 3 (Example of controlling a 3D hand model by EMG signals developed in LabVIEW 
interface)](https://github.com/JordanOrt/EMG_Slicer/blob/1546b5817a2116dead5ebac659b9e32520a62fc6/EMG_Hand_Labview.mp4) + + +# Background and References + +[1] Zaffino P, Merola A, Leuzzi D, Sabatino V, Cosentino C, Spadea MF. SlicerArduino: A Bridge between Medical + Imaging Platform and Microcontroller. Bioengineering. 2020 Sep;7(3):109.) + +[2] MySignals - eHealth and Medical IoT Development Platform. Libelium. Video: https://www.youtube.com/watch?v=MiMDOT-Wt4w diff --git a/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/DAQ-board-and-Arduino.gif b/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/DAQ-board-and-Arduino.gif new file mode 100644 index 000000000..c332c9459 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/DAQ-board-and-Arduino.gif differ diff --git a/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/EMG-3DHand.gif b/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/EMG-3DHand.gif new file mode 100644 index 000000000..23160cbfc Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/EMG-3DHand.gif differ diff --git a/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/EMG-Arduino Plot.png b/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/EMG-Arduino Plot.png new file mode 100644 index 000000000..dd2d13a54 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/EMG-Arduino Plot.png differ diff --git a/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/EMG_Slicer.gif b/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/EMG_Slicer.gif new file mode 100644 index 000000000..942f5ea87 Binary files /dev/null and 
b/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/EMG_Slicer.gif differ diff --git a/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/MySiganIoT.jpg b/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/MySiganIoT.jpg new file mode 100644 index 000000000..938e9b260 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/MySiganIoT.jpg differ diff --git a/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/README.md b/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/README.md new file mode 100644 index 000000000..05eddbc6f --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/README.md @@ -0,0 +1,80 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Electrophysiological biosignals in 3D Slicer: a case of EMG to control 3D models + +## Key Investigators + +- Jordan Ortega Rodríguez (Medical Technology Group - Instituto de Astrofísica de Canarias. IACTEC) +- Gara Ramos (Medical Technology Group - Instituto de Astrofísica de Canarias. IACTEC) +- Natalia Arteaga (Medical Technology Group - Instituto de Astrofísica de Canarias. IACTEC) + +# Project Description + +The aim of this project is to acquire and visualize electrophysiological biosignals on 3D Slicer via Arduino, +as these biosignals can provide information about the subject's physiological data that can complement other +applications based on image analysis. In particular, the module "EMGArduino" makes it possible to use the subject's +electromyogram (EMG) to control the movement of a 3D model. + +## Objective + +Objective 1. Establish an interface between Slicer and Arduino to acquire electrophysiological signals. +Objective 2. The streaming and visualization of the electrophysiological biosignals in Slicer in the time domain in terms of voltage amplitude. +Objective 3. 
Send commands from EMG signals to Slicer to control movements of 3D models loaded into the 3D scene (e.g. anthropomorphic hand). + +## Approach and Plan + +1. First of all, it is necessary to have an electronic board for the acquisition and conditioning of electrophysiological + signals whose output can be connected to an Arduino board. In this project, the MySignals SW eHealth and Medical IoT Development Platform + (Libelium) [2] has been used. However, any electronic signal acquisition PCB that is suitably designed for electrophysiological + biosignal preprocessing can be used. + +2. Development of the Arduino IDE code for the acquisition of EMG signals. + +3. There is a Slicer extension called ArduinoController and developed by Paolo Zaffino et al. (Department of Clinical + and Experimental Medicine, University “Magna Graecia” of Catanzaro) [1] that allows to connect and receive/send + data from/to Arduino boards. Based on this extension, it is possible to create a module that uses the Arduino to receive + data from a subject's electrophysiological signals (e.g. EMG) and transfer them to 3D Slicer. + +4. Adaptation of the module created to load 3D models into the 3D scenes of the Slicer. + +5. Interaction with the uploaded 3D models and the Arduino output data as a function of the acquired subject EMG biosignal + voltage amplitude variations. + +## Progress and Next Steps + +1. Once we had an electronic PCB for the acquisition (Fig.1) and processing of the electrophysiological signal, the corresponding IDE + code was developed and loaded onto the Arduino board. This code also allows the visualisation of the EMG signal on a TFT screen + integrated in the acquisition PCB (Video 1) and the transmission of data to a Slicer module (Fig.2). + +2. The visualisation of the EMG signal in the Slicer scene was done through the ArduinoController module [1] (Video 2). + +3.
The next step will focus on creating a module that allows loading 3D models into the Slicer scene and controlling/setting + some of its parameters (such as position or colour) directly depending on the voltage variation of the EMG signal of the subject. + + +# Illustrations + +### Data Acquisition Board for the EMG signals + + +### EMG data plot in 3D Slicer + + + +# Videos + +### Biosignals Data Acquisition Board with Arduino system +![](https://github.com/NA-MIC/ProjectWeek/blob/master/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/DAQ-board-and-Arduino.gif) + +### Streaming the EMG signal in 3D Slicer +![](https://github.com/NA-MIC/ProjectWeek/blob/master/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/EMG_Slicer.gif) + +### Controlling a 3D Hand with EMG signals in 3D Slicer +![](https://github.com/NA-MIC/ProjectWeek/blob/master/PW38_2023_GranCanaria/Projects/Electrophysiological_Biosignals_In_3DSlicer/EMG-3DHand.gif) + +# Background and References + +[1] Zaffino P, Merola A, Leuzzi D, Sabatino V, Cosentino C, Spadea MF. SlicerArduino: A Bridge between Medical + Imaging Platform and Microcontroller. Bioengineering. 2020 Sep;7(3):109. + +[2] MySignals - eHealth and Medical IoT Development Platform. Libelium.
Video: https://www.youtube.com/watch?v=MiMDOT-Wt4w diff --git a/PW38_2023_GranCanaria/Projects/FetalUltrasoundSimulation/README.md b/PW38_2023_GranCanaria/Projects/FetalUltrasoundSimulation/README.md new file mode 100644 index 000000000..fb39d76f8 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/FetalUltrasoundSimulation/README.md @@ -0,0 +1,70 @@ +# Fetal Ultrasound Simulation for Delivery Training + +## Key Investigators + +- Felix von Haxthausen (University of Lübeck, Lübeck, Germany) +- David García Mato (Ebatinca S.L., Las Palmas de Gran Canaria, Spain) +- Tolga-Can Çallar (University of Lübeck, Lübeck, Germany) +- José Carlos Mateo Pérez (ULPGC, Las Palmas de Gran Canaria, Spain) + +## Clinical collaboration + +- Dr. Juan de León Luis (Head of Obstetrics and Gynecology Department, Hospital General Universitario Gregorio Marañón, Madrid, Spain) + +## Medical Background +Intrapartum assessment of the **fetal head position** and pelvic station are essential in the management of labor. Precise knowledge of these parameters will assist in the correct identification of normal vs. abnormal labor patterns, and among the latter indicate when medical or operative intervention may be required. + +Identifying the fetal position is specially important during forceps-assisted delivery. Forceps are slick metal objects that resemble big spoons or tongs. They are contoured to fit the baby's head and are placed around the infant's head. Depending on the position of the fetus's head, the techniques and manouvers used by obstetricians are different. + +**Ultrasound imaging** is commonly used to estimate the fetal position. In a first step, the clinician identifies the fetus spine using ultrasound to infer the position of the fetus body. Then, the occiput is localized to determine the exact position of the head. + +drawing + +drawing + +## Project Description +The goal of this project is to generate fake ultrasound images that can be used during medical training. 
These images should be as realistic as possible to enable trainees to correctly identify the fetus position in the simulated ultrasound images. + +PLUS Toolkit already offers a [tool](http://perk-software.cs.queensu.ca/plus/doc/nightly/user/DeviceUsSimulator.html) to generate fake ultrasound images based on 3D models. However, a general problem is that one needs to provide acoustic properties of each 3D model/tissue for the ultrasound image generation. + +## Objective +1. Test ultrasound simulation tool offered by PLUS Toolkit and visualize simulated images and models in 3D Slicer. +2. Create a tool that automatically assigns acoustic properties to each 3D model and generates the according config file for the PLUS ultrasound image generation server. +3. Calculate acoustic properties for fetal ultrasound simulation and improve realism of simulated ultrasound images. +4. Discuss ultrasound simulation technology with the 3D Slicer community. Other methods? AI? + +## Approach and Plan +Development of tool to assign tissue acoustic properties: +1. Use the Total Segmentator to create the 3D models with assigned organ definition based on CT data. +2. Create and define a lookup table (LUT) that assigns the accoustic properties to each organ label. +3. Develop a tool or module that generates the config file based on the models and the LUT. + +## Progress and Next Steps + +1. Created a LUT with accoustic properties (density, speed of sound, accoustic attenuation) of 112 different tissue types. +2. Created a simple Matlab script to generate an XML config file for PLUS with the according accoustic properties for different models based on Total Segmentator + +drawing + +3. Generate scene with virtual models of mother and fetus. + +drawing +drawing + +4. NEXT STEP: Test ultrasound image simulation assigning acoustic properties to tissues. 
+ +## Conclusion & Outlook +A central challenge regarding surface model based ultrasound simulation, i.e., the generation of realistic mesh models of internal anatomy, is greatly alleviated by the utilization of the Total Segmentator module that allows for the automated segmentation of multiple tissues and organs with appreciabe accuracy. In terms of various sound propagation and ray-tracing algorithms used ultrasound simulations, the classification of the respective segmentations enables the direct assignment of acoustic tissue properties that were researched and gathered from the related literature to build an tissue-specific acoustic lookup table. +All of this is in stark contrast to previously proposed approaches that employed laboriously hand-crafted mesh models and manual fine-tuning of acoustic parameters. + +Still, a major subject for future research remains: Usage of tissue segmentations delineating the outer border of structures within the framework of surface-based ultrasound simulation, e.g. as implemented in PLUS, disregards intra-structural heterogeneity of tissues and organs. Depending on the respective applications, this may lead to undesirable low simulation-fidelity. Hence, we intend to exploit image intensities within source images of the segmentations to perform intensity-based modifications of the acoustic parameters assigned within segmentations. + +## References + +- Related project from 35th NA-MIC Project Week: [VR for Birth Delivery Training](https://projectweek.na-mic.org/PW35_2021_Virtual/Projects/VRBirthDeliveryTraining/) + +- Training system for forceps-assisted delivery developed in 3D Slicer by Universidad Carlos III de Madrid: [VIDEO](https://www.youtube.com/watch?v=EEasWbH1jZI) + +- García-Sevilla, M. et al. (2018). Performance Evaluation to Improve Training in Forceps-Assisted Delivery. In: , et al. 
OR 2.0 Context-Aware Operating Theaters, Computer Assisted Robotic Endoscopy, Clinical Image-Based Procedures, and Skin Image Analysis. CARE CLIP OR 2.0 ISIC 2018 2018 2018 2018. Lecture Notes in Computer Science(), vol 11041. Springer, Cham. https://doi.org/10.1007/978-3-030-01201-4_9- + +- Sherer, D.M., Miodovnik, M., Bradley, K.S. and Langer, O. (2002), Intrapartum fetal head position II: comparison between transvaginal digital examination and transabdominal ultrasound assessment during the second stage of labor. Ultrasound Obstet Gynecol, 19: 264-268. https://doi.org/10.1046/j.1469-0705.2002.00656.x diff --git a/PW38_2023_GranCanaria/Projects/HistologyIntoIDC/README.md b/PW38_2023_GranCanaria/Projects/HistologyIntoIDC/README.md new file mode 100644 index 000000000..08907705b --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/HistologyIntoIDC/README.md @@ -0,0 +1,57 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Histology Data and Models Into IDC + +## Key Investigators + +- Curtis Lisle (KnowledgeVis) +- Andrey Fedorov (BWH) +- Maximilian Fischer (DKFZ) +- David Cllunie (PixelMed) +- others welcome + + +# Project Description +I am working on a histology project with NCI, which is producing whole-slide images and deep-learning segmentation and analysis models. Our images are of a rare pediatric cancer called Rhabdomyosarcoma. + +Our group is in discussion with the IDC core team to import our images and models into IDC for others to use. This Project Week project is about the process of converting a whole slide image (WSI) into DICOM for import and learning how to run models on histology images already managed by IDC. + +It is our hope that this will prepare us for converting our analytic models and submitting them to IDC later, after Project Week has completed. + +## Objective + + + +1. Run the IDC-recommended DICOM conversion process for a pyramidal input image +1. Execute existing analysis models (already in IDC) in Collab sessions +1. 
Learn about analysis models can access different levels/tiles in a pyramidal DICOM file so we can modify our models to run within the IDC environment. + +## Approach and Plan + + + +1. Acquire an anonymized WSI image, suitable for testing during Project Week +1. Explore existing Histology collections in IDC. [See existing collections here](https://portal.imaging.datacommons.cancer.gov/explore/filters/?access=Public&Modality_op=OR&Modality=SM) +1. Learn how to convert and submit a WSI image into the IDC +1. Study how analysis models loaded in IDC access pyramidal DICOM files. Study existing models: [See Existing Model Examples](https://github.com/ImagingDataCommons/IDC-Examples/tree/master/notebooks/pathomics) + +## Progress and Next Steps + + + +1. Acquire a set of anonymized WSI images from our project. + +# Illustrations + + + +# Background and References + + + +[David Clunie's DICOM Import process](https://github.com/ImagingDataCommons/idc-wsi-conversion) + +[Google's WSI to DICOM converter](https://github.com/GoogleCloudPlatform/wsi-to-dicom-converter) diff --git a/PW38_2023_GranCanaria/Projects/IDC_DICOM_WSI_workflow/README.md b/PW38_2023_GranCanaria/Projects/IDC_DICOM_WSI_workflow/README.md new file mode 100644 index 000000000..1a5bdd176 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/IDC_DICOM_WSI_workflow/README.md @@ -0,0 +1,77 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# DICOM WSI: conversion into DICOM WSI, analysis workflows operating on WSI, using DICOM WSI from IDC + +## Key Investigators + +- Maximilian Fischer (German Cancer Research Center, Germany) +- Andrey Fedorov (Brigham and Women’s Hospital, USA) +- Marco Nolden (German Cancer Research Center, Germany) +- Philipp Schader (German Cancer Research Center, Germany) +- David Clunie (PixelMed Publishing, USA) +- Daniela Schacherer (Fraunhofer MEVIS, Germany) +- André Homeyer (Fraunhofer MEVIS, Germany) +- Curtis Lisle (KnowledgeVis, 
USA) +- Theodore Aptekarev (Freelancer) +- Davide Punzo (Freelancer) +- Igor Octaviano (Radical Imaging) + +**[Project channel on Discord: #dicom-wsi](https://discord.com/channels/843934857620357130/1069591021928853574)** + + +# Project Description + + +The Imaging Data Commons (IDC) portal is a cloud based repository of public cancer imaging data, and inclludes access to histopathological image data in the DICOM standard. In recent years increasingly more automated image analysis algorithms for pathology data emerged. However they are mainly developed for proprietary WSI vendor file formats and not for the standardized DICOM WSI file format as input. In this project a tumor classification algorithm shall be developed for DICOM WSI files as input file format. +The aim of this project is to develop an end to end Google Colab notebook to define a data cohort from the IDC database. The selected cohort is retrieved in the DICOM WSI standard and an examplary deep learning based image analysis algorithm is applied on the selected DICOM studies. As a use case task we select a classification algorithm, which is a common downstream task in computational pathology. The algorithm generates for the selected DICOM WSI files a tumor heatmap and provides the analysis result as DICOM parametric map, which can be visualized together with WSI image in the interoperable web-based slide microscopy viewer and annotation tool (SLIM), which is fully integrated in the IDC database. +A basis of this project provides the DICOM WSI support in the Kaapana platform, which also shall be improved in this project. The custom DICOM conversion pipeline for WSI files, which is integrated in the platform shall be improved and further applications with DICOM WSI data integrated in the platform. + + + +## Objective + + + +1. An analysis pipeline to visualize the analysis results for IDC digital pathology data +2. Deploy a deep learning based tumor classification algorithm via Google Colab +3. 
Develop further pathology applications in the Kaapana platform + +## Approach and Plan + + + +1. Define Query/Retrieve operations to define a data cohort from IDC portal +2. Test several image analysis algorithms for tumor classification +3. Store the result in a DICOM WSI file format +4. Integrate workflow in Google Colab + +## Progress and Next Steps + + +1. Had a project kick-off meeting to discuss the plan. + * Agreed for Max and Andrey will work to set up initial part of the colab notebook that searches and downloads WSI from IDC and extracts tiles, then this can be used both by Max and Curt for workflow development + * Agreed to use Colab notebook to set up conversion pipeline using David and google converter + * Established a notebook [here](https://colab.research.google.com/drive/1sbuGggwmbE-JgkO8LS5ndXIzLpf7yhKd?usp=sharing) to document/share recipes for using various conversion tools and validation of the resulting images. Andrey will continue developing this after the PW. +2. Curt and David tested three DICOM-WSI converters (PixelMed's converter, Google's wsi2dcm, and the wsidicomizer) using a pyramidal Aperio cancer image from NCI. All converters were able to run on the image, but produced varying results. PixelMed produced the most complete set of layers and DICOM tags with **wsidicomizer** second and Google's converter coming in third place. (See the high-res example inset below) +3. We made good progress adapting a pretrained tissue segmentation algorithm developed by NCI to run on a converted DICOM-WSI source image instead of the proprietary original format the model was designed for. (See below for the sample CMU-1 DICOM WSI and the segmentation output generated from the model.) This effort will contue after the project week. Some WSIs are compressed with JPEG2000, which can present decompression problems in some conversion packages. +4. Made progress developing a Colab notebook "recipe" for deploying Slim viewer on GCP via Firebase. 
This is not yet completed, but made great progress. Notebook is [here](https://colab.research.google.com/drive/1aNw08RV9nFVR39b1se065-5hZ1-kEKcQ?usp=sharing), Andrey will continue working on this following the PW. + +# Illustrations + +![pw38-dicom-wsi-conversion-and-model](https://user-images.githubusercontent.com/2152950/216508907-afbc2a5b-f510-4297-95a8-12208afd57b1.png) + + + +# Background and References + + + +- worfklow used by IDC to create DICOM WSI: [https://github.com/ImagingDataCommons/idc-wsi-conversion](https://github.com/ImagingDataCommons/idc-wsi-conversion) +- Colab notebooks with examples of using DICOM WSI in analysis workflows: [https://github.com/ImagingDataCommons/IDC-Examples/tree/master/notebooks/pathomics](https://github.com/ImagingDataCommons/IDC-Examples/tree/master/notebooks/pathomics) +- DICOM WSI converter from Google: [https://github.com/GoogleCloudPlatform/wsi-to-dicom-converter](https://github.com/GoogleCloudPlatform/wsi-to-dicom-converter) +- sample image [https://cytomine.com/collection/cmu-1/cmu-1-svs](https://cytomine.com/collection/cmu-1/cmu-1-svs) +- [Dropbox folder for storing related artifacts](https://www.dropbox.com/sh/2wkpn4iypxyvg7o/AACkI5F9f2yk42Jp9a2uat02a?dl=0) +- Max github repo: [https://github.com/maxfscher/DICOMwsiWorkflow](https://github.com/maxfscher/DICOMwsiWorkflow) +- Colab Notebook for the conversion process [https://colab.research.google.com/drive/1sbuGggwmbE-JgkO8LS5ndXIzLpf7yhKd?usp=sharing](https://colab.research.google.com/drive/1sbuGggwmbE-JgkO8LS5ndXIzLpf7yhKd?usp=sharing) +- Slim deployment instructions (WIP): [https://docs.google.com/document/d/1r6r8w4FZnzeQO47TDn9DTj78nTjDsxuy4jSULmrCTwM/edit](https://docs.google.com/document/d/1r6r8w4FZnzeQO47TDn9DTj78nTjDsxuy4jSULmrCTwM/edit) +- wsidicomizer repository: 
[https://github.com/imi-bigpicture/wsidicomizer](https://github.com/imi-bigpicture/wsidicomizer) diff --git a/PW38_2023_GranCanaria/Projects/IDC_Tutorial/README.md b/PW38_2023_GranCanaria/Projects/IDC_Tutorial/README.md new file mode 100644 index 000000000..1d7576b10 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/IDC_Tutorial/README.md @@ -0,0 +1,59 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# NCI Imaging Data Commons Tutorial / Workshop + +## Key Investigators + +- Andrey Fedorov (Brigham and Women’s Hospital, USA) +- Deepa Krishnaswamy (Brigham and Women’s Hospital, USA) +- Cosmin Ciausu (Brigham and Women’s Hospital, USA) +- Vamsi Thiriveedhi (Brigham and Women’s Hospital, USA) +- Dennis Bontempi (AIM Lab, Brigham and Women’s Hospital, USA) +- Leonard Nuerenberg (AIM Lab, Brigham and Women’s Hospital, USA) + +# Project Description + + + + +[NCI Imaging Data Commons](https://imaging.datacommons.cancer.gov/) is a cloud-based repository of cancer imaging data, which among other features provides free access to the DICOM files curated in Google Storage public buckets. + +This workshop will introduce IDC and some recent related developments. + +**Logistics**: Wednesday, Feb 1, 11am-1pm Las Palmas time. + +See agenda and notes for the workshop in [this document](https://docs.google.com/document/d/1HMmqVXSshEHf90Vu9LORaXVuoSpJzKxy7StrMBqfSdQ/edit?usp=sharing). + +## Objective + + + +1. Present IDC and recent developments. +2. Collect feedback from the community. +3. Dentify new applications and collaboration opportunities. + +## Approach and Plan + + + +1. + +## Progress and Next Steps + + + +1. IDC tutorial took place, slides presented are linked from the [agenda document](https://docs.google.com/document/d/1HMmqVXSshEHf90Vu9LORaXVuoSpJzKxy7StrMBqfSdQ/edit?usp=sharing). +2. Discussed IDC with interested attendees. 
+ +# Illustrations + + + +# Background and References + + +* [PW38 IDC tutorial agenda and slides](https://docs.google.com/document/d/1HMmqVXSshEHf90Vu9LORaXVuoSpJzKxy7StrMBqfSdQ/edit?usp=sharing). +* [Google Blog post about IDC](https://cloud.google.com/blog/topics/developers-practitioners/advancing-cancer-research-public-imaging-datasets-national-cancer-institute-imaging-data-commons) (released Feb 3, 2023) diff --git a/PW38_2023_GranCanaria/Projects/IDC_with_VolView/README.md b/PW38_2023_GranCanaria/Projects/IDC_with_VolView/README.md new file mode 100644 index 000000000..e02eecd1b --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/IDC_with_VolView/README.md @@ -0,0 +1,79 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Using VolView with data in Google Storage buckets / IDC buckets + +## Key Investigators + +- Andrey Fedorov (Brigham and Women’s Hospital, USA) +- Forrest Li (Kitware, USA) +- Stephen Aylward (Kitware, USA) + +# Project Description + + +[VolView](https://volview.kitware.com/) is an open source radiological viewer developed by Kitware that excels in 3D visualization. Currently, this viewer can be used to visualize files uploaded into the browser. + +[NCI Imaging Data Commons](https://imaging.datacommons.cancer.gov/) is a cloud-based repository of cancer imaging data, which among other features provides free access to the DICOM files curated in Google Storage public buckets. + +While IDC integrates OHIF and Slim viewers, the instances of the viewers maintained by IDC can only be used to visualize data in IDC. Users that want to visualize analysis results they produce in OHIF or Slim need to deploy their own instances of the viewers. That process is documented, but involves many steps and can be too difficult for many users. Furthermore, OHIF Viewer v2 used in IDC does not have any functionality to support 3D visualization. 
+ +In this project we want to investigate the use of existing VolView instance maintained by Kitware at [https://volview.kitware.com/](https://volview.kitware.com/) to visualize data in public Google Storage buckets. This will allow: +1. 3D visualization of public data in IDC. +2. Not only "zero footprint", but more importantly "zero deployment" and "zero maintenance" viewer that can be used by IDC users to visualize analysis results. + +## Objective + + + +1. Investigate ability of VolView to pull data from Google Storage public buckets - in IDC and/or from user projects. +2. Investigate representation of the manifest to be used to define VolView input. + +## Approach and Plan + + + +1. Experiment with CORS settings on a public "test" bucket. +2. Define minimum necessary settings for CORS configuration. +3. Coordinate with GCP support and other GCP experts as needed. + +## Progress and Next Steps + + + +1. Defined steps for setting up GCS bucket to be accessible by VolView, defined the format of the manifest: + * install GCP SDK (or use Colab), initialize GCP, create project, set up billing (billing required for creating buckets!) 
- can use [this IDC tutorial](https://github.com/ImagingDataCommons/IDC-Examples/blob/master/notebooks/getting_started/part1_prerequisites.ipynb) + * Create a public GCS storage bucket + * Set up CORS configuration using these settings: + ``` + [ + { + "origin": ["https://volview.netlify.app/"], + "method": ["*"], + "responseHeader": ["Content-Type"], + "maxAgeSeconds": 3600 + } + ] + ``` + * update CORS configuration for the bucket using this command: + ```$ gcloud storage buckets update gs:// --cors-file=./idc-volview-pilot-cors.json``` + * you may need to wait for several hours for the CORS configuration to propagate + * create JSON manifest that refers to the files corresponding to the specific study/series in your bucket that you want to visualize in VolView using this format, and put the manifest in the bucket alongside the files: + ``` + {"resources":[{"url":"gs:///lymph/000cdc4d-7700-46be-82ec-7eff30eacd63.dcm"},{"url":"gs:///lymph/00187779-6957-48c5-ad00-8aeaa8f34642.dcm"},{"url":"gs:///lymph/0024f696-98a8-4251-8702-c9bb690e281a.dcm"},{"url":"gs:////lymph/00c78534-a63f-46cd-abbc-32003a58d3ec.dcm"},{"url":"gs:////lymph/01e2fad8-386b-4e04-8a74-9f52c9af9919.dcm"} + [...] + ``` + * when CORS config is propagated, you should be able to open the images in Kitware hosted VolView instance using this URL format: `https://volview.netlify.app/?urls=https://storage.googleapis.com//.json` + 2. Reached out to Google Public Datasets Program support asking if CORS can be configured for the public IDC buckets to allow GET from VolView, waiting for the response. 
+ +# Illustrations + + + +![CT series in GCS bucket loaded in VolView via manifest in GCS bucket](https://github.com/NA-MIC/ProjectWeek/raw/master/PW38_2023_GranCanaria/Projects/IDC_with_VolView/gcs-bucket-volview.gif) Link to try out: [https://volview.netlify.app/?urls=https://storage.googleapis.com/idc-volview-pilot/idc-test.json](https://volview.netlify.app/?urls=https://storage.googleapis.com/idc-volview-pilot/idc-test.json) + +# Background and References + + diff --git a/PW38_2023_GranCanaria/Projects/IDC_with_VolView/gcs-bucket-volview.gif b/PW38_2023_GranCanaria/Projects/IDC_with_VolView/gcs-bucket-volview.gif new file mode 100644 index 000000000..e045fecbb Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/IDC_with_VolView/gcs-bucket-volview.gif differ diff --git a/PW38_2023_GranCanaria/Projects/IbisInSlicer/README.md b/PW38_2023_GranCanaria/Projects/IbisInSlicer/README.md new file mode 100644 index 000000000..420d0dfca --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/IbisInSlicer/README.md @@ -0,0 +1,32 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Ibis in Slicer + +## Key Investigators +- Étienne Léger (Montréal Neurological Institute and Hospital, Canada) +- Houssem Eddine Gueziri (Montréal Neurological Institute and Hospital, Canada) +- Simon Drouin (École de technologie supérieure, Montréal, Canada) + +# Project Description + +Continuing the trend set in the [GPU Rigid Registration project](https://github.com/NA-MIC/ProjectWeek/blob/master/PW35_2021_Virtual/Projects/GPURigidRegistration/README.md), the purpose of this project is to port functionalities from the [Ibis Neuronav](http://ibisneuronav.org/) platform to 3D Slicer to increase compatibility between the two systems. During this week, we will focus on the HardwareModule of Ibis, which handles reading hardware set configuration files and creating scene objects and OpenIGTLink connectors accordingly. + +## Objective + +1. Final Objective. 
Be able to read Ibis configuration files from Slicer to produce an equivalent scene. + +## Approach and Plan + +1. Assess which classes need to be ported. +2. Port/wrap/reimplement necessary components. +3. Test + +## Progress +1. Created repository, Ibis extension (to also harbor more eventual ports (next Project Week!)) and loadable IbisHardwareModule +2. Defined approach +3. Started implementation + +# Background and References + +- http://ibisneuronav.org +- https://github.com/IbisNeuronav/Ibis diff --git a/PW38_2023_GranCanaria/Projects/KaapanaClinicalData/README.md b/PW38_2023_GranCanaria/Projects/KaapanaClinicalData/README.md new file mode 100644 index 000000000..cce4e11c1 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/KaapanaClinicalData/README.md @@ -0,0 +1,66 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Integration of clinical data into medical imaging pipelines + +## Key Investigators + +- Philipp Schader (German Cancer Research Center, Germany) +- Andrey Fedorov (BWH) + +Project channel in Discord [#kaapana-clinical-data-integration](https://discord.com/channels/843934857620357130/1069601002174566420) + +# Project Description + +Clinical data like age, blood type, diagnosis and other non-imaging biomarkers are highly relevant in medical image processing as they provide context for the analysis of imaging datasets. Incorporating this additional data layer into image processing tools facilitates the development of complex biomarkers. While platforms for medical image processing like Kaapana focus on the imaging layer they often lack features to relate clinical data to it. + +This project aims to integrate clinical data better into the Kaapana medical image processing platform by using the FHIR standard. To facilitate this a FHIR server will be integrated in the platform and linked to the imaging data stored in the internal PACS. 
Additionally the workflow component will be extended to be able to store and retrieve FHIR objects from the internal server. This forms the basis to create import procedures allowing the import of clinical data from tabular data into the internal FHIR store. By extending the preexisting radiomics workflow of the platform to store its results in the FHIR server a first imaging biomarker is made available. In a last step a joint analysis using the Jupyter Lab service of the platform joining the clinical data with the imaging biomarkers from the radiomics analysis is performed. + +img-clinical-data-kaapana + +## Objective + +1. Integration of a data store for clinical data in the Kaapana Platform +2. Integration of clinical data retrieval and storage from processing pipelines (Try to store analysis results like radiomics reports in FHIR) +3. Establish import procedures for clinical data (e.g. for data formats available via TCIA or IDC - up for discussion) +4. Integration of the clinical data store into the analysis frameworks of the platform and example of a joint analysis + +## Approach and Plan + + + +1. Review clinical data available in IDC, start with the [clinical data IDC tutorial](https://github.com/ImagingDataCommons/IDC-Examples/blob/master/notebooks/clinical_data_intro.ipynb) +1. Find a small collection of datasets including images, segmentations of pathologies and according clinical data +2. Decide on an open-source FHIR server (like HAPI FHIR) integrate it into Kaapana (and link it to the PACS if possible) +4. Creation of query / retrieve operators for FHIR objects within the workflow components +5. Create import workflow to import different clinical data formats from the object store into the FHIR server and discuss which data formats to support (csv, Excel, RedCap, odm) +6. 
Represent the results of a workflow (maybe by extending the preexisting radiomics workflow - feedback welcome) as FHIR objects and store them in the FHIR Server of the platform +7. Perform an example analysis using the workflow results and enriching them using other clinical data + +## Progress and Next Steps + + + +1. Explored TCIA clinical data collection via IDC and selected [NSCLC-Radiomics]([url](https://wiki.cancerimagingarchive.net/display/Public/NSCLC-Radiomics)) since it contains a csv as well as segmentations to perform radiomics. +2. Created a Kaapana Extension for the HAPI FHIR JPA server to store and retrieve the resources created in the next steps. +3. Draft a mapping from the clinical data from NSCLC-Radiomics into FHIR Resources. +5. Created Jupyter Notebook to create the actual resources for imaging and clinical data and import them into the platform's FHIR Server. +6. WiP: Translate the Jupyter Notebook into Workflow Operators to automate the import +7. WiP: Wrap Radiomics Features +8. 
WiP: Performe Joint Analysis + +# Illustrations + + +image + + +# Background and References + + +- [Kaapana](https://github.com/kaapana/kaapana) the imaging platform to use +- [HAPI FHIR](https://hapifhir.io/) a potential open source FHIR server +- [CCE_DART](https://cce-dart.com/) a project using Kaapana to discover complex biomarkers +- Clinical data in IDC - start with [this tutorial](https://github.com/ImagingDataCommons/IDC-Examples/blob/master/notebooks/clinical_data_intro.ipynb) diff --git a/PW38_2023_GranCanaria/Projects/KaapanaConnectingKaapanaToGoogleCloudAndHealthAndFHIR/README.md b/PW38_2023_GranCanaria/Projects/KaapanaConnectingKaapanaToGoogleCloudAndHealthAndFHIR/README.md new file mode 100644 index 000000000..8928d420d --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/KaapanaConnectingKaapanaToGoogleCloudAndHealthAndFHIR/README.md @@ -0,0 +1,57 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Connecting/Using Kaapana to Google Cloud/Google Health/Google FHIR + +Kaapana tutorial for the 38th NA-MIC project week: https://drive.google.com/file/d/1A7-8Ru0uTJHFFa17rZtkBpvNhJao_F7x/view?usp=share_link + +## Key Investigators + +- Jonas Scherer (German Cancer Research Center, Germany) +- Andrey Fedorov (Brigham and Women's Hospital, USA) +- Klaus Kades (German Cancer Research Center, Germany) + +# Project Description + +Working on a native integration of Kaapana and Google Cloud services. + +## Objective + + + +1. Running Kaapana on Google Kubernetes Engine +2. Substituting dcm4che with a Google Healthcare DICOM store + +Relate to: +- [Kaapana overall](https://github.com/NA-MIC/ProjectWeek/tree/master/PW38_2023_GranCanaria/Projects/Kaapana_overall) + +## Approach and Plan + + + +1. Set up GCP project for experiments. +2. Evaluate the effort needed to adapt Kaapana to GKE. +3. Start working on adaptation. + +## Progress and Next Steps + + + +1. GCP project set up. +2. 
Jonas started work on this (remotely!) +3. Unfortunately, other meetings of Jonas prevented Andrey and Jonas to meet to discuss this. +4. But: updates from the project channel from Jonas: "I think I have what I need right now 🙂 I made some progress yesterday and have a connected cluster running in GCP. Now I need to make a couple adjustments regarding the storage claims within our project (that was expected). I'll write you as soon as I have a first version to try on GCP" and "I still have to change ~ 80% of our storage handling (we should do this anyway)". +5. Andrey and Klaus discussed the strategy for integration with GCP Healthcare (see notes in the umbrella project) and identified initial steps. + +# Illustrations + + + +tbd + +# Background and References + +tbd + diff --git a/PW38_2023_GranCanaria/Projects/KaapanaDataAndModelExchangeAcrossDifferentSources/README.md b/PW38_2023_GranCanaria/Projects/KaapanaDataAndModelExchangeAcrossDifferentSources/README.md new file mode 100644 index 000000000..e4350071c --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/KaapanaDataAndModelExchangeAcrossDifferentSources/README.md @@ -0,0 +1,107 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Data and model exchange across different sources + +Kaapana tutorial for the 38th NA-MIC project week: + +https://drive.google.com/file/d/1A7-8Ru0uTJHFFa17rZtkBpvNhJao_F7x/view?usp=share_link + +## Key Investigators + +- Benjamin Hamm (German Cancer Research Center, Germany) +- Ünal Akünal (German Cancer Research Center, Germany) +- Markus Bujotzek (German Cancer Research Center, Germany) +- Klaus Kades (German Cancer Research Center, Germany) +- Andrey Fedorov (Brigham and Women's Hospital, USA) + +# Project Description + +Implementations and discussion about a standardized data and model exchange between different platforms such as Kaapana and MONAI. Working on integrating Kaapana with other toolkits. 
+- Motivation: Running Kaapana platforms in multiple (inter-)national projects: RACOON, DART, ... +- Goal: Standarized and Federated Data Analysis / Federated Learning require standardized model exchange formats + +![image](https://user-images.githubusercontent.com/103252889/215480450-23dfe16c-fd20-473a-a185-9e0262a275c0.png) + + +## Objective + + + +Support standardized data and AI model I/O interfaces in Kaapana. + +1. Support of various AI model sources +- Integration of MONAI Model Zoo into Kaapana + - inference pipeline as a Kaapana workflow / as a Kaapana extension + - training pipeline + - generic support of MONAI Bundles (MONAI Label / MONAI Deploy / MONAI FL) +- Standardized remote model execution, execution of models from modelhub.ai within Kaapana +2. Integration/support of data sources: +- TCIA download/(upload) into Kaapana +- Integration with IDC: download of data via Google Cloud SDK +3. Integration of new analysis tools into Kaapana +4. Javascript/Python library client to communicate with Kaapana + +Relate to: +- [Kaapana overall](https://github.com/NA-MIC/ProjectWeek/tree/master/PW38_2023_GranCanaria/Projects/Kaapana_overall) + +## Approach and Plan + +1. Support of various AI model sources +- Integration of MONAI Model Zoo into Kaapana + - inference pipeline as a Kaapana workflow / as a Kaapana extension + - training pipeline + - generic support of MONAI Bundles (MONAI Label / MONAI Deploy / MONAI FL) +- Standardized remote model execution, execution of models from modelhub.ai within Kaapana +- Current progress: +![image](https://user-images.githubusercontent.com/103252889/215465416-394f3a57-176b-469b-a6ce-505bd359908b.png) + +2. Integration/support of data sources: +- TCIA download/(upload) into Kaapana + - Kaapana workflow to download specific TCIA datasets + - select to-be-downloaded dataset via UI + - send downloaded dataset to Kaapana's PACS + +## Progress and Next Steps + +1. 
Support of various AI model sources +- Integration of MONAI Model Zoo into Kaapana + - Proof of concept: Intgration of MONAI Model Zoos spleen CT segmentation works + - tbd: Finalize integration in Kaapana + - tbd: Add more monai bundles +- Support of MHub + - Completed the implementation of a workflow in Kaapana for modelhub.ai + - Supports each model already available in mhub + - A wrapper around the dockerfile of models in mhub + - Ability to visualize the segmentations using Slicer, MITK or OHIF on a web browser + +2. Integration/support of data sources: +- TCIA download/(upload) into Kaapana + - Implemented `service-tcia-download`. Now it is possible to drag and drop a .tcia manifest file into Kaapana (in minio). This will start a workflow which downloads the data from TCIA via their REST-API. Number of workers can be set in the operator. + + +# Illustrations + +Screen Shot 2023-02-03 at 13 39 36 +Screen Shot 2023-02-03 at 13 41 09 + +![Screen Shot 2023-02-03 at 14 13 08](https://user-images.githubusercontent.com/16197349/216636372-16924281-e1ab-436d-a656-c38cedd84eaa.jpg) + + +![mitk_p](https://user-images.githubusercontent.com/16197349/216622165-10c09abc-63fa-4703-9f67-63345a810c56.PNG) +![mitk_ts](https://user-images.githubusercontent.com/16197349/216622179-15b9d365-8e58-4b48-9966-db93dde5294b.PNG) + + + +tbd + +# Background and References + +- https://www.kaapana.ai/ +- http://app.modelhub.ai/ +- https://www.cancerimagingarchive.net/ +- https://monai.io/ + + diff --git a/PW38_2023_GranCanaria/Projects/KaapanaFastViewingAndTaggingOfDICOMImages/README.md b/PW38_2023_GranCanaria/Projects/KaapanaFastViewingAndTaggingOfDICOMImages/README.md new file mode 100644 index 000000000..3a1e875fd --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/KaapanaFastViewingAndTaggingOfDICOMImages/README.md @@ -0,0 +1,58 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Fast viewing and tagging of DICOM Images + +Kaapana tutorial for the 38th NA-MIC project week: 
+https://drive.google.com/file/d/1A7-8Ru0uTJHFFa17rZtkBpvNhJao_F7x/view?usp=share_link + +## Key Investigators + +- Stefan Denner (German Cancer Research Center, Germany) +- Klaus Kades (German Cancer Research Center, Germany) +- Andrey Fedorov (Brigham and Women's Hospital, USA) +- Davide Punzo (Freelancer, France) +- Alireza Sedghi (Radical Imaging) + +# Project Description + +Creating a cohort or tagging DICOM images is a very time-consuming and error-prone procedure. +In this project, we are extending the functionality of the so-called Gallery View within Kaapana. + +![](https://github.com/NA-MIC/ProjectWeek/releases/download/project-week-resources/PW38__KaapanaFastViewingAndTaggingOfDICOMImages__NA-MIC.gif) + +## Approach and Plan + +The Gallery View is part of the kaapana open source toolkit and is based on open source tools such as +OpenSearch, dcm4chee, dcmjs and Cornerstone.js. +So far, the Gallery View is a proof of concept for fast viewing and tagging of DICOM Images. + +**We are actively looking for new use cases, in which the functionality of the Gallery View can be either applied or extended.** + + +Some use-cases we have in mind: +- Proper visualization of thumbnails for segmentation data +- More efficient/faster loading of DICOM Series in the detail view (on the right) +- Adding metadata to the DICOM Image viewer (similar to OHIF Viewer) +- Download of tags as CSV files + +If you have any ideas in mind, please feel free to contact us (-> stefan.denner@dkfz-heidelberg.de). 
+ +## Progress and Next Steps + +- Upgraded Cornerstone to Cornerstone3D with the great help of Alireza and Davide +- Proof of Concept for creating segmentation thumbnails in the Gallery View +- Proof of Concept for multi select of items +- Proof of concept virtual scrolling to improve performance + +TODO: +- Cleanup and integrate into kaapana code base +- Add segmentation support to Cornerstone3D viewer +- Store segmentation thumbnails to S3 bucket to load them from there. + +# Illustrations + +tbd + +# Background and References + +https://github.com/kaapana/kaapana diff --git a/PW38_2023_GranCanaria/Projects/KaapanaIntegrationOfDesktopApps/README.md b/PW38_2023_GranCanaria/Projects/KaapanaIntegrationOfDesktopApps/README.md new file mode 100644 index 000000000..7c3b52eab --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/KaapanaIntegrationOfDesktopApps/README.md @@ -0,0 +1,64 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Integration of desktop apps + +Kaapana tutorial for the 38th NA-MIC project week: +https://drive.google.com/file/d/1A7-8Ru0uTJHFFa17rZtkBpvNhJao_F7x/view?usp=share_link + +## Key Investigators + +- Hanno Gao (German Cancer Research Center, Germany) +- Klaus Kades (German Cancer Research Center, Germany) +- Andrey Fedorov (Brigham and Women's Hospital, USA) +- Ralf Floca (German Cancer Research Center, Germany) + +# Project Description + +It could be useful for Desktop applications such as 3D slicer or MITK to run within a browser, for this a containerization of the application is necessary. Also it could be useful for Desktop applications to communicate with third-party endpoints to, for examples, run a model on images to get a segmentation. In this project, we focus on solution to containerize desktop applications and on communicating with third-party tools + +## Objective + + + +1. Desktop apps in containers (Slicer, MITK, OHIF, …), Improved Slicer integration +2. 
Desktop interaction with Kaapana (Slicer, MITK, ITK-SNAP …) + +Relate to: +- [Kaapana overall](https://github.com/NA-MIC/ProjectWeek/tree/master/PW38_2023_GranCanaria/Projects/Kaapana_overall) +- [SlicerCloud](https://github.com/NA-MIC/ProjectWeek/tree/master/PW38_2023_GranCanaria/Projects/SlicerCloud) +- [OHIFSlicerBridge](https://github.com/NA-MIC/ProjectWeek/blob/master/PW33_2020_GranCanaria/Projects/OHIFSlicerBridge/README.md) + + +## Approach and Plan + + + + +1. Look at the current state of Slicer and MITK integration in Kaapana (container and client/server interaction) +![image](https://user-images.githubusercontent.com/49161877/215472354-28e8e2bd-60c4-4bc5-9b20-69c98de61a80.png) +2. Finish/adapt integration. +3. Improve desktop (running in a browser) streaming solutions - (noVNC, guacamole...) in the Kaapana kubernetes cluster. +4. Create documented API for the interaction of Kaapana with desktop clients + +## Progress and Next Steps + + + +1. Integrated Slicer extension +1. Started workflow "Slicer-flow" +1. 
Created uniform Kaapana RestApi for dicomweb + +# Illustrations + + + +# Background and References + + + +* [branch](https://github.com/fedorov/kaapana/tree/develop-slicer) that attempts to integrate Slicer into Kaapana +* [MITK TaskList](https://phabricator.mitk.org/T29160) +* [Example data to working with MITK Tasklist](https://drive.google.com/drive/folders/18HZWEENZaKYA6F4nv4ZFgdPOGuO1KWwv?usp=sharing) diff --git a/PW38_2023_GranCanaria/Projects/Kaapana_overall/README.md b/PW38_2023_GranCanaria/Projects/Kaapana_overall/README.md new file mode 100644 index 000000000..691e044cd --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/Kaapana_overall/README.md @@ -0,0 +1,77 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Kaapana related experiments/discussions/collaboratons + +## Key Investigators + +- Andrey Fedorov (Brigham and Women’s Hospital, USA) +- Nadya Shusharina (Brigham and Women's Hospital, USA) (online) +- Marco Nolden (German Cancer Research Center, Germany) +- Hans Meine (Fraunhofer MEVIS, Germany) +- Klaus Kades (German Cancer Research Center, Germany) + +**[Project channel on Discord: #kaapana-idc-integration](https://discord.com/channels/843934857620357130/1069559070295588954)** + +# Project Description + +[Kaapana](https://kaapana.readthedocs.io/en/stable/intro_kaapana.html#what-is-kaapana) is a Kubernetes-based open source toolkit for platform provisioning in the field of medical data analysis. Kapana is leveraging a number of open source tools that are relevant for the NA-MIC community (specifically, OHIF Viewer, MITK, nnU-Net segmentation tools) and relies on DICOM for managing images, image-derived data and metadata. + +In this project current, perspective and aspiring users of Kaapana will have the opportunity to work with the developers of the platform to get help with deploying and using the platform, and to discuss potential problems or directions for future development and collaboration. + +## Objective + + + +1. 
Deploy latest version of the platform locally and on GCP. +1. Discuss specific topics of interest. +1. Document results of discussion, share any code developed in the process. + +## Approach and Plan + + + +1. Deploy Kaapana on Andrey's linux laptop. +1. Deploy Kaapana on a GCP VM. +1. Establish shared GCP project for collaboration. +1. Discuss specific topics of interest as summarized below, document the main points of the discussion (in the below, "I" refers to Andrey Fedorov). + + +**Improved Slicer integration** : we already have Slicer app added to Kaapana following the example of MITK (see https://github.com/fedorov/kaapana/tree/0.1.2-november-slicer). However, communication to / out of the app is quite clunky. Specifically, we have not figured out how to be able to select cases from the dashboard and open those directly in Slicer. Also, we would like to have a workflow that writes DICOM segmentations etc back into the DICOM server. Related to [Integration of Desktop Apps](../KaapanaIntegrationOfDesktopApps/README.md). + * approach to pass the list of UIDs to mpReview: create a workflow that will save a file on minio with the list of UIDs that are selected in metadata dashboard + * how to start: create a cohort in the metadata dashboard, then in Experiments select the DAG (to be created) that would write the manifest to minio; to create a DAG start with the `collect-metadata` DAG - 1) read `cohort_identifiers` (this is the conf object that is accessed from `start` function of `LocalWorkflowCleanerOperator` 2) write manifest to minio - this is as in `dag_collect_metadata.py` workflow. Probably will be better to combine manifest export with launching Slicer with the cohort opened. + +**Integration with GCP Healthcare DICOM stores** : right now we use dcm4chee as the DICOM server. 
This is problematic while deploying kaapana on the cloud, since 1) it is huge waste of resources: we already have our data in storage buckets, we need to replicate those files on attached disk (and attached storage is very expensive), then import into dcm4chee (which is very very very slow, and does not work for all types of DICOM objects - SRs are rejected); 2) I am not sure it is scalable to use dcm4chee. We can very easily set up a DICOM store under GCP Healthcare, which is cheaper, faster, is highly scalable, and can be accessed using standard DICOMweb interface with authentication. It would be extremely helpful to be able to use that GCP DICOM Store in place of dcm4chee. Related to [Connecting/Using Kaapana to Google Cloud/Google Health/Google FHIR](../KaapanaConnectingKaapanaToGoogleCloudAndHealthAndFHIR/README.md). + * related [https://cloud.google.com/healthcare-api/docs/how-tos/dicomweb#healthcare-store-transaction-python](https://cloud.google.com/healthcare-api/docs/how-tos/dicomweb#healthcare-store-transaction-python) + * break the task by creating a custom workflow that is intialized with the variables 1) series instanceUIDs to be processed; 2) DICOMweb endpoint; 3) credentials. Start with any workflow that interacts with dcm4chee. + +**Integration with IDC** : All of IDC data is available from public GCP buckets, egress is free. All you need is to have Google Cloud SDK https://cloud.google.com/sdk installed, and to do searching, one needs to have a GCP project and credentials. Maybe we can discuss this. Related to [Data and model exchange across different sources](../KaapanaDataAndModelExchangeAcrossDifferentSources/README.md). + +**Integration of new analysis tools into Kaapana** : we have been developing use cases that utilize publicly available AI tools, starting from DICOM images and producing DICOM output, see some here: https://app.modelhub.ai/. 
It would be good to go over the process of adding one of those to kaapana as an experiment, so I can understand the process. We could also use prostate cancer segmentation model from MONAI zoo that we are going to investigate in this project: https://github.com/NA-MIC/ProjectWeek/pull/486/files#diff-1b4e320dd5db1df87192959dee521ff75d94129c1b97ede523d6b740271191b7R3. Related to [Data and model exchange across different sources](../KaapanaDataAndModelExchangeAcrossDifferentSources/README.md). Relatred questions: + * how to debug failures? e.g., see [this as an example](https://kaapana.slack.com/archives/C018MPL9404/p1674230282696369?thread_ts=1674181916.424089&cid=C018MPL9404) + +**Running Kaapana on Google Kubernetes Engine** : while using GCP, we've been following an extremely naive and inefficient approach for deploying Kaapana. We allocate a fixed linux VM, and install it as if we are on a on-prem server. As I understand it, to fully leverage the power of k8s, it would make a lot more sense to use Google Kubernetes engine. My knowledge of k8s and microk8s is very close to 0, so maybe this is something that is highly trivial. Maybe we could experiment with this together. We can even set up a shared GCP projects where I can add you, so you can experiment directly. Related to [Connecting/Using Kaapana to Google Cloud/Google Health/Google FHIR](../KaapanaConnectingKaapanaToGoogleCloudAndHealthAndFHIR/README.md). + * Jonas is working on removing the use of `hostpath` + +**Maintenance of Kaapana instance** : discuss the process of checking for security vulnerabilities, updating the developers of identified vulnerabilities, communicating the need to update to the users, look if scanning features available in GCP could be helpful. + +## Progress and Next Steps + + + +1. Set up latest kaapana `develop` instance on a linux laptop that was then used for development. +2. GCP GKE was used by Jonas to investigate deployment on GKE. +3. 
Debugged, finalized and tested 3D Slicer extension, integrated into Kaapana in this PR: [https://github.com/kaapana/kaapana/pull/14](https://github.com/kaapana/kaapana/pull/14) +4. Discussed specific steps towards addressing the topics of interest discussed above, notes added in the above. +5. Work on the topics above to continue after the PW! + +# Illustrations + + + +# Background and References + +* [Kaapana docs](https://kaapana.readthedocs.io/en/stable/intro_kaapana.html#what-is-kaapana) + diff --git a/PW38_2023_GranCanaria/Projects/LongCOVID/README.md b/PW38_2023_GranCanaria/Projects/LongCOVID/README.md new file mode 100644 index 000000000..94187d1ad --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/LongCOVID/README.md @@ -0,0 +1,67 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Long-COVID: Study Design and Delineation of Fiber Tracts of the Brainstem + +## Key Investigators + +- Zora Kikinis (Harvard Medical School, USA) +- Nikos Makris (Harvard Medical School, USA) +- Greg Gasic +- Fan Zhang (Harvard Medical School, USA) + + +# Project Description + +Symptoms of long-COVID19 might linger for several weeks or months after the initial infection by the SARS-CoV-2 virus. Symptoms are headache, problems with memory and concentration, anxiety, extreme fatigue, chest pain or tightness, gastrointestinal symptoms and difficulty sleeping. In addition, abnormal immune response to viral infection is reported in patients with long-COVID. A dysfunctional brainstem might explain the neuropsychiatric, and autonomic symptoms as a consequence of abnormal immune response. + +## Objective + + + +1. Are long-COVID symptoms associated with changes in white matter fiber tracts of the brainstem? . +1. Objective B. ... +1. Objective C. ... + +## Approach and Plan + + + +1. Segment ROIs in the brainstem. +2. 
Delineate white matter tracts of the brainstem in a postmortem dMRI data set (50 micro m resolution) and in a 3T dataset of a patient with long-COVID. Candidate tracts are Medial Forebrain Bundle (MFB) and Dorsal Longitudinal Fiber (DLF) and other fiber tracts of the brainstem. +3. Brainstorm hypotheses about which symptoms of long-COVID might be associated with individual tracts of the brainstem. + +## Progress and Next Steps + + + +1. Segmentation of the Nucleus Tractus Solitarius (solitary complex)–Area Postrema (NTS-AP). +NTS-AP plays an important role in neuro-immune regulatory loops and autonomic reflexes. + +2. Delineated the Medial Forebrain Bundle (MFB) and Dorsal Longitudinal Fiber (DLF) within the brainstem in the postmortem dMRI data set. +3. Run whole brain tractography on the 3T data set. +4. Registering the 3T data set to the postmortem dMRI data set. + +# Illustrations + +Screenshot 2023-02-02 at 11 18 07 AM + +Screenshot 2023-02-02 at 11 18 57 AM + + +# Background and References + +3D Exploration of the Brainstem in 50-Micron Resolution MRI +Richard Jarrett Rushmore, Peter Wilson-Braun, George Papadimitriou, Isaac Ng, Yogesh Rathi, Fan Zhang, Lauren Jean O'Donnell, Marek Kubicki, Sylvain Bouix, Edward Yeterian, Jean-Jacques Lemaire, Evan Calabrese, G Allan Johnson, Ron Kikinis, Nikos Makris +Frontiers in Neuroanatomy, 2020 + +https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7538715/ + + +Larger gray matter volumes in neuropsychiatric long-COVID syndrome +Bianca Besteher, Marlene Machnik, Marie Troll, Antonia Toepffer, Ani Zerekidze, Tonia Rocktäschel, Carina Heller, Zora Kikinis, Stefan Brodoehl, Kathrin Finke, Philipp A. Reuken, Nils Opel, Andreas Stallmach, Christian Gaser and Martin Walter +Psychiatry Res. 
2022 + +https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9444315/pdf/main.pdf diff --git a/PW38_2023_GranCanaria/Projects/LungSegmentation/README.md b/PW38_2023_GranCanaria/Projects/LungSegmentation/README.md new file mode 100644 index 000000000..cf5b47ad7 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/LungSegmentation/README.md @@ -0,0 +1,58 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# 3D Slicer Lung CT Segmentation + +## Key Investigators + +* Rudolf Bumm (KSGR) +* Ron Kikinis (Brigham and Women's Hospital) +* Raúl San José Estépar (Brigham and Women's Hospital) +* Steve Pieper (Isomics) +* Eserval Rocha jr. (University of Sao Paulo Medical School) +* Andras Lasso (Perk Labs) +* Curtis Lisle (KnowledgeVis) + +# Project Description + +This is a follow-up to previous 3D Slicer lung CT segmentation PW projects.  + +## Objective + +Our objective is to improve the lung CT segmentation and analysis processes in 3D Slicer. + +* Improve **vessel** segmentation +* Implement **emphysema** and COPD analysis +* Develop a concept for **lung segment** segmentation in 3D Slicer +* Fine-tune the workflow for **surgical planning** +* Work on possible **grant application** + +## Specific Approach and Plan + +* make lung, lobe, and airway segmentation fully automatic (no manual intervention) +* write a batch script that makes use of the LCTA logic +* test the script on the OpenSourceCovidDatabase +* evaluate results and compare them to radiology score  +* discuss strategies for vessel segmentation and segment detection + +## Progress and Next Steps + +During the project week, we were able to apply SlicerLiver (with fantastic help of the liver team) to a working demo lung segmentation dataset, demonstrating that segment-oriented lung resection can be simulated in SlicerLiver using VMTK and dedicated software functions. 
+Furthermore, we talked with Jakob Wasserthal, the creator of TotalSegmentator, about how to incorporate pulmonary artery and vein segmentation into his deep-learning tool. Raúl San José Estépar agreed to provide the TotalSegmentator training dataset with ground truth data. We elaborated the skeleton of a grant application for vessel-based lung segment segmentation. + +We ran LCTA over the complete dataset of the OpenSourceCovidDataset with great results and a good correlation between radiology expert and machine.  + +![](https://user-images.githubusercontent.com/18140094/216458521-1df25eb4-63b2-4946-8b67-6881f8050024.png) + +![](https://user-images.githubusercontent.com/18140094/216458649-a7862df4-4c2a-4518-a1f8-c1e0b441be9c.png) + +# Illustrations + +![](https://user-images.githubusercontent.com/18140094/216455289-bbf2d613-57f4-423f-8e17-0263a5cda126.png) + +![](https://user-images.githubusercontent.com/18140094/216455423-5c2990be-b31d-4691-9bf9-1c3540366e4c.png) + +# Background and References + +[Lung CT Analyzer extension](https://github.com/rbumm/SlicerLungCTAnalyzer) + +[Open Source COVID Database](https://www.mdpi.com/2306-5354/8/2/26) diff --git a/PW38_2023_GranCanaria/Projects/MHub_Integration/README.md b/PW38_2023_GranCanaria/Projects/MHub_Integration/README.md new file mode 100644 index 000000000..643987569 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/MHub_Integration/README.md @@ -0,0 +1,59 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# MHub Integration + +## Key Investigators + +- Leonard Nürnberg (Department of Radiology, Brigham and Women’s Hospital, Boston, MA) +- Dennis Bontempi (Department of Radiology, Brigham and Women’s Hospital, Boston, MA) +- Andrey Fedorov (Department of Radiology, Brigham and Women’s Hospital, Boston, MA) + +# Project Description + +We are working on a repository to standardize Deep Learning models in medical imaging and make them easily accessible to everyone. 
+A central point of our efforts is to develop a standardized I/O framework for all models to unify the data stream into and out of these models, making them interchangeable. Seamless integration of our repository with Slicer would allow immediate application and exploration of models without the need to set up model environments locally. + +Therefore, we are planning a Slicer extension that will allow to search our repository from within Slicer to deploy and run these models locally, using the Slicer interface for data input and output. + +Link to the [Plugin](https://github.com/AIM-Harvard/SlicerMHubRunner/tree/docs). + +## Objective + + + +1. Objective A. Discover how to run models in Slicer securely, conflict-free, and platform-independent. +2. Objective B. Validate and customise our definitions of a generic I/O framework. +3. Objective C. Document pros and cons of docker vs native python integration of the model, support with experimental results. Concerns re Docker communicated earlier: + * Docker may be challenging to install and setup (org constraints, permissions, expertise) + * Docker images are large and slow to download + * Support of GPU with Docker is not straightforward/limited + * + +## Approach and Plan + + + +1. We have two approaches in mind: packaging models in Docker containers and running models in separate Python environments. For both options, we need to weigh the pros and cons to find the most appropriate solution that maximizes the user base while avoiding or minimizing manual per-model customization. +2. We want to create a clear understanding of the challenges and drawbacks of using Docker. We will provide a step-by-step guide to setting up Docker and are looking for volunteers to try out the setup under our guidance and report back their valuable feedback. +2. We would like to discuss and find the best standard for model outputs (e.g., segmentation label names). +4. 
We plan to develop a slicer plugin that connects data loaded into Slicer to a model via our i/o framework and transfers the model outputs back into Slicer, using selected DL models as proofs of concept. + +## Progress and Next Steps + + + +1. We have developed an experimental modular conversion framework to bridge between a standardized I/O and specific model requirements. +2. We dockerized two models (Totalsegmentator, Platipy) using our I/O FW. + +# Illustrations + + + +# Background and References + + + +Plugin Module Overview diff --git a/PW38_2023_GranCanaria/Projects/MONAILabel2bundle/README.md b/PW38_2023_GranCanaria/Projects/MONAILabel2bundle/README.md new file mode 100644 index 000000000..20ce9943f --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/MONAILabel2bundle/README.md @@ -0,0 +1,111 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Conversion of MONAI Label trained network into a MONAI bundle + +## Key Investigators + +- Deepa Krishnaswamy (Brigham and Women's Hospital, USA) +- Cosmin Ciausu (Brigham and Women's Hospital, USA) +- Umang Pandey (Universidad Carlos III de Madrid, Spain) +- Nazim Haouchine (Brigham and Women's Hospital, USA) +- Andres Diaz-Pinto (NVIDIA, USA) +- Jesse Tetreault (NVIDIA, USA) +- Roya Hajavi (Brigham and Women's Hospital, USA) +- Khaled Younis (Philips) +- Stephen Aylward (Kitware, USA) +- Steve Pieper (Isomics Inc, USA) +- Andrey Fedorov (Brigham and Women's Hospital, USA) + +# Project Description + +MONAI Label has become a very popular tool in the NA-MIC community for developing new trained models and incorporating expert feedback into the training process. + +Unfortunately, it is currently not straightforward to take the models trained using MONAI Label and apply them in batch mode. 
+ +MONAI supports bundles, which are designed for batch mode processing, but the process of converting MONAI Label trained networks into MONAI bundle representation is not well understood and (per MONAI experts) currently requires support from MONAI developers. + +In this project we want to explore the process of converting MONAI Label trained networks into MONAI bundle format, and demonstrate how the resulting bundles can be applied to datasets in [NCI Imaging Data Commons](https://portal.imaging.datacommons.cancer.gov/). + +## Objective + + +1. Develop a complete example transforming MONAI Label network to MONAI bundle. +1. Improve existing documentation. +1. Demonstrate how MONAI Label trained network converted to bundle can be applied to a representative sample of data from IDC. + + +## Approach and Plan + + + +1. Use MONAI Label trained model for [segmentation of vertebrae in CT](https://github.com/Project-MONAI/MONAILabel/blob/main/sample-apps/radiology/main.py#L174) as the use case. +2. Identify MONAI documentation for transforming MONAI Label trained network into MONAI bundle format. +3. Develop MONAI bundle from the network in 1. +4. Select applicable representative subset of data from IDC and apply resulting bundle to a produce segmentations, save segmentations as DICOM SEG, confirm visualization with OHIF. +5. Document the process and any refinements to the existing instructions. + +## Progress and Next steps + +1. We decided instead to convert the full CT segmentation MONAI label app from Andres to a bundle, as it has a single stage compared to the 3 stage vertebare pipeline. This model was trained on TotalSegmentator data and used a SegResNet architecture. +2. We were able to convert the app to a bundle for inference! We had to modify a few transforms for orientation. Now you can use a single command to run inference instead of manually opening 3DSlicer and choosing data to run on. +3. 
We tested the bundle on a spleen dataset from decathalon data (Figure 1 below). +4. We can compare this approach to actual TotalSegmentator segmentation (Figure 2 below) +5. Now we want to test on data from IDC (NSCLC-Radiomics patient that has some ground truth segmentation). Unfortunately we are getting a lot of CUDA memory errors since these datasets are a lot larger than the spleen dataset we previously tested on. We're working on making changes to the inference.json file and are trying to crop the images before inference. (Figures 3 and 4) +6. Future work involves solving these memory errors, saving the output as DICOM SEG, and a more thorough comparison between the MONAI bundle and TotalSegmentator output. Testing on large collections and comparing to the ground truth segmentations is also part of the future work. + +Github repo: https://github.com/deepakri201/monai_full_ct_segmentation_bundle + +# Illustrations + + + +Figure 1 - Full CT segmentation on subject from spleen decathalon data +![Figure 1 - Full CT segmentation on spleen data from decathalon data](https://user-images.githubusercontent.com/59979551/216036231-cab022f4-dbb1-4932-928f-af9b061733fc.JPG) + +Figure 2 - Comparison on spleen decathalon data of the MONAI full CT segmentation bundle we created (left) to the output TotalSegmentator produces (right) +[monai_bundle_vs_total_seg_spleen.webm](https://user-images.githubusercontent.com/59979551/216606510-047a0105-17ca-4765-8186-4132edf2c0e9.webm) +![](https://github.com/NA-MIC/ProjectWeek/releases/download/project-week-resources/PW38__MONAILabel2bundle__monai_bundle_vs_total_seg_spleen.gif) + +Figure 3 - Full CT segmentation on subject from IDC +![02_03_23_full_ct_segmentation_success_idc](https://user-images.githubusercontent.com/59979551/216612414-649813e5-945b-4719-aaa8-8954aeb44d18.JPG) + +Figure 4 - Comparison on IDC data of the MONAI full CT segmentation bundle we created (left) to the output TotalSegmentator 
produces (right) +[monai_bundle_vs_total_seg_idc.webm](https://user-images.githubusercontent.com/59979551/216612449-013d7dad-7bc6-43b0-9780-0c9f9b848007.webm) +![](https://github.com/NA-MIC/ProjectWeek/releases/download/project-week-resources/PW38__MONAILabel2bundle__monai_bundle_vs_total_seg_idc.gif) + +## Discussion notes + + + +1. Identified MONAI Label network location; discussed the project with Stephen, identified relevant expertise on MONAI side; planning to have coordination meeting with Roya. +1. Identified from Andres an example of a [MONAI Label app](https://github.com/Project-MONAI/MONAILabel/blob/main/sample-apps/radiology/lib/configs/deepedit.py) and the corresponding [MONAI bundle](https://github.com/Project-MONAI/model-zoo/tree/dev/models/spleen_deepedit_annotation/configs). +1. Identified another possible example of a [MONAI Label app](https://github.com/Project-MONAI/MONAILabel/blob/main/sample-apps/radiology/lib/configs/segmentation_spleen.py) and the corresponding [MONAI bundle](https://github.com/Project-MONAI/model-zoo/tree/dev/models/spleen_ct_segmentation/configs). +1. In progress [colab notebook](https://github.com/ImagingDataCommons/idc-vertebrae-ct-segmentation/blob/main/MONAI_spine_localization_task.ipynb) for the conversion of spine localization task +1. Discussion with Jesse, Andres, Stephen, Steve: Look into creating a bundle vs MONAI deploy app SDK. We started a public discussion of MONAI label app to bundle creation [here](https://github.com/Project-MONAI/MONAI/discussions/5894#discussioncomment-4769712) +1. Yesterday during the MONAILabel AWS workshop, we tried out the localization_spine step. It seemed to work on the dataset, but we noticed that the model was both named differently and in a different app folder. We wanted to make sure that we could also perform the localization_spine successfully either in Slicer or in a script. 
We started with the scripted version [here](https://github.com/Project-MONAI/MONAILabel/blob/main/sample-apps/radiology/main.py), on a nifti file from VERSE, which was also used for training. We expected this to produce a somewhat reasonable segmentation of the spine, but it produced an empty segment. We tried the second stage of localization_vertebra model, and this produced a partial segmentation of one vertebra. +1. We then explored into setting up MONAILabel locally. We managed to install everything and start the server on Windows (Mac is a WIP), and ran inference using the localization_spine model through Slicer on a nifti file from the training data in VERSE. This again yielded empty segments... +1. We found a post in the [Slicer discourse forum](https://discourse.slicer.org/t/monailabel-vertebrae-segmentation-sample-app-doesnt-work-for-sample-data/27243) where others also had problems with the vertebrae_pipeline. However, this doesn't completely address our problem as this one uses a network trained on TotalSegmentator data to perform segmentation of organs+vertebrae. Though the segmentations might be acceptable, this might not work for all as there is no preservation of the ordering of the vertebrae, which might be better addressed by the 3 stage vertebra_pipeline (localization_spine, localization_vertebra, vertebra_segmentation). +1. We will talk to Nazim tomorrow to see if he has encountered issues using the localization_spine step on data that it was trained on. In the meantime, we will try out the updated model provided by Andres for vertebra segmentation [here](https://drive.google.com/drive/folders/17eJan-8_oNCnZyJk8B9zLQpwaOjtDuKi?usp=share_link) to make sure we can at least get results with this in Slicer. Perhaps we can convert this to a bundle first? 
--> As a test, this model worked on a dataset from the training set of TotalSegmentator,and also worked on a dataset from VERSE, with expected differences in segmentation accuracy because of resolution etc. +1. Cosmin and I met with Nazim to talk about our issues with the localization_spine step in Slicer producing empty labels. We tried running all three stages and got a runtime error - tensor shape. We then tried the segmentation_spleen model on training data from Task09_Spleen, this should produce a proper spleen label. It did not, kind of a fragmented spleen. Is this a CPU vs GPU problem? Cosmin will try to test on his Linux machine that has a GPU. Do we have the lastest versions of the pretrained models? The spleen model is coming from [here](https://github.com/Project-MONAI/MONAILabel/releases/download/pretrained/radiology_segmentation_unet_spleen.pt) which is the most recent one. Nazim suggested trying to install everything again. I will also try segmentation_spleen model using a script. +1. I posted on Slicer discourse about some issues with MONAI Label and the 3 stage vertebra segmentation pipeline. https://discourse.slicer.org/t/using-monailabel-for-vertebrae-segmentation/27511 + +1. We tried installing the latest preview release of Slicer to see if inference worked with localization_spine on 2019 and 2020 VERSE dataset, it did not. We also tried the whole vertebrae pipeline and we have the same error with tensor shape size - RuntimeError: Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: [1, 1, 0, 0, 0]. Umang - also where is the temp file saved for the first localization_spine step? +2. In the meantime we will try converting the full CT seg (trained using TotalSegmentator data) to a bundle. If that works we can go back to the vertebra pipeline? Steve suggested also that we do this instead of focusing on the vertebra. 
And maybe if we want to train the full CT with higher resolution data (change the target_spacing is the main thing we need to do?) we could think about this at a later stage. +3. We've posted two issues for the vertebrae segmentation, [here](https://github.com/Project-MONAI/MONAILabel/issues/1267) and [here](https://github.com/Project-MONAI/MONAILabel/issues/1268). We got some responses -- they suggested running inference on a dataset from decathalon instead. Localization_spine ran successfully! Not the best, but there is some spine segmented. So this is probably because of the resolution. The original VERSE dataset is pretty high res, but looks like the target_spacing is (1.3,1.3,1.3) for localization_spine. So we will try resampling VERSE to the target_spacing and then try inference. We tried running the full vertebra segmentation on the spleen dataset, and all three stages seem to work with no errors related to tensor shape. +4. We created the bundle for full ct segmentation, and here is the first run on a spleen dataset. We'll have to fix the transforms. +5. Steve suggested we might need to do something like this: https://github.com/LymphNodeQuantification/Monailabel-LNQ/blob/main/apps/radiology-retrain-2022-12/lib/infers/segmentation.py. We need to save out the nifti file at each stage of the transforms to see where the orientation changes. Check Invertd transform, Orientationd transforms etc. +6. This post from yesterday on creating a bundle for SegResNet trained on TotalSegmentator data: https://github.com/Project-MONAI/MONAILabel/issues/1269 +7. I'm able to get the inference to work for the above! (image below). We had to remove the Orientationd transform. We will test on more data and start looking into vertebrae segmentation pipeline. 
+ +# Background and References +- [Github repo](https://github.com/deepakri201/monai_full_ct_segmentation_bundle) +- [NCI Imaging Data Commons](https://portal.imaging.datacommons.cancer.gov/) +- [MONAI bundle docs](https://github.com/Project-MONAI/tutorials/blob/main/bundle/get_started.md) +- [MONAI Label app for vertebrae segmentation](https://github.com/Project-MONAI/MONAILabel/blob/fullCTSegmentation/sample-apps/radiology/lib/configs/segmentation_full_CT.py) +- [MONAI Label app for whole body segmentation](https://github.com/Project-MONAI/MONAILabel/blob/fullCTSegmentation/sample-apps/radiology/lib/configs/segmentation_full_CT.py) +- [MONAI bundle specification](https://docs.monai.io/en/stable/mb_specification.html) +- Script from Steve to run MONAILabel model in batch from Slicer: [https://github.com/LymphNodeQuantification/Monailabel-LNQ/blob/main/Experiments/reviewer.py](https://github.com/LymphNodeQuantification/Monailabel-LNQ/blob/main/Experiments/reviewer.py) +- Notes from meetings [here](https://docs.google.com/document/d/1d1vUYdUzSbnitJDyzi-FCCeccGzHQGUHcZKCBeSi_28/edit?usp=sharing) diff --git a/PW38_2023_GranCanaria/Projects/MONAI_IDC_PCa_detection/README.md b/PW38_2023_GranCanaria/Projects/MONAI_IDC_PCa_detection/README.md new file mode 100644 index 000000000..43efb82d3 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/MONAI_IDC_PCa_detection/README.md @@ -0,0 +1,83 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# HOWTO: Detection of prostate cancer in IDC images using MONAI prostate_mri_anatomy model + +## Key Investigators + +- Cosmin Ciausu (Brigham and Women's Hospital, USA) +- Deepa Krishnaswamy (Brigham and Women's Hospital, USA) +- Patrick Remerscheid (Brigham and Women's Hospital, USA and Technical University Munich, Germany) +- Tina Kapur (Brigham and Women's Hospital, USA) +- Sandy Wells (Brigham and Women's Hospital, USA) +- Andrey Fedorov 
(Brigham and Women's Hospital, USA) + +# Project Description + +[MONAI Zoo](https://github.com/Project-MONAI/model-zoo) has a growing number of pre-trained models for solving a range of image analysis tasks. It is of interest to understand the robustness of those models on independent datasets and to evaluate their performance. + +[NCI Imaging Data Commons (IDC)](https://portal.imaging.datacommons.cancer.gov/) has a growing number of imaging datasets, most of which do not have accompanying annotations, complicating downstream analysis. + +In this project we will demonstrate how an existing pre-trained MONAI model packaged as a bundle can be applied to a suitable subset of data from IDC, and how existing annotations can be used to validate results produced by this model. + +## Objective + + +1. Develop an end-to-end documented example demonstrating the use of MONAI bundle on IDC prostate MRI. +1. Understand and quantify the performance of the model using ground truth annotations. +1. If applicable (results are of good quality), consider sharing the produced annotations within IDC. + +## Approach and Plan + + + +1. Develop a Google Colab notebook that contains the following steps: + + 1. Install prerequisites and the MONAI bundle https://github.com/Project-MONAI/model-zoo/tree/dev/models/prostate_mri_anatomy. + + 1. Select applicable subset of MRI series from IDC (ProstateX and QIN-Prostate-Repeatability collections). + + 1. Convert images from DICOM to the format acceptable by the model. + + 1. Run inference. + + 1. Visualize results. + + 1. Perform quantitative evaluation of the results. + + 1. Convert results into DICOM representation, visualize in OHIF. + +2. Document performance of the model. + +3. Consider sharing analysis results if they are of good quality. + +## Progress and Next Steps + + + +1. Preliminary work applying the model in question to segment prostate anatomy. +1. Created bundle segmenting prostate tumors +1. 
[Minimum working example on training data sample](https://github.com/ImagingDataCommons/idc-prostate-mri-analysis/blob/main/cancer_bundle_train_prostate158.ipynb) +3. Examination of results on pre-trained model training data : prostate158 +4. Multi-modal input : T2,ADC, DWI, understand acquisition process of DWI used for training +5. Bundle creating thoughts : More extensive documentation about required parameters in inference.json and the relation between anatomy.json and inference.json should be provided. +6. Document process of creating bundle, difficulties encountered +8. Next steps : Confirm DSC results on prostate158 and evaluate on IDC data(DWI acquisition parameters -- QIN Prostate repeatability similar to prostate158 ?) + +# Illustrations + + + +Screen Shot 2023-02-03 at 1 10 45 PM + + +# Background and References + +- [NCI Imaging Data Commons](https://portal.imaging.datacommons.cancer.gov/) +- **[Minimum working example of pc segmentation on training sample prostate158](https://github.com/ImagingDataCommons/idc-prostate-mri-analysis/blob/main/cancer_bundle_train_prostate158.ipynb)** +- **[Minimum working example of pc segmentation on QIN-Prostate-Repeatability collection sample](https://github.com/ImagingDataCommons/idc-prostate-mri-analysis/blob/main/MONAI_prostate158_cancer_qin_prost_rep.ipynb)** +- [MONAI zoo prostate_mri_anatomy bundle](https://github.com/Project-MONAI/model-zoo/tree/dev/models/prostate_mri_anatomy) +- [MONAI PC segmentation custom bundle](https://github.com/ImagingDataCommons/idc-prostate-mri-analysis/tree/main/pcDetectionBundle) +- [PC segmentation model paper](https://www.sciencedirect.com/science/article/pii/S0010482522005789?via%3Dihub#kwrds0010) diff --git a/PW38_2023_GranCanaria/Projects/MeshComparison/Boolean_difference.png b/PW38_2023_GranCanaria/Projects/MeshComparison/Boolean_difference.png new file mode 100644 index 000000000..fb6eb26b8 Binary files 
/dev/null and b/PW38_2023_GranCanaria/Projects/MeshComparison/Boolean_difference.png differ diff --git a/PW38_2023_GranCanaria/Projects/MeshComparison/Closed_model.png b/PW38_2023_GranCanaria/Projects/MeshComparison/Closed_model.png new file mode 100644 index 000000000..a651badfa Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/MeshComparison/Closed_model.png differ diff --git a/PW38_2023_GranCanaria/Projects/MeshComparison/MeshComparison_figure.png b/PW38_2023_GranCanaria/Projects/MeshComparison/MeshComparison_figure.png new file mode 100644 index 000000000..802d6915f Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/MeshComparison/MeshComparison_figure.png differ diff --git a/PW38_2023_GranCanaria/Projects/MeshComparison/Module_UI.png b/PW38_2023_GranCanaria/Projects/MeshComparison/Module_UI.png new file mode 100644 index 000000000..752bd9f3f Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/MeshComparison/Module_UI.png differ diff --git a/PW38_2023_GranCanaria/Projects/MeshComparison/Open_mesh.png b/PW38_2023_GranCanaria/Projects/MeshComparison/Open_mesh.png new file mode 100644 index 000000000..9d2adeb30 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/MeshComparison/Open_mesh.png differ diff --git a/PW38_2023_GranCanaria/Projects/MeshComparison/README.md b/PW38_2023_GranCanaria/Projects/MeshComparison/README.md new file mode 100644 index 000000000..2f37d49a7 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/MeshComparison/README.md @@ -0,0 +1,64 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Mesh Comparison + +## Key Investigators + +- Paolo Zaffino (Magna Graecia University of Catanzaro, Italy) +- Maria Francesca Spadea (Institute of Biomedical Engineering, KIT - Karlsruher Institut für Technologie, Germany) +- Michela Destito (Magna Graecia University of Catanzaro, Italy) +- Amerigo Giudice, the clinical mind behind the idea (Magna Graecia University of Catanzaro, Italy) +- Anyone who wants to join 
(bring a coffee!) + +# Project Description + + + +In the oral surgery field, it is important to quantify the level of swelling after a surgical procedure. Several centers can easily acquire volumetric scans of the patient, both before and after the intervention. Slicer already offers several modules for dealing with surfaces (including difference quantification), but lacks a tool for computing the volume between two meshes. Our aim is to provide such a tool. + +## Objective + + + +1. Write a module for computing the volume between two meshes. +1. Propose to integrate the developed module into a larger project (Slicer CMF?) + +## Approach and Plan + + + +1. Write a module for computing volume between two meshes (they can also be "open-mesh") +1. Use pyvista library for tasks involving mesh +1. Deal with open meshes + +## Progress and Next Steps + + + +1. Created a prototype [extension](https://github.com/pzaffino/SlicerMeshVolumeComparison). This can be improved a lot, it is just a starting point. +1. The module is able to close open models (by using pymeshfix library) +1. The module is able to compute volume difference and boolean difference (be careful, it could fail in some cases) +1. Ask our clinical partner to test it in a clinical scenario (feedback is more than welcome!) 
+ +# Illustrations +At the beginning of project week + +![Two meshes to compare in terms of volume](MeshComparison_figure.png) +![Open mesh to close](Open_mesh.png) +------------------------------------------------------------------------- +At the end of project week + + +![Module UI](Module_UI.png) +![Closed model](Closed_model.png) +![Boolean difference](Boolean_difference.png) + +# Background and References + +https://github.com/PerkLab/BreastReconstruction + +https://github.com/pzaffino/SlicerMeshVolumeComparison + + diff --git a/PW38_2023_GranCanaria/Projects/MetaDashboard/README.md b/PW38_2023_GranCanaria/Projects/MetaDashboard/README.md new file mode 100644 index 000000000..beb271fab --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/MetaDashboard/README.md @@ -0,0 +1,91 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# DICOM Meta-Dashboard + +## Key Investigators + +- Hans Meine (Fraunhofer MEVIS, Germany) +- Stefan Denner (German Cancer Research Center, Germany) +- Klaus Kades (German Cancer Research Center, Germany) +- Marco Nolden (German Cancer Research Center, Germany) +- Andrey Fedorov (BWH) + +**[Project channel on Discord: #dicom-meta-dashboard](https://discord.com/channels/843934857620357130/1069602293764337665)** + +# Project Description + +In practice, importing DICOM files into workspaces (e.g., for reader studies, analyses, ...) requires some manual filtering, sorting, and selection. +* sorting out "scout" images (localizers) +* sorting out images with artifacts, the acquisition of which was repeated anyhow +* identifying studies that need to be merged, e.g. when an imaging study is cancelled / incomplete and has to be repeated / completed within a few days, leading to two DICOM studies that together make up a single logical timepoint +* classifying the remaining images as "native", "contrast-enhanced" (incl. phase), "showing the complete liver", "T1-weighted", "DCE-MRI sequence", ... 
+* counting the number of complete "cases", e.g. "I need a prior and a matching follow-up image", "I need a native and a contrast-enhanced image", "I need n different dynamic contrast enhanced images" +Basically, one could describe the above as "putting images into the correct buckets", and a related task would be to check how many cases are "complete", in the sense that a specified number of buckets is "filled". + +There are probably countless attempts at supporting this workflow (some of which I am aware of), but this project is +about checking what the Meta dashboard that comes with +[Kaapana](https://kaapana.readthedocs.io/en/stable/intro_kaapana.html#what-is-kaapana) +already supports and could (/should) support in the future. + +Having a dashboard summarizing a data collection in a meaningful way is a recurring theme also outside of kaapana. We would like to investigate to which degree the requirements coming with common use cases (such as AI annotation, cohort definition, AI model training, automatic quality assurance) are already met and if they're not, how extensible the existing dashboard is. Furthermore, it would be interesting to assess whether such a dashboard can be shared with other projects (IDC, Grand-Challenge), and whether that really makes sense in practice. +Related to [Fast viewing and tagging of DICOM Images](../KaapanaFastViewingAndTaggingOfDICOMImages/README.md) (as well as to previous PW endeavors around Chronicle and DICOMweb by Steve Pieper). + +## Objective + + + +1. Use the Meta Dashboard without kaapana, on data that is not in a PACS. +2. Understand the limits of the current OpenSearch/Kibana stack, and whether it supports all intended use cases. + +## Approach and Plan + + + +1. Find out about the schema / information used by the Meta dashboard. +1. Try to set up the Meta dashboard outside of kaapana. +1. Feed OpenSearch with data from a dataset that is not in a PACS. +1. Document results (code / schema / design / use cases). 
+ +## Progress and Next Steps + +1. Hans has learned (mostly from Stefan) much about the current process / integration of the Meta dashboard in kaapana, and about its code location(s). Many things have been **documented on this page** to help others as well. +2. Hans spent a lot of time reviewing Kaapana code and opened a pull request with a few refactoring steps (https://github.com/kaapana/kaapana/pull/13). +3. Hans wrote a **MeVisLab module "DICOMTree2JSON" that mimicks what dcm2json does** and converts in-memory DICOM information into JSON. (The output has been verified to be "mostly identical" except for the pixel data which is not dumped. Other exceptions are integer "1\u0000" -> 1, for instance.) +4. Kaapana's dashboard defaults to documents on the series level (but the code would also support SOPInstances / single frames). Hans is using **MeVisLab's / SATORI's image level, which can be both below or above the series level**, depending on the import configuration (e.g., composing DCE-MRI volumes from multiple images). Of course, this needs to be taken into account when configuring the dashboard. +5. Kibana nicely allows filtering on any level (patient / study / series / image properties), but the dashboard will only list the number of patients that contain an image matching a certain criterion, not the patients that contain *only* images matching a criterion. A query such as "give me all patients that have at least two timepoints with a T1 and a T2 weighted image" does not seem to be possible. There seem to be more complex aggregation options in OpenSearch, however, so it remains to be investigated if that could be implemented as well. 
+ +# Illustrations + +Screenshot of resulting meta dashboard with data ingested from a MeVisLab-based SATORI workspace: + +![Kibana dashboard showing information about 18,613 images](meta_dashboard_wth_MeVisLab_data.png) + +# Background and References + +General information / pointers: + +- There is a [dag_service_extract_metadata.py](https://github.com/kaapana/kaapana/blob/develop/data-processing/kaapana-plugin/extension/docker/files/dags/dag_service_extract_metadata.py) which is responsible for the metadata extraction. +- That dag uses a [LocalDcm2JsonOperator](https://github.com/kaapana/kaapana/blob/develop/data-processing/kaapana-plugin/extension/docker/files/plugin/kaapana/operators/LocalDcm2JsonOperator.py) and [LocalJson2MetaOperator](https://github.com/kaapana/kaapana/blob/develop/data-processing/kaapana-plugin/extension/docker/files/plugin/kaapana/operators/LocalJson2MetaOperator.py) which seem to be the most important classes to look at. +- [LocalTaggingOperator](https://github.com/kaapana/kaapana/blob/master/data-processing/kaapana-plugin/extension/docker/files/plugin/kaapana/operators/LocalTaggingOperator.py) could also be relevant / interesting? This operator manages a set (/list) of tags per document in the meta index (cf. attribute `dataset_tags_keyword`). It is possible to add/remove tags, they can come from JSON files, and it is possible to read DICOM tags (e.g., ClinicalTrialProtocolID) into tags. +- The cohort definition is implemented in [kaapana/services/meta/os-dashboards/workflow-trigger](https://github.com/kaapana/kaapana/tree/develop/services/meta/os-dashboards/workflow-trigger) (the name is no longer descriptive for legacy reasons) as a Kibana plugin that triggers a kaapana dag. 
+
+Reviewing LocalDcm2JsonOperator revealed the following functionality (not complete):
+
+- calls dcmodify to remove pixel data tags
+- calls dcm2json to produce a JSON file
+- seems to work around dcm2json producing non-standard float representations (caveat: `cleanJsonData` will have false positives / modify non-numeric tags as well)
+- uses a [DICOM dictionary file (dicom_tag_dict.json)](https://github.com/kaapana/kaapana/blob/develop/services/flow/airflow/docker/files/scripts/dicom_tag_dict.json) to convert tag IDs to names (`get_new_key`)
+- the resulting JSON document will have keys such as `0008103E SeriesDescription_keyword` (`_keyword` is the default suffix, but depending on the VR, there are others: DA -> `_date`, DT -> `_datetime`, TM -> `_time`, DS/FL/FD/OD/OF -> `_float`, IS/SL/SS/UL/US -> `_integer`, SQ -> `_object` – the point of these suffixes is that [OpenSearch is then configured to index the attributes according to their suffix](https://github.com/kaapana/kaapana/blob/develop/services/meta/meta-init/docker/files/init_meta.py#L117))
+- RTSTRUCT and SEG files are treated specially and will have additional keys such as `rtstruct_organ_list_keyword`
+- adds a `timestamp` key based on acquisition/series/content/study/current date+time (using the first one available)
+- adds `timestamp_arrived_datetime`, `timestamp_arrived_date`, `timestamp_arrived_hour_integer`
+- adds `00101010 PatientAge_integer` based on `00100030 PatientBirthDate_date` or `00101010 PatientAge_keyword`
+- splits `00120020 ClinicalTrialProtocolID_keyword` into a list (under the same key)
+
+Summary of steps I performed in order:
+- Spun up https://hub.docker.com/r/opensearchproject/opensearch-dashboards with docker-compose (reduced the example .yml to one node)
+- Used `INIT_OPENSEARCH=true kaapana/services/meta/meta-init/docker/files/init_meta.py` to create the OpenSearch index (I manually set use_ssl=True). 
If you remove the index (e.g., with `http --verify=no DELETE https://admin:admin@localhost:9200/meta-index`), you need to repeat this step. It does not have to be done before setting up the dashboard, though – the latter just defines a view into OS, so it is really independent. +- Similarly, ran the script with `INIT_DASHBOARDS=true DASHBOARDS_URL=http://admin:admin@localhost:5601 DASHBOARDS_JSON=kaapana/services/meta/meta-init/meta-init-chart/files/dashboard_import.json` to upload the default dashboard config (commenting out the call to `set_ohif_template()`) +- Exported dcm2json-like information from MeVisLab, with the new module I developed for that purpose +- Ran a small hacked together supplement of the LocalDcm2JsonOperator on my exported files (see the above summary of its functionality). +- This already allows to play with the dashboard. What's not working yet is the cohort definition. diff --git a/PW38_2023_GranCanaria/Projects/MetaDashboard/meta_dashboard_wth_MeVisLab_data.png b/PW38_2023_GranCanaria/Projects/MetaDashboard/meta_dashboard_wth_MeVisLab_data.png new file mode 100644 index 000000000..fd1779e1d Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/MetaDashboard/meta_dashboard_wth_MeVisLab_data.png differ diff --git a/PW38_2023_GranCanaria/Projects/Metadata_IDC_HMC/README.md b/PW38_2023_GranCanaria/Projects/Metadata_IDC_HMC/README.md new file mode 100644 index 000000000..4877f52f8 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/Metadata_IDC_HMC/README.md @@ -0,0 +1,71 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# FAIRification of medical imaging data and analysis tools + +## Key Investigators + +- Marco Nolden (German Cancer Research Center, Helmholtz Metadata Collaboration, Germany) +- Andrey Fedorov (Brigham and Women’s Hospital, USA) +- Paolo Zaffino (Magna Graecia University of Catanzaro, Italy) +- Maria Francesca Spadea (Institute of Biomedical Engineering, KIT - Karlsruher Institut für Technologie, Germany) + +# 
Project Description + +**“Metadata is a love note to the future”** + + - Jason Scott + +The [Helmholtz Metadata Collaboration](https://helmholtz-metadaten.de/en) is a cross-domain initiative across the whole Helmholtz Association, which is the largest funding agency in Germany. It follows the goal to develop and establish novel methods and tools documenting and sharing research data by means of enriched metadata, as well as improved interoperability of data across disciplines. The Hub Health of this initiative is anchored in the Division of Medical Image Computing at the German Cancer Research Center Heidelberg. + +The [FAIR principles](https://www.go-fair.org/fair-principles/) are guidelines to make your data, including software, findable, accessible, interoperable and reusable. They are an important component of Open Science. + +[NCI Imaging Data Commons](https://datacommons.cancer.gov/repository/imaging-data-commons) is tasked with establishing publicly available repository of cancer imaging data, and in this role is developing workflows to harmonize image and image-derived data representation into DICOM, make metadata searchable, and connect imaging metadata with clinical metadata. Thus, this project might be helpful to the HMC project. We will explore this connection this week! + +We will investigate relevant metadata descriptions of medical images, cohorts, and medical image analyis pipelines and results like machine learning models. + +An additional aspect to look at will be aspects of generating, reviewing and sharing of metadata of research data which contains personally identifiable information. + +## Objective + + + +Common standards, tools and practices can make interoperability much easier. Within this project we want to investigate which tools are already used in our community, which lessons were already learned, and perform experiments regarding interoperability of data and analysis pipelines as well as analysis results. + +1. Objective A. 
Create an overview on existing tools and standards +2. Objective B. Identify challenges. +3. Objective C. Perform interoperability experiments + +## Approach and Plan + + + +1. Have a walkthrough of the IDC project and tech stack - starting from this introductory tutorial series in IDC: [https://github.com/ImagingDataCommons/IDC-Examples/tree/master/notebooks/getting_started](https://github.com/ImagingDataCommons/IDC-Examples/tree/master/notebooks/getting_started) +2. Discuss best practices of data sharing with project attendees. + +## Progress and Next Steps + + + +1. Marco completed [IDC getting started tutorial](https://github.com/ImagingDataCommons/IDC-Examples/tree/master/notebooks/getting_started) +1. Set up cloud project for experimentation, Andrey added Marco to a project that has billing set up. +1. Worked on exploring BigQuery for querying of IDC data and exporting metadata into JSON for exploration outside of IDC. +2. Met with Paolo Zaffino and Maria Francesca Spadea to discuss recommended practices for data sharing (representation, repositories, issues related to de-identification). + +# Illustrations + + + +# Background and References + + + +- Wilkinson, M., Dumontier, M., Aalbersberg, I. et al. The FAIR Guiding Principles for scientific data management and stewardship. Sci Data 3, 160018 (2016). https://doi.org/10.1038/sdata.2016.18 + +- Bridge, C.P., Gorman, C., Pieper, S. et al. Highdicom: a Python Library for Standardized Encoding of Image Annotations and Machine Learning Model Outputs in Pathology and Radiology. J Digit Imaging 35, 1719–1737 (2022). https://doi.org/10.1007 + +- Deepa Krishnaswamy, Dennis Bontempi, David Clunie, Hugo Aerts, & Andrey Fedorov. (2023). AI-derived annotations for the NLST and NSCLC-Radiomics computed tomography imaging collections [Data set]. Zenodo. 
https://doi.org/10.5281/zenodo.7539035 + +- Zaffino P, Marzullo A, Moccia S, Calimeri F, De Momi E, Bertucci B, Arcuri PP, Spadea MF. An Open-Source COVID-19 CT Dataset with Automatic Lung Tissue Classification for Radiomics. Bioengineering. 2021; 8(2):26. https://doi.org/10.3390/bioengineering8020026 diff --git a/PW38_2023_GranCanaria/Projects/MultiSpectralSensorIntegration/README.md b/PW38_2023_GranCanaria/Projects/MultiSpectralSensorIntegration/README.md new file mode 100644 index 000000000..68f9d4c1b --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/MultiSpectralSensorIntegration/README.md @@ -0,0 +1,54 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Integration of infrared, ultraviolet and hyperspectral sensors in Slicer via Plus Toolkit and OpenIGTLink. +## Key Investigators + +- Francisco J. Marcano Serrano + +# Project Description + + +Integration of a predefined set of sensors in the 3D Slicer platform, via Plus Toolkit and OpenIGTLink, for future use in clinical applications. +Several sensors/cameras operating in different electromagnetic ranges and using different communication standards (USB, CameraLink, GigE, GenICam) will be integrated. +The set of cameras to integrate includes (but not limited to): visible light USB cameras, PCO (ultraviolet, USB, several models), Thermal Expert (infrared, USB/CameraLink, several models), FLIR (infrared, GigE, several models), Specim (hyperspectral, GenICam, several models). + + +## Objective + + + +1. Adding selected sensors as new devices in PTK. +2. Visualization and control of integrated sensors from Slicer. + +## Approach and Plan + + + +1. Integration of at least one USB camera (sensor) as a PTK device. +1. Integration of at least one CameraLink sensor as a PTK device. +1. Integration of at least one GigE sensor as a PTK device. +1. Integration of at least one GenICam sensor as a PTK device. + +## Progress and Next Steps + + +1. 
Environment configuration (Win10, Visual Studio 2019, Qt5, installation of PTK (https://plustoolkit.github.io) & cameras drivers + SDK's ). +2. Thermal Expert EV2 infrared camera added, following instructions from PTK site (plustoolkit.github.io/devicecode). +3. PCO Ultraviolet camera added, following instructions from PTK site. Code modified to change camera exposure values from config file (XML). +4. Simultaneous image acquisition from TE-EV2 & PCO UV tested from Slicer (OpenIGTLink, see figure). +5. Next steps: integration of CameraLink, GigE, GenICam cameras; interactive control of camera parameters. + +# Illustrations + + +Fig. 1: Integration of Thermal Expert EV2 & PCO Ultraviolet Cameras (OpenIGTLink) +[TEEV2PCOUV-2.gif](https://github.com/NA-MIC/ProjectWeek/releases/download/project-week-resources/PW38__MultiSpectralSensorIntegration__TEEV2PCOUV-2.gif) +
+
+# Background and References
+
+
+1. Plus ToolKit. Adding a Device. URL: https://plustoolkit.github.io/devicecode (Last seen: 02/02/2023).
diff --git a/PW38_2023_GranCanaria/Projects/MultiSpectralSensorIntegration/TEEV2+PCOUV.gif b/PW38_2023_GranCanaria/Projects/MultiSpectralSensorIntegration/TEEV2+PCOUV.gif
new file mode 100644
index 000000000..d07d64a6a
Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/MultiSpectralSensorIntegration/TEEV2+PCOUV.gif differ
diff --git a/PW38_2023_GranCanaria/Projects/OHIF_DATSCAN/README.md b/PW38_2023_GranCanaria/Projects/OHIF_DATSCAN/README.md
new file mode 100644
index 000000000..e54db11ac
--- /dev/null
+++ b/PW38_2023_GranCanaria/Projects/OHIF_DATSCAN/README.md
@@ -0,0 +1,40 @@
+Back to [Projects List](../../README.md#ProjectsList)
+
+# DATSCAN Viewer implementation in OHIF
+
+## Key Investigators
+
+- Salim Kanoun MD (Pixilib, Toulouse, France)
+- Alireza Sedghi (Radical Imaging)
+- Celian Abadie (Pixilib)
+- Sofien Sellamo (Pixilib)
+
+# Project Description
+
+The aim of the project is to make an initial implementation of a conventional nuclear medicine application in OHIF. This will be a DATSCAN viewer (https://en.wikipedia.org/wiki/Ioflupane_(123I))
+This implementation will allow us to identify the key issues that need to be solved to use OHIF as a framework to develop nuclear medicine applications in the future (more than 20 applications made in Fiji should be ported to OHIF in a long term roadmap).
+
+## Objective
+
+1. Visualization of DATSCAN, using the ongoing multiframe support of OHIF
+- Dedicated HP
+- Image reorientation
+- Custom LUT
+2. Programmatically set predefined ROI in the volume (can use TMTV extension)
+3. Basic quantification inside the ROIs (mean, max)
+
+## Approach and Plan
+
+An anonymized DATSCAN image will be provided for development.
+Public repository for contributors.
+
+## Progress and Next Steps
+
+Report of key issues to solve in OHIF for further nuclear medicine applications development.
+
+# Illustrations
+https://link.springer.com/article/10.1007/s12149-011-0564-1 => See Figure 1
+
+# Background and References
+https://jnm.snmjournals.org/content/50/6/893.long
+https://jnm.snmjournals.org/content/43/10/1324.long
diff --git a/PW38_2023_GranCanaria/Projects/OHIF_PolySeg/README.md b/PW38_2023_GranCanaria/Projects/OHIF_PolySeg/README.md
new file mode 100644
index 000000000..dbd4b3fb7
--- /dev/null
+++ b/PW38_2023_GranCanaria/Projects/OHIF_PolySeg/README.md
@@ -0,0 +1,67 @@
+Back to [Projects List](../../README.md#ProjectsList)
+
+# PolySeg representations for OHIF Viewer
+
+## Key Investigators
+
+- Alireza Sedghi (OHIF, Accolade Imaging)
+
+# Project Description
+
+The aim of the project is to continue on the effort of adding polySeg representations for segmentation in OHIF Viewer. This will be a continuation of the work done in the previous [project week](../../../PW31_2019_Boston/Projects/PolySeg4Web/README.md)
+
+## Objective
+
+1. Try to implement the polySeg representation in OHIF Viewer
+2. Use the WASM version of the polySeg library
+
+## Approach and Plan
+
+1. Start from a simple example with contour representation
+2. Add the polySeg library to the project
+3. Implement the polySeg representation in OHIF Viewer
+
+## Progress and Next Steps
+
+### Added new geometry loader to Cornerstone3D
+
+Aiming towards polySeg representations requires support for loading a geometry. 
As part of this task a geometry loader was added to the cornerstone3D which can support loading and caching two geometries:
+
+- ContourSet: A set of 3D contours (list of contours each with a list of points)
+- Surface: A 3D surface (points and polygons)
+
+![geometry](./geometryLoader.png)
+
+### Implemented the contour segmentation representation in Cornerstone3D
+
+The contour representation is the simplest representation of a polySeg segmentation. It is a set of 3D contours (list of contours each with a list of points). The representation
+utilizes the geometry loader to load the contours and render them in the cornerstone3D viewport using the vtkActors
+
+![contour](./contour.png)
+
+Try it out in our contour demo [here](https://www.cornerstonejs.org/live-examples/contoursegmentationrepresentation)
+
+### Implemented the surface segmentation in Cornerstone3D
+
+The surface representation is a 3D surface (points and polygons). The representation utilizes the geometry loader to load the surface and render them in the cornerstone3D viewport using the vtkActors
+
+![surface](./surface.png)
+
+Try it out in our surface demo [here](https://deploy-preview-418--cornerstone-3d-docs.netlify.app/live-examples/surfacesegmentationrepresentation)
+
+### Debugged the build for the WASM file to make it smaller
+
+The WASM file was too big to be used in the browser. The build was debugged to make it smaller. Tried the following with no success:
+
+- Making the build optimize for size instead of speed
+
+## Next Steps
+
+- Implement the OHIF side of the polySeg representation
+- Make the WASM file smaller to be used in the browser
+
+# Background and References
+
+[ICRPolySeg-WASM](https://bitbucket.org/icrimaginginformatics/polyseg-wasm/src/master/), a wrapper package that implements PolySeg for the Web written by ICR team. 
This work is sponsored by NCITA, a flagship multi-institutional collaboration between leading UK universities, funded by Cancer Research UK - ncita.org.uk. The functionality has now been integrated into the XNAT OHIF Viewer v3.5.0-RC1.
+
+https://github.com/SlicerRt/SlicerRT/tree/master/DicomRtImportExport/ConversionRules
diff --git a/PW38_2023_GranCanaria/Projects/OHIF_PolySeg/contour.png b/PW38_2023_GranCanaria/Projects/OHIF_PolySeg/contour.png
new file mode 100644
index 000000000..ba84c37e8
Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/OHIF_PolySeg/contour.png differ
diff --git a/PW38_2023_GranCanaria/Projects/OHIF_PolySeg/geometryLoader.png b/PW38_2023_GranCanaria/Projects/OHIF_PolySeg/geometryLoader.png
new file mode 100644
index 000000000..535fd54ff
Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/OHIF_PolySeg/geometryLoader.png differ
diff --git a/PW38_2023_GranCanaria/Projects/OHIF_PolySeg/surface.png b/PW38_2023_GranCanaria/Projects/OHIF_PolySeg/surface.png
new file mode 100644
index 000000000..8bfcde5bf
Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/OHIF_PolySeg/surface.png differ
diff --git a/PW38_2023_GranCanaria/Projects/OHIF_SyncCrosshair/README.md b/PW38_2023_GranCanaria/Projects/OHIF_SyncCrosshair/README.md
new file mode 100644
index 000000000..031164580
--- /dev/null
+++ b/PW38_2023_GranCanaria/Projects/OHIF_SyncCrosshair/README.md
@@ -0,0 +1,42 @@
+Back to [Projects List](../../README.md#ProjectsList)
+
+# Cross study synchronizer for OHIF Crosshair
+
+## Key Investigators
+
+- Salim Kanoun MD (Pixilib, Toulouse, France)
+- Alireza Sedghi (Radical Imaging)
+- Celian Abadie (Pixilib)
+- Sofien Sellamo (Pixilib)
+
+# Project Description
+
+Create a tool to synchronise two volume viewports coming from 2 different studies to help physicians to track lesions on follow up scans
+
+## Objective
+
+In follow up studies, physicians need to compare two studies (ex baseline vs end of treatment). 
+
+It would be nice to sync the display between viewports to help them compare tumor evolution.
+
+## Approach and Plan
+
+As spatial coordinates change between studies, we will ask the user to set the crosshair in the same anatomical region (ex : pubis).
+At sync we either :
+- track crosshair distance for each crosshair update and apply to the other crosshair
+- Calculate an offset to apply to convert crosshair A to B coordinate
+
+Would be nice if we can sync rotation as well.
+
+## Progress and Next Steps
+
+We made an implementation of the sync that works well for synchronizing crosshairs but does not handle rotation. Would need to extend rotation.
+Crosshair implementation might need some refactoring.
+Once done, the tool is meant to be integrated as an OHIF tool (as an improvement of the stack sync tool)
+
+# Illustrations
+
+None
+
+# Background and References
+
+None
diff --git a/PW38_2023_GranCanaria/Projects/OpenSourceSimulationCenter/README.md b/PW38_2023_GranCanaria/Projects/OpenSourceSimulationCenter/README.md
new file mode 100644
index 000000000..041fd8d08
--- /dev/null
+++ b/PW38_2023_GranCanaria/Projects/OpenSourceSimulationCenter/README.md
@@ -0,0 +1,77 @@
+Back to [Projects List](../../README.md#ProjectsList)
+
+# Taking Advantage of Open Source Technologies for the Development of Clinical Simulation Centers and Virtual Hospitals for Training and R&D
+
+## Key Investigators
+
+- Juan Ruiz (ULPGC)
+- Idafen Santana (ULPGC)
+- Mario Monzón (ULPGC)
+- Miguel Angel Rodriguez-Florido (ULPGC)
+- Marta Latorre (ULPGC)
+- Marina Elistratova (ULPGC)
+- Maica Fernández (ULPGC)
+- David Garcia-Mato (Ebatinca S.L.)
+- Javier Gonzalez-Fernandez (ITC)
+- Natalia Arteaga-Marrero (IACTEC)
+- Oumar Kane
+- Ablaye Tacko Diop
+- Ousmane Dia
+- Mame Abdoulaye Gueye
+- Mouhamedi Bah
+- Aliou Barry
+- Cheikh Sidi Ethmane
+- Edmée Clémence Mansilla
+- Estelle Tcheple Tuo
+- Geir Arne Tangen (SINTEF - Norway)
+- Javier Pascau (UC3M - Spain)
+- Juan León (HGUGM - Spain)
+
+# Project Description
+
+Open source technologies have opened up a world of possibilities for the development of clinical simulation centers and virtual hospitals. This project aims to explore how these technologies can be used to create innovative projects in the healthcare sector. We will discuss how open source technologies can support virtual hospitals and simulation centers, and how this could benefit both patients and healthcare providers. We will also look at the potential use cases of such projects, their impact on patient care, and their potential cost savings. Finally, we will discuss the challenges that need to be addressed in order to make these projects successful.
+
+## Objectives
+
+1. To identify which technology and computer-based applications can be useful for the partner's simulation centers.
+2. To know the profile of trainees for each simulation center.
+3. To identify how to improve the simulation resources that we currently have.
+4. To take into account the training needs.
+
+## Approach and Plan
+
+1. We created a work team to discuss the objectives.
+2. We shared information with the partners about computer-based simulation in Africa.
+3. We defined some tasks to work in: To identify apps based on Slicer that could be used for education, to join researchers from other PW38 that propose open-source resources (phantoms, apps, etc.) and to discuss in small groups for regions/countries.
+4. We proposed to study and research the impact of simulation on the economy of health.
+
+## Progress and Preliminary Results
+1. We had a good acceptance of the participants to work in this project. 
We did an start-meeting on Tueday and a wrap-up one a Friday. +pw-project-simulacion5 +2. We merged this project with the presentation and demos at the [Virtual Hospital at the University of Las Palmas](https://youtu.be/3NVaVnAUZyc): + +3. We saw and used apps based on Slicer that are thought for training and medical education: +

+ +pw-project-simulacion-2 +pw-project-simulacion-3 +

+ +## Conclusion & Outlook + +This project let us advance in the team cooperation for merging ideas and getting information of each partner. We learnt the capabilities and needs of each partner and how to include the open source technology developed around Slicer. +In summary, we can show the main ideas that we got from our partners in Africa: +- Our partners from Africa are interested in training for gynecologists and the reanimation for newborn babies. In our workgroup, and around Slicer, there are some applications for both kind of education. Then, these apps will be evaluated to test their performance and learning goals. +- Oumar is interested for his Center in the VR apps for learning anatomy that they saw in the visit of the Virtual Hospital. Also, he is interested in the delivery app (2) based on Slicer that it’s proposed in this PW38 event. He proposes to develop applications to make the anamnesis (oral and clinical exploration). In this line, we think that the VR could help us to achieve this goal. He is interested in all apps that can help for endotracheal intubation and reanimation. +- Mr. Mouhamedi Ahmed Haidara Bah, director of the Public Health School at Rosso, said that they don’t have technical resources to implement the technology that ha has seen at this event, but his group is very interested for including this kind of tech in vessels’ punctions and obstetrics’s healthcare. +- Khedijetou Vilaly M.D., is an gynecologist from Hospital Mère Enfant de Nouakchott, interested in the ultrasound guided procedures and the manufacturing of breast’s phantoms. +- Ahmed Dhahara Kane, professor at the Medical School and physician in Pediatric Surgery, is interested in virtual reality apps and the manufacturing of phantoms for teaching. 
+ +Also, from a point of view of the economy in health, Marina Elistratova and Maica Fernández are working in research about how the education in health using this kind open source tools can help to save money for the health system. This work is just starting with this project and it is in progress now. + +## References +1. [Training system for US-guided lung interventions](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/US-guided_TrainingSystem/) +2. [Fetal Ultrasound Simulation for Delivery Training](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/FetalUltrasoundSimulation/) +3. [Slicer + IMSTK for low cost training setups](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/SlicerIMSTK/) +4. [Development of Anatomy Atlases and Training Tools with 3D Slicer](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/TTTAtlas/) +5. [Virtual Hospital at the University of Las Palmas](https://youtu.be/3NVaVnAUZyc) diff --git a/PW38_2023_GranCanaria/Projects/ParameterNodeWrapper/README.md b/PW38_2023_GranCanaria/Projects/ParameterNodeWrapper/README.md new file mode 100644 index 000000000..c428626df --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/ParameterNodeWrapper/README.md @@ -0,0 +1,93 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Parameter Node Wrapper + +## Key Investigators + +- Connor Bowley (Kitware, USA) +- Sam Horvath (Kitware, USA) +- David Allemang (Kitware, USA) + +# Project Description + +Best practice when writing new ScriptedLoadableModules is to use a "parameter node" to store state (most commonly the state of the GUI). However, the existing parameter node system was in essence a string to string map (which some exceptions regarding storing MRML node references), while many times the state should not logically be a string. + +A wrapper system was created, the `parameterNodeWrapper`, to allow better typed access to parameter nodes. 
This system allows using many Python built-ins by default, and is extensible for custom classes. + +Because the `parameterNodeWrapper` uses type annotations, automatic connections can also be made to GUI components. + +Example: + +```py +import enum +from slicer.parameterNodeWrapper import parameterNodeWrapper + +class ConversionMethods(enum.Enum): + LUMINANCE = 1 + AVERAGE = 2 + SINGLE_COMPONENT = 3 + + +@parameterNodeWrapper +class VectorToScalarVolumeParameterNode: + InputVolume: slicer.vtkMRMLVectorVolumeNode + OutputVolume: slicer.vtkMRMLScalarVolumeNode + ConversionMethod: ConversionMethods + ComponentToExtract: int + + +class VectorToScalarVolumeWidget(ScriptedLoadableModuleWidget): + def setup(self): + self._parameterNode = VectorToScalarVolumeParameterNode(self.logic.getParameterNode()) + + def updateParameterNodeFromGUI(self, caller=None, event=None): + # Modify all properties in a single batch + with slicer.util.NodeModify(self._parameterNode): + self._parameterNode.InputVolume = self.ui.inputSelector.currentNode() + self._parameterNode.OutputVolume = self.ui.outputSelector.currentNode() + self._parameterNode.ConversionMethod = self.ui.methodSelectorComboBox.currentData + self._parameterNode.ComponentToExtract = self.ui.componentsSpinBox.value + + def onApplyButton(self): + self.logic.run(self._parameterNode.InputVolume, + self._parameterNode.OutputVolume, + self._parameterNode.ConversionMethod, + self._parameterNode.ComponentToExtract) +``` + +## Objective + + + +1. Work on automatic GUI creation from a `parameterNodeWrapper`. +2. Create examples/templates/documentation on how to use the `parameterNodeWrapper` functionality. +3. Determine new features. +4. Implement new features as able. + +## Approach and Plan + + + +1. Describe specific steps of **what you plan to do** to achieve the above described objectives. +1. ... +1. ... + +## Progress and Next Steps + + + +1. 
Added ability to use the `typing.Any` annotation in the parameter node wrapper, with some limitations. +2. Added automatic gui generation for the parameterNodeWrapper (PR coming soon). +3. Bug fixes. +4. ... + +# Illustrations + + + +# Background and References + +- [Parameter node wrapper documentation](https://slicer.readthedocs.io/en/latest/developer_guide/parameter_nodes.html) diff --git a/PW38_2023_GranCanaria/Projects/README.md b/PW38_2023_GranCanaria/Projects/README.md new file mode 100644 index 000000000..f1a87d9f8 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/README.md @@ -0,0 +1,20 @@ +# How to create a new project + +- Post questions about the project idea and team on the [Project Week forum][forum], our communication mechanism as of PW28. +- When you are ready, add a new entry in the list of **Projects** by creating a new `README.md` file in subfolder in `Projects` folder, and copying contents of [project description template][project-description-template] file into it. Step-by-step instructions for this are: + +1. Open [project description template][project-description-template] and copy its full content to the clipboard + * If the link does not work (https issues) please try [here](https://github.com/NA-MIC/ProjectWeek/blob/master/PW30_2019_GranCanaria/Projects/Template/README.md) +3. Go back to [Projects](https://github.com/NA-MIC/ProjectWeek/tree/master/PW38_2023_GranCanaria/Projects) folder on GitHub +4. Click on "Create new file" button +5. Type `YourProjectName/README.md` +6. Paste the previously copied content of project template page into your new `README.md` +7. Update at least your project's __title, key investigators, project description sections__ +8. Create a [pull request](https://help.github.com/articles/creating-a-pull-request/) with the new page +9. Note in the pull request which category you would like the project to be in +10. 
Once the pull request is merged, @sjh26 / Sam Horvath (or similar) will add the project to the main list +11. If you have write access to the repository and are not using a pull request, please ping @sjh26 to add the project to the main list + + +[forum]: https://discourse.slicer.org/c/community/project-week +[project-description-template]: https://raw.githubusercontent.com/NA-MIC/ProjectWeek/master/PW30_2019_GranCanaria/Projects/Template/README.md diff --git a/PW38_2023_GranCanaria/Projects/RealTimeUltrasoundSegmentationAI/README.md b/PW38_2023_GranCanaria/Projects/RealTimeUltrasoundSegmentationAI/README.md new file mode 100644 index 000000000..66fe03cca --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/RealTimeUltrasoundSegmentationAI/README.md @@ -0,0 +1,102 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Real-time ultrasound AI segmentation using Tensorflow and PyTorch models + +## Key Investigators + +- María Rosa Rodríguez Luque (Universidad de Las Palmas de Gran Canaria, Spain) [on site] +- Tamas Ungi (Queen’s University, Canada) [remote] +- David García Mato (Ebatinca S.L., Las Palmas de Gran Canaria, Spain) [on site] +- Chris Yeung (Queen’s University, Canada) [remote] + +# Project Description + +The module "Segmentation U-Net", from extension [SlicerAIGT](https://github.com/SlicerIGT/aigt), applies deep learning models on an ultrasound image stream to generate the predicted segmentation in real time. This is shown in the following example, where it is used to detect tumour tissue (highlighted in red) on breast images. That way, we can apply a live volume reconstruction on this prediction and visualize the complete region of interest (in this case, the area of the tumour). Another instance, using spine images, is shown in (Figure 1). + +

+ +        + +

+ + +Currently, this module supports models trained with the TensorFlow ecosystem. However, in recent years, PyTorch has become an increasingly popular machine learning framework, especially in medical imaging applications (an example of this is the MONAI framework, which is based on PyTorch). + +We have developed a separate module to run the inference of PyTorch model for the segmentation of breast ultrasound images: [Breast Lesion Segmentation](https://github.com/EBATINCA/UltrasoundAI) (Figure 2). However, our module does not integrate parallel processing to enable real-time image segmentation. + +In this project, we aim to adapt the current "Segmentation U-Net" module to enable the use of models trained with both ecosystems, PyTorch and TensorFlow, for real-time ultrasound image segmentation. + +In addition, we will discuss further improvements for this module. For instance, automatically visualize the prediction overlayed on the input ultrasound image and avoid changing to different modules to activate the visualization. + +## Objective + +1. Adapt the current "Segmentation U-Net" module to support models trained with PyTorch +2. Automatically display the AI segmentation overlayed on the input ultrasound image + +## Approach and Plan + +1. Integrate a TensorFlow/PyTorch model selector, so the module would automatically use the one give by the user +1. Develop the image pre-and post-processing required by the PyTorch model +1. Record an ultrasound image stream and run the inference in real time using a PyTorch model +1. Apply the selected prediction transform on the output volume automatically + +## Progress and Next Steps + +1. The module uses the file extension to know the model framework (.h5 for TensorFlow and .pth or .pt for PyTorch) and execute the corresponding actions in each case + + + +2. We have recorded a stream from a breast ultrasound phantom where an inclusion that simulates injured tissue is shown. 
A PyTorch model previously trained with the [Dataset BUSI](https://www.sciencedirect.com/science/article/pii/S2352340919312181) was used to run the inference for the real-time segmentation. + +* Original stream recorded: + + + + +* Steps to run the inference and visualize the predicted segmentation with this new version of the "Segmentation U-Net" module: + + + + +* Lesion reconstruction using _Volume Reconstruction_ and _Volume Rendering_ modules: + + + +3. When the box "Use separate process for prediction" is not checked, we automatically apply the prediction transform selected and display the AI segmentation overlayed on the input ultrasound image (as it was shown before). When this box is checked, the input stream and the prediction have different frame rates and it is more convenient to visualize the prediction in a different view, so we should make it visible manually. + + +**Next Steps** + +* Currently, it is required to define the PyTorch network and load only the trained weights. To make the module more flexible it is desired to directly load the complete model (as it is done in the case of TensorFlow). + +* The pre- and post-processing steps have been defined according to the process carried out to train the PyTorch model used in this case. However, these steps should be more generalized to work with different models. + +* PyTorch models are only supported when we use the same process for prediction (the check box is not selected), so it is necessary to improve this. + +# Illustrations +Previous work: + +

+ +            + +

+ +_**Figure 1. Real-time spine segmentation and volume reconstruction using the module "Segmentation U-Net"**_ + + + + _**Figure 2. Segmentation of breast ultrasound images using the module "Breast Lesion Segmentation"**_ + +# Background and References +This project is based on the previous **Segmentation Unet** and **Breast Lesion Segmentation** modules: +- **Segmentation Unet** admits Tensorflow models to develop the segmentation task on an ultrasound image stream: + - GitHub repository: [Segmentation Unet](https://github.com/mrluque/aigt/tree/master/SlicerExtension/LiveUltrasoundAi) module. + - The [tutorial](https://github.com/PerkLab/PerkLabBootcamp/blob/master/Doc/day2_2_SlicerIGT-U38_LiveAiRec.pptx) about how to use the previous module was shown during PerkLabBootcamp held virtually on May 24-26, 2022. + - The video tutorials of the [breast](https://youtu.be/WyscpAee3vw) and [spine](https://youtu.be/l0BcW8c9CnI) segmentation are also available. + +- **Breast Lesion Segmentation** deploy deep learning models trained in PyTorch for segmentation of 2D Ultrasound images: + - GitHub Repository: [Breast Lesion Segmentation](https://github.com/EBATINCA/UltrasoundAI) module. + +Integration of PyTorch and Slicer: +* To use a deep learning model trained in PyTorch inside Slicer, we install the [PyTorch extension](https://projectweek.na-mic.org/PW35_2021_Virtual/Projects/PyTorchIntegration/), presented during 35th Project Week held virtually June 28-July 2, 2021. 
diff --git a/PW38_2023_GranCanaria/Projects/Slicer4MaxillofacialSurgery/README.md b/PW38_2023_GranCanaria/Projects/Slicer4MaxillofacialSurgery/README.md new file mode 100644 index 000000000..55055d415 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/Slicer4MaxillofacialSurgery/README.md @@ -0,0 +1,79 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Maxillofacial Surgery Virtual Planning Applications based on Slicer + +## Key Investigators + +- Miguel Ángel Rodriguez-Florido, PhD (GTMA, University of Las Palmas de Gran Canaria and Children’s, Women’s and General Hospital Insular) +- Christian Buritica, MD (Children’s, Women’s and General Hospital Insular) +- Mauro Domínguez (Software Developer of BoneReconstructionPlanner) +- Thank you Sam Horvath for her help and support with Osteotomy Planner and other functionalities in Slicer. + +# Project Description + +This project proposes to study the capabilities of two extensions of Slicer (Osteotomy Planner and BoneReconstructionPlanner) to be included at the classes at the Medical School in our University and to the planning tools used for clinical maxillofacial cases. + +## Objective + +1. To know and test the features and parameters of both extensions for trying them with our own clinical requisites and datasets. +2. To detect needs to improve the capabilities of both extensions. +3. To get ability for using both extensions in real clinical cases. + +## Approach and Plan + +1. To Install and run the software with our own clinical cases, checking if [BoneReconstructionPlanner videotutorial](https://youtu.be/g9Vql5h6uHM) should be updated and executing the [automatic-tests](https://github.com/SlicerIGT/SlicerBoneReconstructionPlanner/issues/68). Later we'll play with the features of this extesion. +2. To discuss with extension’s developers the best configuration of the software for our clinical cases. +3. Beta testing of the extensions in our environment and send feedback to the community. 
+ +## Progress and Next Steps + +1. We discussed with Sam Horvath about the Osteotomy Planner extension and how to use it to get our goals. We have learnt how it works in the last release of Slicer and we learnt more general surfaces/meshes operators that can help us for teaching general concepts of the surgery technique to medical students. +2. We have checked that the BoneReconstructionPlanner extension is running at the current stable release of Slicer and we have been using it for one of our own clinical cases. +3. We worked with our own clinical case and we got some encouraging results. We can try to include this extension in the daily work of the hospital (support at clinical cases). + +# Illustrations of results + +1. We used the Osteotomy planner extension, and other functionalities of Slicer, to work with some of our own cases. We understand how the software works in order to explain the procedure's concepts to the medical students: + +

+ + + + +

+ +

+ + +

+ +Now, we can improve the use of the Slicer in our Medical School and include more features for our technological teaching. + +2. We followed the instructions of the videotutorial and we got some points where, perhaps, it could be good to include more information for new users of Slicer (more in the case of clinical users). In any case, we got the following results with our own case: + +a. 3D models segmented for the mandible and the fibula: + +

+ + +

+ +b. Fibula and mandible guides: + +

+ + +

+ +c. Final result: + +

+ +

+ +Of course, we'll print in 3D these models for teaching to our medical students. + +# Background and References + +1. Osteotomy planner: https://www.kitware.com/osteotomy-planner-2-0-release/ +2. Continue with the project: https://projectweek.na-mic.org/PW36_2022_Virtual/Projects/MandibleReconstructionAutomaticPlanning/ diff --git a/PW38_2023_GranCanaria/Projects/SlicerActiveViewport/ActiveView.png b/PW38_2023_GranCanaria/Projects/SlicerActiveViewport/ActiveView.png new file mode 100644 index 000000000..a10dc4a99 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerActiveViewport/ActiveView.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerActiveViewport/README.md b/PW38_2023_GranCanaria/Projects/SlicerActiveViewport/README.md new file mode 100644 index 000000000..eda760e0f --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/SlicerActiveViewport/README.md @@ -0,0 +1,41 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Active Viewport + +## Key Investigators + +- Davide Punzo (Freelancer, France) +- Andras Lasso (Perk Labs, Canada) +- Anyone is welcome to join + +# Project Description + +Add the concept of active viewport in 3DSlicer + +## Objective + +1) Track the currently manipulated viewport by the user in a parameter in the Slicer logic + +2) Add a colored border to the view (2D, 3D, Plot, Table, etc...) + +## Approach and Plan + +1) Design the solution: discuss the potential use cases (i.e. keyboard/mouse focus) and then use it for example in show/hide node in the selected view (similarly to Paraview). + +2) Implement it: add implementation in CTK (e.g. QFrame around views with method to set color, thickness, style, etc... ), and the uid parameter of the selected view (shall we save it in the scene? or just in the slicer logic?). 
+ +## Progress and Next Steps + +1) We (Sam, Steve, Andras, Davide) discussed this and the outlook of the solution design is: + - we should implement the active viewport only for mouse focus (user clicks). Although it would be nice to activate the viewport with the tab action with the keyboard focus, the user may also want to write in a dialog without losing the focus of the viewport. Therefore we would go with a solution where the viewport activation/focus is persistent. + - we should implement then in the data module an icon to activate the volume only in the active viewport (2D or 3D). Currently the visibility icon activates with propagate volume method in the slice logic in all the slices (and for example, not in the 3D). Should we do the same for markups visibility? + - for the UI painting, we can go with the idea to use a QtFrame around the view widgets (the QFrame should contain both the vtk rendering view widget and the controller widget). NOTE: Sam is working on applying QSS styles (including material ones). However at the moment there are some issues with qSlicerWidgets (it looks like the style does not get propagated for those). She plans to fix it, so we can re-evaluate this at any time. + - The active viewport should be stored in the scene and restored at scene loading (this avoids having to redo clicks, i.e. the viewport in which the user was interacting/doing analysis would be ready at scene loading). We should save the active viewport parameter in the Layout Manager. This is for several reasons: you could have a desktop and VR/AR screens with layout managers and the active viewport should be independent in those two (one for each). Already some other parameters and logic regarding views are in the layout manager (e.g. the maximize a view to the full view layout widget). The selection node could be another option for saving the parameter, but the selection node refers more to the global status of the Application. 
+ +2) This will be implemented later on, after the project week. + +# Illustrations +ActiveView + + +# Background and References diff --git a/PW38_2023_GranCanaria/Projects/SlicerAstroUpdate/README.md b/PW38_2023_GranCanaria/Projects/SlicerAstroUpdate/README.md new file mode 100644 index 000000000..9fb0c5454 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/SlicerAstroUpdate/README.md @@ -0,0 +1,52 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# SlicerAstro Update + +## Key Investigators + +- Davide Punzo (Freelancer, France) +- Thijs van der Hulst (Kapteyn Astronomical Institute, Netherlands) +- Anyone is welcome to join + + +# Project Description +SlicerAstro is an extension of 3D Slicer that aims to provide astronomers with a powerful and interactive visualization environment. In this environment, astronomers can analyze complex sources that automated pipelines find. These sources include interacting galaxies, tidal tails, HI filaments, and stripped galaxies. + +## Objective + +Update SlicerAstro to Slicer 5 + +## Approach and Plan + +1) Fix compilation with Slicer 5 + +2) Test under Slicer 5 and find issues (some already listed in [issues](https://github.com/Punzo/SlicerAstroApp/issues/4)) + +## Progress and Next Steps + +1) Done! + +2) Although the code now compile, many parts are broken and testing is not even possible. The main issues found are: + - The Astro sample data were hosted on the kitware server, which has been closed. Thijs has collected again the sample data, but we need to host them in github and update the links. + - the slice view factory (to customize slice view widget, astronomical WCS coordinates, etc...) seems not working or the code has to be updated. Other methods from base classes changed and needs to be updated: + - qMRMLSliceAstroControllerWidgetPrivate::init + - SlicerAstro uses in many places the old box annotation node (AnnotationROI), we need to migrate it to the new box markups node. 
Many methods are broken and several features will not work: + - qSlicerAstroVolumeModuleWidget::onCalculateRMS + - all the methods in the analysis modules that use the ROIbox to get the boundaries for the computations (statistics, masking, etc...) + - PV Diagram and PV Slice tools hang and then crash. This is related to modifications to the markups infrastructure from Slicer4 to Slicer5. + - 3DBarolo (galaxy modelling tool) should be updated to version 1.6 (currently there is a custom wrapped version forked from version 1.4) + - The SlicerAstroApp (SlicerCustomAppTemplate for customized binaries with SlicerAstro extension and style) was not yet tested. + + +~1-2 dev weeks would be necessary to fix all the issues and it will be done later on after the project week. After that we can do further testing and then finally restore the extension in the 3DSlicer store. + + +# Illustrations +SlicerAstro Icon ![](resized.gif) + +# Background and References +[SlicerAstro project Slides](https://docs.google.com/presentation/d/1nfBQul_XENvYHQvPe2c_DCJmSOk13eX0GEoBoHz8MFU/edit#slide=id.p1) + +[SlicerAstro repo](https://github.com/Punzo/SlicerAstro) + +[SlicerAstroApp repo](https://github.com/Punzo/SlicerAstroApp) diff --git a/PW38_2023_GranCanaria/Projects/SlicerAstroUpdate/SlicerAstroIcon.png b/PW38_2023_GranCanaria/Projects/SlicerAstroUpdate/SlicerAstroIcon.png new file mode 100644 index 000000000..d8ef1b88c Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerAstroUpdate/SlicerAstroIcon.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerAstroUpdate/WEIN069.png b/PW38_2023_GranCanaria/Projects/SlicerAstroUpdate/WEIN069.png new file mode 100644 index 000000000..93a2b8836 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerAstroUpdate/WEIN069.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerAstroUpdate/resized.gif b/PW38_2023_GranCanaria/Projects/SlicerAstroUpdate/resized.gif new file mode 100644 
index 000000000..37edbabc8 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerAstroUpdate/resized.gif differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerBatchAnonymize/README.md b/PW38_2023_GranCanaria/Projects/SlicerBatchAnonymize/README.md new file mode 100644 index 000000000..69e57121a --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/SlicerBatchAnonymize/README.md @@ -0,0 +1,61 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Updating Batch Anonymizer +SlicerBatchAnonymize is a Slicer Extension that strips off metadata from dicom files, and converts them to various file formats. +The work during project week will involve investigating and creating prototypes for defacing in medical images, support and single file dicom export. + +## Key Investigators +- Hina Shah (UNC Chapel Hill) +- Juan Carolos Prieto (UNC Chapel Hill) +- Jonas Boanchi (University of Michigan) +- Lucia Cevidanes (University of Michigan) + +# Project Description + +The very first step to make any medical data available to research community is it's anonymization. [SlicerBatchAnonymize](https://github.com/hina-shah/SlicerBatchAnonymize) +is a 3D Slicer extension to anonymize a batch of DICOM images by stripping most of metadata (image information stays intact). +The tool currently provies a user-friendly UI, supports export to several popular research formats including DICOM series, and also generates a crosswalk files for future uses. + + +## Objective + + +1. Add support for exporting CBCT images to a single DICOM file. +2. Add support for keeping certain metadata fields (example: age and gender) intact during anonymization process +3. Improve current defacing algorithm + +## Approach and Plan + + +1. CBCT export to dingle file DICOM images will need some exploration into DICOM standards to be careful that correct modailities are assigned correct SOP IDs. Ask experts what is the right way to convert a multi-file DICOm images to a single file. 
+2. Will be using inspiration from existing metadata anonymization tools to implement "selective" metadata stripping, with initial options of keeping gender and age intact. This is per the request of our clinicians who will be the primary users of this tool. +3. Current defacing approach creates noise in the back of the head, and is not robust to intensity changes. We'll work on implementing frontal region detection, and make the algorithm robust to intensities. Community is welcome to add their own/other standard defacing algorithms in the extension. + +## Progress and Next Steps + + + +* Support added for keeping age and gender intact during anonymization. +* Creating a summary report. +* Plans for CBCT anonymization created. The defacing will be evaluated using visual inspection and a survey by clinicians. + * Find the frontal face, and run anonymization on just that part. + * Make defacing robust to intensity changes through normalization. + * Retrain AMASSS and consider adding more anatomical structures + +# Illustrations + +![image](https://user-images.githubusercontent.com/22948571/216633374-31240755-bcee-4c77-919a-ad39685ac71e.png) +SlicerBatchAnonymizeScreenshot + +image +CBCT Defacing pipeline + +image +Examples of CBCT defacing screenshots for evaluation + +# Slicer Extension link: +[SlicerBatchAnonymize Slicer extension](https://github.com/hina-shah/SlicerBatchAnonymize) + +[SlicerBatchAnonymize tutorial video](https://www.youtube.com/watch?v=2o8TInbGmRE) + +[DICOM standard guidelines for multi-frame volume generation](https://www.dicomstandard.org/docs/librariesprovider2/dicomdocuments/wp-cotent/uploads/2018/10/day1_s9-solomon-multiframe.pdf?sfvrsn=f07da5a4_2) diff --git a/PW38_2023_GranCanaria/Projects/SlicerCBM/README.md b/PW38_2023_GranCanaria/Projects/SlicerCBM/README.md new file mode 100644 index 000000000..2a70e7ffd --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/SlicerCBM/README.md @@ -0,0 +1,97 @@ +Back to [Projects 
List](../../README.md#ProjectsList) + +# SlicerCBM: Computational Biophysics for Medicine in 3D Slicer + +## Key Investigators + +- Benjamin Zwick (The University of Western Australia) +- Saima Safdar (The University of Western Australia) +- Andy Huynh (The University of Western Australia) +- Gerry Gralton (The University of Western Australia) +- Adam Wittek (The University of Western Australia) +- Karol Miller (The University of Western Australia) + +# Project Description + + + +SlicerCBM is an extension for 3D Slicer that provides tools for +creating and solving computational models of biophysical systems and +processes with a focus on clinical and biomedical applications. + +## Objective + + + +1. Package SlicerCBM modules as an installable 3D Slicer extension. + +2. Improve documentation of individual modules and workflow. + +## Approach and Plan + + + +1. Complete the requirements for a new 3D Slicer extension () + +2. Add the SlicerCBM extension to the Slicer Extensions Catalog. + +## Progress and Next Steps + + + +1. Individual modules have been developed and are available on GitHub + () + +2. Checklist for submitting a new extension () + +# Illustrations + + + +![Electrical Conductivity module in SlicerCBM](Screenshot_20230130_112629.png) + +# Background and References + + + +Code repository and documentation: + +- +- + +Sample data: + +- Zwick BF, Safdar S, Bourantas GC, Joldes GR, Hyde DE, Warfield SK, + Wittek A, Miller K. Data for patient-specific solution of the + electrocorticography forward problem in deforming brain [Data + set]. Zenodo; 2022. + +Publications: + +- Safdar S, Zwick BF, Bourantas G, Joldes GR, Warfield SK, Hyde DE, + Wittek A, Miller K. Automatic Framework for Patient-Specific + Biomechanical Computations of Organ Deformation: An Epilepsy (EEG) + Case Study. In: Nielsen PMF, Nash MP, Li X, Miller K, Wittek A, + editors. Computational Biomechanics for Medicine. Cham: Springer + International Publishing; 2022. p. 75–89. 
+ +- Zwick BF, Bourantas GC, Safdar S, Joldes GR, Hyde DE, Warfield SK, + Wittek A, Miller K. Patient-specific solution of the + electrocorticography forward problem in deforming + brain. NeuroImage. 2022;263:119649. + +- Yu Y, Safdar S, Bourantas GC, Zwick BF, Joldes GR, Kapur T, Frisken + S, Kikinis R, Nabavi A, Golby A, Wittek A, Miller K. Automatic + framework for patient-specific modelling of tumour resection-induced + brain shift. Comput Biol Med. 2022;143:105271. + +- Safdar S, Joldes GR, Zwick BF, Bourantas GC, Kikinis R, Wittek A, + Miller K. Automatic Framework for Patient-Specific Biomechanical + Computations of Organ Deformation. In: Miller K, Wittek A, Nash M, + Nielsen PMF, editors. Computational Biomechanics for Medicine. Cham: + Springer; 2021. p. 3–16. diff --git a/PW38_2023_GranCanaria/Projects/SlicerCBM/Screenshot_20230130_112629.png b/PW38_2023_GranCanaria/Projects/SlicerCBM/Screenshot_20230130_112629.png new file mode 100644 index 000000000..564c9522f Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerCBM/Screenshot_20230130_112629.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerCloud/HowToSetupAWSEC2Server.md b/PW38_2023_GranCanaria/Projects/SlicerCloud/HowToSetupAWSEC2Server.md new file mode 100644 index 000000000..67fe83707 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/SlicerCloud/HowToSetupAWSEC2Server.md @@ -0,0 +1,132 @@ +# How-to setup AWS EC2 Windows instances to use MONAILabel, deep learning tools, and 3D Slicer + +_by Rudolf Bumm (KSGR), Qing Liu (AWS), Steve Pieper (Isomics), Gang Fu (AWS)_ + + +# Benefits + +* Setup a powerful 3D Slicer instance on AWS Cloud +* Have different servers for each project +* Let coworkers remotely log on to that servers +* Share your 3D Slicer results from that server via the S3 bucket or your own 3dviewer.net instance  +* Up- or downscale your machine according to the required GPU power +* Access your EC2 machine with low-cost hardware, even tablets +* Switch off 
(pause), and finally, delete the instance if you do not need it and only pay for what you use + +# Things to consider + +We will be running a Windows EC2 instance.  + +Small EC2 Windows instances without GPU support can be created and run nearly free of charge on AWS. You could use an instance type "t2.small".  + +At least EC2 "g" instance types (with GPU support) will be needed to work with 3D Slicer and deep learning tools. All our testing has been on a "g5.xlarge".  + +For more information about GPU options see the [Amazon Deep Learning GPU Guide](https://docs.aws.amazon.com/dlami/latest/devguide/gpu.html). + +*Important note:* You may be running into "limit"  errors when you create and run your EC2 instance with GPU, because your Amazon account may need to get enabled for using a GPU first. The first step to increase your GPU instance quota is to request an instance using the steps below, and then if it's denied follow the instructuctions to request a quota increase. If you have an institutional email account (company or university) it may be easier to get a quota increase compared to a generic gmail or yahoo type of email account. + +The approximate cost of an EC2 instance with NVIDIA A10G support is around 1-2 $ per hour.  + +# Step 1. 
Deploy the CloudFormation template + +The CloudFormation template will automatically perform the following tasks when you create the EC2 instance: + +* Install the latest NVIDIA drivers +* Install git +* Install MONAILabel +* Install TotalSegmentator +* Install lungmask +* Download the 3D Slicer stable installer +* Install Firefox +* Install and connect an S3 bucket + +Go to the Amazon Console + +[https://aws.amazon.com/de/console/](https://aws.amazon.com/de/console/) + +and go to login page + +|![](https://user-images.githubusercontent.com/18140094/210726738-883715be-d8c0-4432-b78b-ef2ac8a5da35.png)| +|-| + + +Log into the AWS console, select the region you’d like to use + +|![](https://user-images.githubusercontent.com/18140094/210726739-a1f70591-3ceb-49db-b12b-4ea0c819a7f6.png)| +|-| + +In the search bar, type cloudformation, and select CloudFormation service + +|![](https://user-images.githubusercontent.com/18140094/210726732-c54e062b-3178-4dfc-84a1-c4b38d42a6aa.png)| +|-| + +* In the CloudFormation console, click Create stack + +|![](https://user-images.githubusercontent.com/18140094/210726731-9c9641a9-1f06-46b0-a59b-ffb2e98103f4.png)| +|-| + +* Download the [WIndowsServer2019-NICE-DCV.yaml file](./WindowsServer2019-NICE-DCV.yaml). +* "Upload it as a template file".  +* Click Next. + +|![](https://user-images.githubusercontent.com/18140094/210726733-c02aacd7-460a-43da-bb5d-7fb10e2972b7.png)| +|-| + +* Enter a few parameters for the stack +* Enter a name for this stack you are deploying, e.g. MONAI-Stack +* Select the instance type you’d like to use, e.g. g5.xlarge +* You can enter a name for the EC2 instance, or leave the default value +* Set the IP address of the machine that you will use to connect to the EC2 instance. To find out your IP address, you can visit [https://checkip.amazonaws.com](https://checkip.amazonaws.com) if your IP address is 1.2.3.4, please enter 1.2.3.4/32 as the parameter. 
*Note:* This is a security measure so that the machine can only be reached from the IP address of the client machine you are currently using. Standard AWS networking tools can be used to add other IP addresses for access after the instance is created. + +The screen should look similar to this: + +|![](https://user-images.githubusercontent.com/18140094/210726735-c46427e8-8411-4af7-b4ce-54b433605052.png)| +|-| + +* Click Next +* Accept default settings and click Next +* On the summary page, check I acknowledge that AWS CloudFormation might create IAM resources., and click +* Submit +* Status: "CREATE\_IN\_PROGRESS". It should take 15 ~ 20 mins for the stack to get deployed + +|![](https://user-images.githubusercontent.com/18140094/210744034-a4f668f6-d286-4ac3-953a-b12bec5fc022.png)| +|-| + +# Step 2. Connect to the environment + +1\. After the stack is deployed successfully, select the stack in CloudFormation console and then select Outputs tab + +|![](https://user-images.githubusercontent.com/18140094/210726734-8595cd3f-c6ee-4987-bbea-186a38747e2a.png)| +|-| + +* Set your login password. 
In the Outputs tab, open the SSMsessionManager link in a new browser tab +* In the new browser tab (Windows Powershell session), type net user administrator my-strong-password, +* please change my-strong-password to your own password, and hit Enter +* Go back to the Outputs tab, open DCVwebConsole link in a new browser tab (accepts the security warning) + +|![](https://user-images.githubusercontent.com/18140094/210726736-6c4a83b2-2580-4b4f-86e7-b4ead4f46082.png)| +|-| + +* In the NICE DCV login screen, enter administrator as the Username, and the previously set password as Password +* You should be able to log into the Windows EC2 instance + +|![](https://user-images.githubusercontent.com/18140094/210756064-f6ff0e14-b325-48c5-b4b7-a55cd33e6281.png)| +|-| + +speedtest.net on g5.xlarge: + +|![](https://user-images.githubusercontent.com/18140094/210758504-4605146a-a51d-4aa6-8a73-acc33079eb58.png)| +|-| + +Steam EZbench on g5.xlarge: + +|![](https://user-images.githubusercontent.com/18140094/210760154-e3b3dec3-fe44-42b9-8960-a7b753221e19.png)| +|-| + +# Step 3. Delete the environment + +* When the environment is no longer needed, you can delete all deployed resources. 
+* Go to the CloudFormation console, select the deployed stack, and then click Delete + +|![](https://user-images.githubusercontent.com/18140094/210726737-0adb6986-49cb-4b3e-a67e-85c9decff817.png)| +|-| diff --git a/PW38_2023_GranCanaria/Projects/SlicerCloud/README.md b/PW38_2023_GranCanaria/Projects/SlicerCloud/README.md new file mode 100644 index 000000000..e1bff24d3 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/SlicerCloud/README.md @@ -0,0 +1,55 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# How to setup and run 3D Slicer on an AWS cloud server + +## Key Investigators + +* Rudolf Bumm (KSGR) +* Steve Pieper (Isomics) +* Gang Fu (AWS) +* Qing Liu (AWS) + +# Project Description + +How to setup and run 3D Slicer on an AWS cloud server + +## Objective + +For this workshop, we want to set up a 3D Slicer EC2 AWS cloud instance that can be scaled based on hardware needs and can be used for deep learning.  + +The EC2 Windows Cloud server instance should natively run 3D Slicer and have NVIDIA GPU and CUDA support.  + +[The necessary steps are documented here](./HowToSetupAWSEC2Server.md). + +## Approach and Plan + +An AWS [CloudFormation template](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/SlicerCloud/WindowsServer2019-NICE-DCV.yaml) was designed to install  + +* 3D Slicer stable +* Firefox +* latest NVIDIA drivers +* git +* MONAILabel +* TotalSegmentator +* lungmask  +* S3 bucket + +A mechanism how to share result data between a working group will be discussed, with the option to install a 3dviewer.net server instance or use a S3 bucket. + +The speed of general system setup, up- and downscaling as well as running costs will be evaluated. + +## Progress and Next Steps + +The use of the EC2 instances was comfortable and reliable during the Project week. The performance of the g5x4large instance was a bit slower than the reference GTX 3070 Ti, although the GPU load never was on the upper limits. 
Up and downgrading the EC2 instance was comfortable and did not touch any component of the installed programs. The cost of running a g5.x4large GPU instance is around 2 $ per hour. The Cloudformation template proved very helpful.  + +Setting up the EC2 instance for interactive 3D purposes makes only sense if connection speeds to and from the server are around 50 Mbps.  + +# Illustrations + +![image](https://user-images.githubusercontent.com/18140094/211152360-f6e0d66b-aa84-4109-86d5-eedf404fd528.png) + +Fig. 1 Raspberry 4 Model B running 3D Slicer on an EC2 instance in Chromium browser + +# Background and References + +[Recommended GPU instances for deep learning purposes](https://docs.aws.amazon.com/dlami/latest/devguide/gpu.html) diff --git a/PW38_2023_GranCanaria/Projects/SlicerCloud/WindowsServer2019-NICE-DCV.yaml b/PW38_2023_GranCanaria/Projects/SlicerCloud/WindowsServer2019-NICE-DCV.yaml new file mode 100644 index 000000000..fe50ae7cb --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/SlicerCloud/WindowsServer2019-NICE-DCV.yaml @@ -0,0 +1,431 @@ +AWSTemplateFormatVersion: 2010-09-09 +Description: Windows Server with NICE DCV (login as administrator) + +Metadata: + License: + Description: > + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + SPDX-License-Identifier: MIT-0 + + Permission is hereby granted, free of charge, to any person obtaining a copy of this + software and associated documentation files (the "Software"), to deal in the Software + without restriction, including without limitation the rights to use, copy, modify, + merge, publish, distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, + INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + AWS::CloudFormation::Interface: + ParameterGroups: + - Label: + default: Instance type + Parameters: + - InstanceType + - Label: + default: EC2 configuration + Parameters: + - EC2Name + - Label: + default: Allowed inbound source IP prefixes to NICE DCV port 8443 + Parameters: + - IngressIPv4 + +Mappings: + AMIRegionMap: + af-south-1: + NICEDCVAMI: ami-03d47e4bac006f330 + ap-south-1: + NICEDCVAMI: ami-0e776ca6a86a5e730 + eu-north-1: + NICEDCVAMI: ami-01c816b3591d5dd97 + eu-west-3: + NICEDCVAMI: ami-0625f6a74fb6afe05 + eu-south-1: + NICEDCVAMI: ami-0ec6afec9aaece072 + eu-west-2: + NICEDCVAMI: ami-0b1a1b220220beb02 + eu-west-1: + NICEDCVAMI: ami-022508a67c81e6c28 + ap-northeast-2: + NICEDCVAMI: ami-0c31940939aa476b1 + ap-northeast-1: + NICEDCVAMI: ami-0321bbe7ef4a14e68 + me-south-1: + NICEDCVAMI: ami-0360a805f5ec40544 + ca-central-1: + NICEDCVAMI: ami-0c01afbe70f4fdf7c + sa-east-1: + NICEDCVAMI: ami-0b0c54a4745049317 + ap-east-1: + NICEDCVAMI: ami-05e4a897ad6341d4b + ap-southeast-1: + NICEDCVAMI: ami-0071215a86fc69d89 + ap-southeast-2: + NICEDCVAMI: ami-0dc354d026444eaca + eu-central-1: + NICEDCVAMI: ami-04c15076684cbc808 + us-east-1: + NICEDCVAMI: ami-0c46f9b9e1760377c + us-east-2: + NICEDCVAMI: ami-03ddf915d48800443 + us-west-1: + NICEDCVAMI: ami-092e5726b56b55e76 + us-west-2: + NICEDCVAMI: ami-0b3de9114492317cb + +Parameters: + InstanceType: + Description: Instance type ( https://aws.amazon.com/ec2/instance-types/#Accelerated_Computing ) + Type: String + Default: g4dn.xlarge + AllowedValues: + - g4dn.xlarge + - g4dn.2xlarge + - g4dn.4xlarge + - g5.xlarge + - g5.2xlarge + - g5.4xlarge + + EC2Name: + Description: Name of EC2 instance + Type: String + Default: Windows Server-NICE-DCV + IngressIPv4: + Type: 
String + Description: Allowed source prefix (IPv4) ( https://checkip.amazonaws.com ) + Default: 0.0.0.0/0 + +Resources: + VPC: + Type: AWS::EC2::VPC + Properties: + CidrBlock: 10.192.0.0/16 + EnableDnsSupport: true + EnableDnsHostnames: true + Tags: + - Key: StackName + Value: !Sub ${AWS::StackName} + - Key: StackId + Value: !Sub ${AWS::StackId} + - Key: Name + Value: !Sub ${AWS::StackName} - VPC + + InternetGateway: + Type: AWS::EC2::InternetGateway + Properties: + Tags: + - Key: StackName + Value: !Sub ${AWS::StackName} + - Key: StackId + Value: !Sub ${AWS::StackId} + - Key: Name + Value: !Sub ${AWS::StackName} - IGW + + InternetGatewayAttachment: + Type: AWS::EC2::VPCGatewayAttachment + Properties: + InternetGatewayId: !Ref InternetGateway + VpcId: !Ref VPC + + PublicSubnet: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref VPC + AvailabilityZone: !Select [1, !GetAZs ""] + CidrBlock: 10.192.10.0/24 + MapPublicIpOnLaunch: True + Tags: + - Key: StackName + Value: !Sub ${AWS::StackName} + - Key: StackId + Value: !Sub ${AWS::StackId} + - Key: Name + Value: !Sub ${AWS::StackName} - Public Subnet + + PublicRouteTable: + Type: AWS::EC2::RouteTable + Properties: + VpcId: !Ref VPC + Tags: + - Key: StackName + Value: !Sub ${AWS::StackName} + - Key: StackId + Value: !Sub ${AWS::StackId} + - Key: Name + Value: !Sub ${AWS::StackName} - Public Routes + + DefaultPublicRoute: + Type: AWS::EC2::Route + DependsOn: InternetGatewayAttachment + Properties: + RouteTableId: !Ref PublicRouteTable + DestinationCidrBlock: 0.0.0.0/0 + GatewayId: !Ref InternetGateway + + PublicSubnetRouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + RouteTableId: !Ref PublicRouteTable + SubnetId: !Ref PublicSubnet + + S3GatewayEndpoint: + Type: AWS::EC2::VPCEndpoint + Properties: + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: "*" + Action: + - "s3:*" + Resource: + - "arn:aws:s3:::*" + RouteTableIds: + - !Ref PublicRouteTable + 
ServiceName: !Sub "com.amazonaws.${AWS::Region}.s3" + VpcId: !Ref VPC + + SecurityGroup: + Type: AWS::EC2::SecurityGroup + Properties: + GroupDescription: Allow inbound DCV + VpcId: !Ref VPC + SecurityGroupIngress: + - Description: NICE DCV (IPv4) + IpProtocol: "tcp" + FromPort: "8443" + ToPort: "8443" + CidrIp: !Ref IngressIPv4 + - Description: NICE DCV QUIC (IPv4) + IpProtocol: "udp" + FromPort: "8443" + ToPort: "8443" + CidrIp: !Ref IngressIPv4 + SecurityGroupEgress: + - Description: Allow all outbound traffic (IPv4) + IpProtocol: "-1" + CidrIp: 0.0.0.0/0 + - Description: Allow all outbound traffic (IPv6) + IpProtocol: "-1" + CidrIpv6: "::/0" + Tags: + - Key: StackName + Value: !Sub ${AWS::StackName} + - Key: StackId + Value: !Sub ${AWS::StackId} + - Key: Name + Value: !Sub "[${AWS::StackName}] - ${EC2Name}" + + S3DataBucket: + Type: AWS::S3::Bucket + Properties: + PublicAccessBlockConfiguration: + BlockPublicAcls: true + BlockPublicPolicy: true + IgnorePublicAcls: true + RestrictPublicBuckets: true + BucketEncryption: + ServerSideEncryptionConfiguration: + - ServerSideEncryptionByDefault: + SSEAlgorithm: AES256 + + InstanceIamRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Statement: + - Effect: Allow + Principal: + Service: [ec2.amazonaws.com] + Action: ["sts:AssumeRole"] + Path: / + Policies: + - PolicyName: dcvLicensing + PolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Action: + - s3:GetObject + Resource: !Sub "arn:${AWS::Partition}:s3:::dcv-license.${AWS::Region}/*" + - PolicyName: fullAccessToDataBucket + PolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Action: + - s3:ListBucket + - s3:GetObject + - s3:PutObject + Resource: + - !GetAtt S3DataBucket.Arn + - !Sub "${S3DataBucket.Arn}/*" + ManagedPolicyArns: + - !Sub "arn:${AWS::Partition}:iam::aws:policy/AmazonSSMManagedInstanceCore" + - !Sub "arn:${AWS::Partition}:iam::aws:policy/AmazonS3ReadOnlyAccess" + Tags: + - Key: StackName + 
Value: !Sub ${AWS::StackName} + - Key: StackId + Value: !Sub ${AWS::StackId} + + InstanceProfile: + Type: AWS::IAM::InstanceProfile + Properties: + Path: / + Roles: + - !Ref InstanceIamRole + + EC2Instance: + Type: AWS::EC2::Instance + CreationPolicy: + ResourceSignal: + Timeout: PT90M + Properties: + ImageId: !FindInMap [AMIRegionMap, !Ref "AWS::Region", NICEDCVAMI] + InstanceType: !Ref InstanceType + IamInstanceProfile: !Ref InstanceProfile + SubnetId: !Ref PublicSubnet + Monitoring: true + SecurityGroupIds: + - !Ref SecurityGroup + BlockDeviceMappings: + - DeviceName: /dev/sda1 + Ebs: + VolumeSize: 300 + VolumeType: gp3 + DeleteOnTermination: true + Encrypted: true + UserData: + Fn::Base64: !Sub | + + Tags: + - Key: Name + Value: !Ref EC2Name + - Key: StackName + Value: !Sub ${AWS::StackName} + - Key: StackId + Value: !Sub ${AWS::StackId} + - Key: GitHub + Value: https://github.com/aws-samples/amazon-ec2-nice-dcv-samples + + CloudWatchAlarm: + Type: 'AWS::CloudWatch::Alarm' + Properties: + AlarmName: StopInstanceAlarm + AlarmDescription: Auto-stop EC2 instance if CPU usage is below 5% for 1 hour + ComparisonOperator: LessThanThreshold + EvaluationPeriods: 12 + MetricName: CPUUtilization + Namespace: AWS/EC2 + Period: 300 + Statistic: Average + Threshold: 5 + TreatMissingData: notBreaching + AlarmActions: + - arn:aws:automate:us-east-1:ec2:stop + Dimensions: + - Name: InstanceId + Value: !Ref EC2Instance + + ElasticIP: + Type: AWS::EC2::EIP + Properties: + InstanceId: !Ref EC2Instance + Tags: + - Key: StackName + Value: !Sub ${AWS::StackName} + - Key: StackId + Value: !Sub ${AWS::StackId} + +Outputs: + InstanceID: + Description: EC2 Instance ID + Value: !Ref EC2Instance + + PublicIP: + Description: EC2 Public IP + Value: !GetAtt EC2Instance.PublicIp + + SSMsessionManager: + Description: SSM Session Manager login ("net user administrator MyStr@ngAdminPassw0rd" to change administrator password) + Value: !Sub 
"https://${AWS::Region}.console.aws.amazon.com/systems-manager/session-manager/${EC2Instance}" + + DCVwebConsole: + Description: DCV web console (login as administrator) + Value: !Sub "https://${EC2Instance.PublicIp}:8443" + + DCVdownload: + Description: DCV client download + Value: https://download.nice-dcv.com + + S3DataBucket: + Description: S3 bucket for storing data + Value: !Ref S3DataBucket diff --git a/PW38_2023_GranCanaria/Projects/SlicerElastixUpdate/Picture1.png b/PW38_2023_GranCanaria/Projects/SlicerElastixUpdate/Picture1.png new file mode 100644 index 000000000..78a9a904a Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerElastixUpdate/Picture1.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerElastixUpdate/README.md b/PW38_2023_GranCanaria/Projects/SlicerElastixUpdate/README.md new file mode 100644 index 000000000..9b92dbe1f --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/SlicerElastixUpdate/README.md @@ -0,0 +1,46 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# SlicerElastix: update elastix version + +## Key Investigators + +- Simón Oxenford (Charité Berlin) +- Andras Lasso (Queen's University) + +# Project Description + + +Elastix recently released a new version [(5.1.0)](https://github.com/SuperElastix/elastix/releases/tag/5.1.0) which allows specifying an ITK file format for transformation output files. + +## Objective + + +The idea is to update SlicerElastix to use this new version and use itk transforms throughout the module. + +## Progress and Next Steps + + + +1. Locally tested the new elastix version with itk transforms (MacOS). +1. Made a PR to the SlicerElastix repo with proposed changes. [GH PR](https://github.com/lassoan/SlicerElastix/pull/37) + +Next steps: + +1. Test build for other OSs. +1. Leverage upon the itk transform use to include other features (using initial transform). 
+ +# Illustrations + + + +![BSpline registration output](Picture1.png) + + +# Background and References + + + +- This fixes some issues when loading Elastix BSpline transforms. [GH issue](https://github.com/lassoan/SlicerElastix/issues/33) diff --git a/PW38_2023_GranCanaria/Projects/SlicerHub/3dslicer.png b/PW38_2023_GranCanaria/Projects/SlicerHub/3dslicer.png new file mode 100644 index 000000000..952fda03c Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerHub/3dslicer.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerHub/3dslicerhub_esquema.png b/PW38_2023_GranCanaria/Projects/SlicerHub/3dslicerhub_esquema.png new file mode 100644 index 000000000..e8048a72e Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerHub/3dslicerhub_esquema.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerHub/3dslicerhub_esquema_2.png b/PW38_2023_GranCanaria/Projects/SlicerHub/3dslicerhub_esquema_2.png new file mode 100644 index 000000000..cf37679e1 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerHub/3dslicerhub_esquema_2.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerHub/README.md b/PW38_2023_GranCanaria/Projects/SlicerHub/README.md new file mode 100644 index 000000000..c48172757 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/SlicerHub/README.md @@ -0,0 +1,107 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# 3DSlicerHub + +## Key Investigators + +- Rafael Nebot (ITC - Instituto Tecnológico de Canarias) +- Paula Moreno (ITC) +- Juan Ruiz (ULPGC) +- Idafen Santana (ULPGC) +- Steve Pieper + +# Project Description + +Multiuser approach to Slicer in a browser, based on [Slicer Docker](https://github.com/pieper/SlicerDockers) + +### Main Features + +- Browser based 3D Slicer using [Slicer Docker](https://github.com/pieper/SlicerDockers), similar to AWS AppStream. +- Per-user workspace, with persistent data and configuration. 
+- Session control, including quick URL sharing convenience, for educational and collaborative purposes. +- Authentication using OpenLDAP. + +## Objective + + + +1. Use private clouds with GPU virtual machines. +2. Make the software configuration persistent after deleting the container. +3. Migrate from Docker+DockerCompose to Kubernetes+podman. +4. Set the size of 3DSlicer web window to fit the size of the user's screen and other novnc settings. +5. USB over IP + OpenIGTLink + Slicer in Docker. + +## Approach and Plan + + + +1. (o.1) GPU in Slicer Image: modify Slicer image to add nVidia drivers. +2. (o.1) GPU using separate MONAI Label images: analyze and design how to improve session manager to allow users to launch "pod-sets" (e.g. Slicer+MONAI, Slicer, Slicer+Orthanc, ...). +3. (o.2) Share current status of the feature with people knowing about Slicer to fix the issue "saving config in laptop works, in VM it does not". +4. (o.3) Play with "kubernetes" package to familiarize ourselves with the capabilities. +5. (o.3) Rewrite parts of 3d slicer hub accessing to containers to be able to work with "kubernetes" Python package. +6. (o.3) Start testing +7. (o.4) Gather information with participants knowing about websockify. +8. (o.5) Compile information about IGT protocol with participants in NAMIC. +9. (o.5) Modify design of 3dslicerhub architecture to enable IGT capabilities for Slicer containers + +## Progress and Next Steps + + + +1. Slicer in Docker with GPU. + - Approach: modify root image to one by nVidia. + - Slicer works and detects the GPU. + - But X11 server and the window manager do not start correctly + - Next: + - from AWS/GCE instance image build scripts (https://github.com/pieper/SlicerMachines) extract X11 config sections to prepare nVidia configuration. + - modify to ensure the image works on computers without GPU. + - fork original repository (https://github.com/pieper/SlicerDockers), prepare pull request. 
+ - use as base image for SlicerHub spawner. +2. Persistence of configuration + - A bug in the preparation of Slicer.ini was detected and solved. + - A custom Slicer image is built using a Slicer.ini containing a connection to the Orthanc server of the OpenDx28 platform. +3. Migration from Docker Compose to Kubernetes + - Study of Kubernetes. Two "technology scope" adjustments: + - "podman" not ready for production, Docker used (also better for images with CUDA support). + - Use of "kubectl" instead of Python package (direct REST calls). + - Refactor current 3dslicerhub to enable switching between Container Orchestrator managers: baseline implementation, new implementation. + - Efforts adapting for K8s concepts: because of the different scopes of Docker Compose and Kubernetes, concepts do not match exactly. + - Next: + - Find ways, using kubectl and resource files to (see diagram below): + - Deploy NGINX service, using a volume containing “nginx.conf”. + - Deploy SlicerHub service, using the same volume. + - Execute "kubectl" from SlicerHub service to access its own K8s cluster. + - Continue adaptation of Container Orchestrator implementation for Kubernetes. + - Test locally. + - Deploy at Teide VMs. 
+ +# Illustrations + +Docker Compose architecture + +K8s architecture + +Docker Compose eschema + + +Docker Compose eschema + +## Screenshots + +SlicerHub Landing Page +--> +SlicerHub login +--> +SlicerHub user page +--> +3DSlicer + + + + + + +# Background and References + + diff --git a/PW38_2023_GranCanaria/Projects/SlicerHub/dc_architecture.png b/PW38_2023_GranCanaria/Projects/SlicerHub/dc_architecture.png new file mode 100644 index 000000000..54122d212 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerHub/dc_architecture.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerHub/k8s_architecture.png b/PW38_2023_GranCanaria/Projects/SlicerHub/k8s_architecture.png new file mode 100644 index 000000000..f46f965d1 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerHub/k8s_architecture.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerHub/login.png b/PW38_2023_GranCanaria/Projects/SlicerHub/login.png new file mode 100644 index 000000000..dc258431b Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerHub/login.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerHub/main.png b/PW38_2023_GranCanaria/Projects/SlicerHub/main.png new file mode 100644 index 000000000..d2165e1ba Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerHub/main.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerHub/session_manager.png b/PW38_2023_GranCanaria/Projects/SlicerHub/session_manager.png new file mode 100644 index 000000000..adf171a81 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerHub/session_manager.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerIMSTK/README.md b/PW38_2023_GranCanaria/Projects/SlicerIMSTK/README.md new file mode 100644 index 000000000..5e7a0be41 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/SlicerIMSTK/README.md @@ -0,0 +1,52 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Slicer + IMSTK for low cost training setups + +## Key 
Investigators + +- Sam Horvath (Kitware) +- Kevin Cleary (CNMC) +- Karun Sharma (CNMC) + +# Project Description + + + +This is an on-going project to develop a kidney biopsy trainer based on 3D Slicer. The trainer uses the iMSTK library to simulate force interactions between the 3D models in Slicer and the physical trainer hardware. + +## Objective + + + +1. Upgrade the existing demo to newest 3D Slicer +1. Upgrade the existing demo to newest iMSTK (simulation library) + +## Approach and Plan + + + +1. Rewrite the main UI using the new ParameterNode infrastructure. + +## Progress and Next Steps + + + +1. Didn't get much done on the actual coding. +1. Talked to a lot of people who were interested in the IMSTK package +1. ... + +# Illustrations + + +## Demo version of trainer +![Demo version of trainer](UsingTrainer.png) + +# Background and References + + + +- [SlicerIMSTK Extension](https://github.com/KitwareMedical/SlicerIMSTK) +- [IMSTk](https://www.imstk.org/) diff --git a/PW38_2023_GranCanaria/Projects/SlicerIMSTK/UsingTrainer.png b/PW38_2023_GranCanaria/Projects/SlicerIMSTK/UsingTrainer.png new file mode 100644 index 000000000..1c38e535b Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerIMSTK/UsingTrainer.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerLiver/README.md b/PW38_2023_GranCanaria/Projects/SlicerLiver/README.md new file mode 100644 index 000000000..12060b146 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/SlicerLiver/README.md @@ -0,0 +1,89 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Slicer-Liver + +## Key Investigators + +- Rafael Palomar (Oslo University Hospital/NTNU, Norway) +- Gabriella d'Albenzio (Oslo University Hospital, Norway) +- Ruoyan Meng (NTNU, Norway) +- Ole Vegard Solberg (SINTEF, Norway) +- Geir Arne Tangen (SINTEF, Norway) + +# Project Description + +This project will continue the development of the *Slicer-Liver* extension +that will be developed through the [ALive 
project](https://alive-research.no). +The objective of the Slicer-Liver extension is to provide researchers +with tools to perform liver analytics towards planning of liver interventions +(resections, ablations). + +## Objectives + + - Liver resection planning: + + 1. *Integration of resection contours* : introduce a new resection surface generated from a curved contour; this implies less interactions with the 3D Models. + 2. *Real-time 2D resection risk maps* : extract functional information in real time from the 3D resection surface and then map it onto a 2D map with intuitive and detailed information for surgical risk assessment and planning decisions. + + - Testing: + + 1. Improve testing infrastructure of the project + +## Approach and Plan + +For this Project week we will build on the advances obtained in the last project +week. Some of the objectives are based on new functionality that has been tested +but not integrated yet, while some other objectives are refinement of +functionality previously integrated in [Slicer-Liver +PW37](https://github.com/NA-MIC/ProjectWeek/tree/master/PW37_2022_Virtual/Projects/Slicer-Liver) + +## Illustrations + +

+ + + +

+ +

+ + + +

+ +# Resection from curved contour +We create a new resection surface generated directly from a curved contour; this implies fewer interactions with the 3D Models and quicker planning. + +resection + +# Resectogram +We create Real-time 2D resection risk map: which extracts functional information in real-time from the intersection between the 3D resection surface and liver model and then maps it onto a 2D map, resulting in quicker risk analysis with less cognitive cost while planning the surgery. + +resectogram + +## Lung Surgery Planning with Rudolf Bumm +We also tried to extend our user scenario from liver resection to lung resection. With the help of Dr. Rudolf, we plan a lung surgery case using Slicer liver extension. + +

+ lung-2D + lung-3D +

+ + +## Progress and Next Steps + +In this project week, we have changed the user interaction to use segmentations instead of models, which greatly simplifies the user interaction. + +There is a PR for adding this extension to the extension manager. This be effective when we prepare a tutorial video on the use of the extension. + +There are still standard features (e.g., volumetry computation) and new research features (e.g, risk maps visualization, new planning algorithms) that we would like to implement in future Project Weeks. + + +# Background and References +1. [Slicer-Liver PW37](https://github.com/NA-MIC/ProjectWeek/tree/master/PW37_2022_Virtual/Projects/Slicer-Liver) (July 2022) +1. [Slicer-Liver PW36](https://github.com/NA-MIC/ProjectWeek/tree/master/PW36_2022_Virtual/Projects/Slicer-Liver) (January 2022) +1. [Slicer-Liver PW35](https://github.com/NA-MIC/ProjectWeek/tree/master/PW35_2021_Virtual/Projects/Slicer-Liver) (June 2021) +1. [NorMIT-Plan at NA-MIC project week](https://projectweek.na-mic.org/PW33_2020_GranCanaria/Projects/NorMIT-Plan/) (january 2020) +1. [NorMIT-Plan at NA-MIC project week](https://projectweek.na-mic.org/PW34_2020_Virtual/Projects/SlicerLiverAnalysis/) (December 2020) +1. Palomar, Rafael, et al. "A novel method for planning liver resections using deformable Bézier surfaces and distance maps." Computer Methods and Programs in Biomedicine 144 (2017): 135-45. +1. Palomar, Rafael, et al. "Surface reconstruction for planning and navigation of liver resections." Computerized Medical Imaging and Graphics 53 (2016): 30-42. 
diff --git a/PW38_2023_GranCanaria/Projects/SlicerLiver/bezier_surface_markup.png b/PW38_2023_GranCanaria/Projects/SlicerLiver/bezier_surface_markup.png new file mode 100644 index 000000000..9d1fdb191 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerLiver/bezier_surface_markup.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerLiver/resection_initialization.png b/PW38_2023_GranCanaria/Projects/SlicerLiver/resection_initialization.png new file mode 100644 index 000000000..dcd1f430f Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerLiver/resection_initialization.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerLiver/resection_planning.png b/PW38_2023_GranCanaria/Projects/SlicerLiver/resection_planning.png new file mode 100644 index 000000000..987f1d8f0 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerLiver/resection_planning.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerLiver/screenshot.png b/PW38_2023_GranCanaria/Projects/SlicerLiver/screenshot.png new file mode 100644 index 000000000..5310aa739 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerLiver/screenshot.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerMeasurementPanel/README.md b/PW38_2023_GranCanaria/Projects/SlicerMeasurementPanel/README.md new file mode 100644 index 000000000..515a027a8 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/SlicerMeasurementPanel/README.md @@ -0,0 +1,35 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Measurement Panel + +## Key Investigators + +- Davide Punzo (Freelancer, France) +- Andras Lasso (Perk Labs, Canada) +- Anyone is welcome to join + +# Project Description + +Add a custom simple widget for visualization and control of measurements (markups) + +## Objective + +Integrate the [PR](https://github.com/Slicer/Slicer/pull/6662) in Slicer core + +## Approach and Plan + +1) Get feedback + +2) Finish the PR review + +## Progress and 
Next Steps + +1) Done. UI modifications based on Sara, Steve and Sam feedback have been applied + +2) in progress + +# Illustrations +Panel + +# Background and References +[Pull Request](https://github.com/Slicer/Slicer/pull/6662) diff --git a/PW38_2023_GranCanaria/Projects/SlicerMeasurementPanel/simpleMeasurementPanel.png b/PW38_2023_GranCanaria/Projects/SlicerMeasurementPanel/simpleMeasurementPanel.png new file mode 100644 index 000000000..466ea58ce Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerMeasurementPanel/simpleMeasurementPanel.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerPipelines/AutogeneratedPipelineUI.png b/PW38_2023_GranCanaria/Projects/SlicerPipelines/AutogeneratedPipelineUI.png new file mode 100644 index 000000000..4feccd44e Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerPipelines/AutogeneratedPipelineUI.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerPipelines/NotionalPipelineUI.png b/PW38_2023_GranCanaria/Projects/SlicerPipelines/NotionalPipelineUI.png new file mode 100644 index 000000000..291071feb Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerPipelines/NotionalPipelineUI.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerPipelines/README.md b/PW38_2023_GranCanaria/Projects/SlicerPipelines/README.md new file mode 100644 index 000000000..374d27e2d --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/SlicerPipelines/README.md @@ -0,0 +1,58 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# SlicerPipelines + +## Key Investigators + +- Connor Bowley (Kitware, USA) +- Sam Horvath (Kitware, USA) + +# Project Description + +Back in Project week 36, the SlicerPipelines extension was developed to allow GUI creation of simple modules, known as pipelines, in 3D Slicer. An initial set of pipelines were added which could be composed to make new functionality. 
+ +This implementation had a number of limitations, most prominent being a pipeline could only have a single MRML node as input and a single MRML node as output. + +This project aims to address the limitations of the current PipelineCreator module by allowing multiple input, multiple output of pipelines, as well as allowing cross step connections. + +## Objective + + + +1. Allow multiple inputs to a pipeline (MRML node or other objects). +1. Allow multiple outputs from a pipeline. +1. Make current non-pipeline modules easier to put into the pipeline system. + +## Approach and Plan + + + +1. Refactor the current way pipelines are described to be simpler. +1. Use pieces from the `parameterNodeWrapper` to simplify implementation. + 1. The `parameterPack`s can be used to allow structured output of a pipeline (which is analogous to multiple output) +1. Make a minimum usable GUI for creation of new pipelines via composition of existing pipelines. + +## Progress and Next Steps + + + +1. Collaborate on user interfaces. +2. Implement PipelineCreator logic for making a new pipeline with multiple input nodes. +3. Work on user interface implementation. 
+ +# Illustrations + +Notional pipeline creator UI + +![Notional pipeline creator UI](NotionalPipelineUI.png) + +Autogenerated pipeline UI for test pipeline + +![Autogenerated pipeline UI](AutogeneratedPipelineUI.png) + +# Background and References + +- [Previous Project page](https://projectweek.na-mic.org/PW36_2022_Virtual/Projects/SlicerPipelines/) +- [Discourse post on SlicerPipelines](https://discourse.slicer.org/t/pipelines-in-3d-slicer/20107) +- [Main Github Repository](https://github.com/KitwareMedical/SlicerPipelines) +- [Github fork actively being developed](https://github.com/Connor-Bowley/SlicerPipelines) diff --git a/PW38_2023_GranCanaria/Projects/SlicerQSS/README.md b/PW38_2023_GranCanaria/Projects/SlicerQSS/README.md new file mode 100644 index 000000000..95c7cfc80 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/SlicerQSS/README.md @@ -0,0 +1,80 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Transitioning 3D Slicer to QSS Styling + +## Key Investigators + +- Sam Horvath (Kitware) +- J-Christophe Fillion-Robin (Kitware) +- Connor Bowley (Kitware) +- Andras Lasso (Queens) +- Steve Pieper (Isomics) +- Thibault Pelletier + +# Project Description + + +Currently 3D Slicer supports both QStyle based styling and QtStylesheets. To improve custom apps and user experience, we would like to move all styling to QSS. + +## Objective + + + + +1. Allow users to modify theme colors / add new themes +1. Support the existing Slicer Dark / Light themes through QSS + +## Approach and Plan + + + +1. Create QSS files for the existing Slicer Dark / Light themes +1. Integrate qt-material package to support Material styles through python +1. Create a SlicerThemes (?) extension which pulls in qt-material and add glue code + +## Progress and Next Steps + + + +1. [Forked qt-material](https://github.com/sjh26/qt-material/tree/slicer-compat) to address Slicer specific Python interface. +1. 
Created [SlicerThemes](https://github.com/sjh26/SlicerThemes) extension + 1. Manages installation of Slicer-specific version of qt-material + 1. Allows for saving and loading of custom color theme files + 1. Provides QSS templates for styles +1. Created QSS templates for "Classic" and "Material" Slicer themes + 1. These are still WIP +1. Next steps + 1. Allowing for loading of user provided templates in the extension + 1. Compile list of changes to core code (ctk and Slicer) to allow styles to be set correctly from QSS + +### Core code changes TBD +1. ctkConsole needs tweaks to respect QSS property settings for console colors +1. Slice controllers needs work to prevent icons from disappearing +1. Icon sets should be updated to a Material style + + +# Illustrations + + + +## Initial work + +### qt-material package out of the box w / light blue theme +![qt-material no tweaks](qt-material-box.png) + +### qt-material package with some manual tweaks to theme file +![qt-material with some tweaks](qt-mat-tweaks.png) + +## Project week results + + ![Classic Slicer Light](light-classic.png) ![Classic Slicer Dark](dark-classic.png) + ![Material Slicer Dark](dark-new.png) ![Custom colors](custom-app.png) + +# Background and References + + +1. [Discourse post](https://discourse.slicer.org/t/buttons-need-color/27181/11) +2. 
[qt-material](https://github.com/UN-GCPDS/qt-material) diff --git a/PW38_2023_GranCanaria/Projects/SlicerQSS/custom-app.png b/PW38_2023_GranCanaria/Projects/SlicerQSS/custom-app.png new file mode 100644 index 000000000..59b1e8a27 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerQSS/custom-app.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerQSS/dark-classic.png b/PW38_2023_GranCanaria/Projects/SlicerQSS/dark-classic.png new file mode 100644 index 000000000..9aaad4055 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerQSS/dark-classic.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerQSS/dark-new.png b/PW38_2023_GranCanaria/Projects/SlicerQSS/dark-new.png new file mode 100644 index 000000000..919524650 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerQSS/dark-new.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerQSS/light-classic.png b/PW38_2023_GranCanaria/Projects/SlicerQSS/light-classic.png new file mode 100644 index 000000000..3dbdce9fb Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerQSS/light-classic.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerQSS/qt-mat-tweaks.png b/PW38_2023_GranCanaria/Projects/SlicerQSS/qt-mat-tweaks.png new file mode 100644 index 000000000..29ce00780 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerQSS/qt-mat-tweaks.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerQSS/qt-material-box.png b/PW38_2023_GranCanaria/Projects/SlicerQSS/qt-material-box.png new file mode 100644 index 000000000..b5bc897cd Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerQSS/qt-material-box.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerTMS/README.md b/PW38_2023_GranCanaria/Projects/SlicerTMS/README.md new file mode 100644 index 000000000..d6858ce3c --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/SlicerTMS/README.md @@ -0,0 +1,78 @@ +Back to [Projects 
List](../../README.md#ProjectsList) + +# Real-time visualization for transcranial magnetic stimulation (TMS) + +## Key Investigators + +- Loraine Franke (University of Massachusetts Boston) +- Jax Luo (BWH & Harvard Medical School) +- Yogesh Rathi (BWH & Harvard Medical School) +- Lipeng Ning (BWH & Harvard Medical School) +- Steve Pieper (Isomics, Inc.) +- Daniel Haehn (University of Massachusetts Boston) + +# Project Description + +Transcranial magnetic stimulation is a noninvasive procedure used for treating depression with magnetic and electric fields to stimulate nerve cells. +A TMS coil is slowly moved over the subject's head surface to target certain areas in the brain. +Our project aims to develop a deep-learning powered software for real-time E-Field prediction and a visualization of TMS within 3D Slicer. + +## Objective + +Real-time visualization of an electric field (E-field) for transcranial magnetic stimulation (TMS) on the brain surface, visualization through an AR app (over browser). + +## Approach and Plan +What is done so far: +1. We created a TMS module in Slicer mapping NifTi file onto brain mesh with 3D TMS coil that can be moved by the user. +2. OpenIGTLinkIF is used to transfer data (E-Field from TMS) into 3D Slicer +3. Connected 3DSlicer to the web browser using our newly implemented secure WebSocket from https://github.com/liampaulhus/slicerWebWSS-WIP +4. Mobile device via WebXR connected and we can control the coil inside 3DSlicer. +5. We have integrated a deep learning model (CNN) inside our SlicerTMS module. We receive real time updates of newly generated Nifti files via the OpenIGTlink Plugin. The current deep learning model predicts the TMS E-field. We visualized this field with the magnetic field of the coil in the correct position on the brain mesh. +6. Besides the brain surface, we can visualize the E-Field on tractography fiber bundles.
We have integrated the Fiber Bundle selection with an ROI attached to the TMS coil with the SlicerDMRI module. + +## Progress and Next Steps + +1. We improved the performance of the Fiber ROI selection by downsampling the fibers (see demo below). +2. Fixed CUDA bug of the neural network model that generates the Nifti files to be visualized. + + +## Illustrations + + +#### Current Visualization of the TMS Module in 3DSlicer with Coil and.... +#### Mapping of E-field on tractography with ROI selection: + +SlicerTMS Module with Efield mapped on fiber tracts + +#### Mapping of E-field on brain surface: + +SlicerTMS Module with Efield mapped on brain + + +# Background and References + + + +## Infos for running WebXR: + +Phones need a Depth sensor to run AR/VR. A list of supported devices can be found here: https://developers.google.com/ar/devices + +On an Android Phone via USB: +- PlayStore: Download Google VR Services and Google AR Services App +- Update Chrome/Camera apps etc. +- On the phone: Enable Developer tools (https://developer.android.com/studio/debug/dev-options) and USB debugging (description here: https://developer.chrome.com/docs/devtools/remote-debugging/) +- Run chrome://inspect#devices in the browser on your computer and it should detect USB connected devices + +For iPhone: +- Mozilla offers a WebXR Emulator that can be downloaded from the Apple Store for any iPhone and iPad: https://labs.mozilla.org/projects/webxr-viewer/ + +## For the full SlicerTMS Module and instructions see our [repository](https://github.com/lorifranke/SlicerTMS) + +## Also see previous project week [PW 37](https://github.com/NA-MIC/ProjectWeek/tree/master/PW37_2022_Virtual/Projects/SlicerTMS) + + + +The main controller interactions in SlicerVR have been broken for about a year, some interaction types even longer. It would be crucial for keeping SlicerVR usable to make the interactions work again. 
+ +Kitware and Robarts (Jean-Christophe Fillion Robin, Lucas Gandel, Sankhesh Jhaveri, Adam Rankin) have been investing resources and effort in rehauling the AR/VR backend in VTK for a while, thus now we have a new OpenXR backend and restructured libraries SlicerVR is built on. The goal is to give a small push to their efforts in terms of SlicerVR interactions during the project week, towards restoring at least the previous feature set. + +## Objective + + + +1. Fix the main controller interactions + * Flying (joystick forward-backward) + * Grab and move objects (trigger down and move) + * Two-controller world move/zoom (i.e. 3D pinch) +2. Customization of controller buttons. Either via the + * Method in-place (functions integrated [here](https://github.com/KitwareMedical/SlicerVirtualReality/pull/87), see also [here](https://github.com/KitwareMedical/SlicerVirtualReality/pull/83)) + * Json manifest files (see [here](https://github.com/Kitware/VTK/tree/master/Rendering/OpenVR)) + +## Approach and Plan + + + +1. Continuous coordination with Kitware / Robarts about our potential to help during the week + * Make sure the absolutely necessary core VTK changes are in place by the start of the week (RecognizeComplexGesture virtual etc.) +3. Set up VR workstations at the hotel to be able to test and develop +4. Make sure event pipeline reaches the SlicerVR functions handling the events +5. Fix interactions: fly, grab, pinch3D + +## Progress and Next Steps + + + +1. Restored support for complex gesture (grip) through [SlicerVirtualReality#109](https://github.com/KitwareMedical/SlicerVirtualReality/pull/109) and VTK [MR-9892](https://gitlab.kitware.com/vtk/vtk/-/merge_requests/9892) +1. Restored partial support for interactions like `Select3DEvent` and `ViewerMovement3DEvent` through [SlicerVirtualReality#108](https://github.com/KitwareMedical/SlicerVirtualReality/pull/108) +1. 
Cherry-picked said commits, built, and confirmed that the fly and grab&move features worked +1. Confirmed that the actions manifest works on the HP Reverb 2 headset +1. Started work towards restoring the "Pinch 3D" complex gesture (saving initial physical to world transform and manipulate world as the controllers move) +1. Created coarse UML model of the relationship between SlicerVR and VTK VR to help troubleshoot the module. + +![Class diagram SlicerVR vs VTK](slicer-vr-class-diagram-2.png) + +# Illustrations + + + +https://user-images.githubusercontent.com/1325980/216611394-4e46ff9a-21b1-4e96-8f0b-48f81da5866a.mp4 + +[![SlicerVR example](https://i.ytimg.com/an_webp/F_UBoE4FaoY/mqdefault_6s.webp?du=3000&sqp=CN7D_50G&rs=AOn4CLDzwAi5yXSmiMEkmmgMkmwYpQJY3Q)](https://www.youtube.com/watch?v=F_UBoE4FaoY&t=153s&ab_channel=PerkLabResearch) + +[![SlicerVR collaborative example](https://i.ytimg.com/an_webp/Sw3JyKfvW6Q/mqdefault_6s.webp?du=3000&sqp=CJy8_50G&rs=AOn4CLDkH1pgzs3NCJqno3cJrc5lz8Oq-Q)](https://www.youtube.com/watch?v=Sw3JyKfvW6Q&ab_channel=EbatincaS.L.) + +![In-VR widget example](https://projectweek.na-mic.org/PW37_2022_Virtual/Projects/SlicerVRInfrastructure/VRWidget.gif) + +# Background and References + + + +Past project week pages +* [Project week #37 page](https://projectweek.na-mic.org/PW37_2022_Virtual/Projects/SlicerVRInfrastructure) +* [Project week #35 page](https://projectweek.na-mic.org/PW35_2021_Virtual/Projects/SlicerVR/) +* [Project week #34 page](https://projectweek.na-mic.org/PW34_2020_Virtual/Projects/SlicerVR/) + +Pinter, C., Lasso, A., Choueib, S., Asselin, M., Fillion-Robin, J. C., Vimort, J. B., Martin, K., Jolley, M. A. & Fichtinger, G. (2020). SlicerVR for Medical Intervention Training and Planning in Immersive Virtual Reality. IEEE Transactions on Medical Robotics and Bionics, vol. 2, no. 2, pp. 108-117, May 2020, doi: 10.1109/TMRB.2020.2983199. 
diff --git a/PW38_2023_GranCanaria/Projects/SlicerVRInteractions/slicer-vr-class-diagram-2.png b/PW38_2023_GranCanaria/Projects/SlicerVRInteractions/slicer-vr-class-diagram-2.png new file mode 100644 index 000000000..1bd4acd28 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerVRInteractions/slicer-vr-class-diagram-2.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerVisualDICOMbrowser/CTKDICOMQueryRetrievePanel.png b/PW38_2023_GranCanaria/Projects/SlicerVisualDICOMbrowser/CTKDICOMQueryRetrievePanel.png new file mode 100644 index 000000000..f47fc4d7c Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerVisualDICOMbrowser/CTKDICOMQueryRetrievePanel.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerVisualDICOMbrowser/PrototypeDICOMQueryRetrievePanel.png b/PW38_2023_GranCanaria/Projects/SlicerVisualDICOMbrowser/PrototypeDICOMQueryRetrievePanel.png new file mode 100644 index 000000000..4a324d4db Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SlicerVisualDICOMbrowser/PrototypeDICOMQueryRetrievePanel.png differ diff --git a/PW38_2023_GranCanaria/Projects/SlicerVisualDICOMbrowser/README.md b/PW38_2023_GranCanaria/Projects/SlicerVisualDICOMbrowser/README.md new file mode 100644 index 000000000..376a20fd8 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/SlicerVisualDICOMbrowser/README.md @@ -0,0 +1,75 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Visual DICOM browser + +## Key Investigators + +- Davide Punzo (Freelancer, France) +- Andras Lasso (Perk Labs, Canada) +- Gabriel Kwiecinski Antunes (WebKriativa, Brazil) +- Ralf Floca (German Cancer Research Center, Germany; remote) +- Anyone is welcome to join + +# Project Description + +Implement a visual DICOM browser with thumbnails and query/retrieve/store capabilities for DICOM databases. + +## Objective + +Have a simple user-friendly interface to query/retrieve/store from a DICOM server. 
+The user interface would render series thumbnails so the users can easily navigate the DICOM database +(see illustrations for a first UI Design prototype). +Moreover, perfomance needs to be boosted as much as possible: + +1) fetch metadata only when strictly required (e.g. get series metadata only when the user clicks and opens a study item of the list) + +2) the fetch should be performed in async with Qthreads/workers and parallelized. + + +## Approach and Plan + +1) Get feedback: ask feedback on the UI prototype (e.g., Osirix, MITK, Weasis, cloud UIs) + +2) Design the solution + +3) Start implementation + +## Progress and Next Steps + +1) Meeting done on Tuesday (31/01/2023) + +2) Design: We will proceed with the pure C++ CTK implementation + - (A) we will implement the logic and UI in the self-contained CTK components: + - UI: display list of studies per patient with thumbnails. Show server and local content together (server content will have a small cloud icon) + - for avoing any bottleneck, we will optimize the fecthing of metadata and data by downloading only when strictly required + - we will get only few attributes at study level (StudyID, Study Description, Patient Birth Date and Patient Sex) when querying with a Patient Name or MRN + - once a Patient is selected, and the user has the list of the studies + - for each study item, we will download only few attributes from the series metadata and instances that we need for the thumbnails (Series Descriptin, Modality, Rows, Columns, and the "middle" instance Data fro the thumbnail rendering) + - improvement of the networking API: + - methods to retrieve instances metadata and DICOM files + - allow to query study and series metadata separately + - (B) we will implement async/parallel query/retrieve/store capabilities. This should be implemented with QThreads and workers with a pool manager where each task has a priority (similar to the cornerstoneWADOLoader). 
The infrastructure will also start downloading Metadata and Data as soon as the user selects a Patient in background. However, users' clicks on the UI will give high priority to tasks (e.g. opening a studyItem), while background tasks will have a lower priority. This will greatly enhance the user experience since we will have a responsive UI in the style of Web applications (e.g. OHIF) + - (C) we will design/implement how to stream/load data into Slicer (into the volumeNode) in an async way from the localDICOMDatabase. The pool manager in CTK could have signals to tell Slicer to grab stacks of downloaded Frames and Slicer can start filling the ImageData in chunks + - NOTE I: experiment with websocket from Marco Nolden https://github.com/nolden/CTK/commit/16ee8d0773ce37290636000d836ad107b4526085 + - NOTE J: web project from Stefan Denner ([link](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/KaapanaFastViewingAndTaggingOfDICOMImages/)). This is very nice and we could use this and communicate between javascript/C++. However the project uses cornerstone -> dicomwebclient and our requirement is that the solution has to work for any server (not only dicomweb servers). + - NOTE K: the logic to define which is the instance to use for rendering the series thumbnail is still unclear. In the prototype we are sorting the instances using the frame number and using the "middle" one. However this is not reliable. We asked David Clunie and the only way is to compute the order from the Image Position Patient and Orientation and for 4D datasets we need to use also the time information. + +3) Progress: + - I have started adding the missing features in CTK: querying/retrieving at instances level, separating the query of attributes at the level of studies and series.
+ - Next steps are: + - redo the UI designed in Python for the prototype in C++ (CTK) + - implement the async/parallel priority pool manager for query/retrieve/store from/to localDICOMDatabase and server + - stream the data from the localDICOMDatabase to 3DSlicer into volumeNodes + +# Illustrations +Prototype: + + + +Current CTK Query Retrieve Panel: + +CTKDICOMQueryRetrievePanel + + +# Background and References +[CTK class](https://github.com/commontk/CTK/blob/master/Libs/DICOM/Widgets/ctkDICOMQueryRetrieveWidget.cpp) diff --git a/PW38_2023_GranCanaria/Projects/SystoleOS/README.md b/PW38_2023_GranCanaria/Projects/SystoleOS/README.md new file mode 100644 index 000000000..932411cdf --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/SystoleOS/README.md @@ -0,0 +1,193 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Systole OS: an operating system for development/deployment of medical devices. + +## Key Investigators + +- Rafael Palomar (Oslo University Hospital and NTNU, Norway) +- Steve Pieper (Isomics Inc., Cambridge, MA, USA) + +# Project Description + + + +For more than a decade, 3D Slicer has been enabling world-class biomedical +research. The great success of 3D Slicer is now pushing the boundaries of +research, making some research groups and companies regard 3D Slicer as a viable +software for building medical devices that not only could support regular +clinical workflows but also become commercial products. While the development of +3D Slicer has been tailored towards research, its modular architecture makes the +development of industrial prototypes possible. + +The vision of Systole OS is the integration of 3D Slicer and related software (e.g., +Plus Toolkit, MONAI Label and more!) in a free and open-source operating system +based on GNU/Linux, with the aim to support the development and deployment of +medical devices.
+ +![Systole](systole.png) + +Here are some of the features that we would like to leverage in +Systole OS: + +### Cutting-edge software + +Systole OS is based on Gentoo Linux, which follows a rolling-release model +providing up-to-date support out of the box. + +### Installable Slicer... + +Slicer, together with all the required dependencies will be installable with a simple +command (e.g., `emerge sci-medical/slicer`). No SuperBuild! No Slicer-Launcher! + +### ...and Modular Slicer + +The base installation of 3D Slicer will include only the components needed to +run the application (e.g., `emerge slicer-modules/models` can be done +separately). + +### Source-based + +Systole OS is a **source-based** distribution, which means that all packages +will be built from source. Having the flexibility to make decisions on +compile-time allows: + + - Tighter hardware integration + - Highly configurable packages (e.g., `flaggie sci-medical/slicer -python +opencl; emerge sci-libs/slicer` will install Slicer without python support and with opencl support) + - Portability to hardware architectures other than amd64 (e.g., arm, risc-v). + +### Extensible + +Systole OS works on the Gentoo overlay system which allows you to extend the +system with your own overlay or override packages provided by Systole. + +## Objectives + +The main objective for PW38 is the consolidation of the development achieved in [PW37](https://github.com/NA-MIC/ProjectWeek/tree/master/PW37_2022_Virtual/Projects/SystoleOS) in Virtual Machines and Containers; this will enable researchers and developers to test the system and contribute to its development. This objective includes the generation of documentation to get started with the project.
+ +As secondary objectives we aim to continue advancing on the integration of 3D Slicer: + + - Enabling Python support + - Porting scripted modules + +as well as the integration of allied technologies: + + - Plus Toolkit + - MONAI Label + - Total Segmentator + +## Approach and Plan + +1. Project discussion +2. Release of the SystoleOS development VMs and Containers +2. Documentation on how to get started with SystoleOS (gentoo-overlay, containers, VMs) + +## Progress and Next Steps + +### Systole Overlay progress PW38 + +```txt +Systole Overlay +├── dev-libs +│   └── qRestAPI #OK +├── dev-python +│   └── PythonQt_CTK #Needs update +├── metadata +├── profiles +├── sci-libs +│   ├── itk #OK +│   ├── SimpleITK #Needs update +│   ├── vtk #OK +│   └── vtkAddon #OK +├── sci-medical +│   ├── ctk #OK +│   ├── CTKAppLauncherLib #OK +│   ├── elastix #Needs update +│   ├── gdcm #OK +│   ├── Slicer #Work-in-progress +│   ├── teem #OK +│   └── vmtk #Needs update +├── Slicer-CLI #Needs update +│   ├── ACPCTransform | +│   ├── AddScalarVolumes | +│   ├── ExtractSkeleton | +│   ├── ModelMaker | +│   └── SlicerExecutionModel | +├── Slicer-Loadable +│   ├── Annotations #Remove +│   ├── Cameras #Needs update +│   ├── Colors | +│   ├── Data | +│   ├── DoubleArrays | +│   ├── Markups | +│   ├── Models | +│   ├── Plots | +│   ├── Reformat | +│   ├── SceneViews | +│   ├── Segmentations | +│   ├── SlicerWelcome | +│   ├── SubjectHierarchy #Work-in-progress +│   ├── Tables | +│   ├── Terminologies #OK +│   ├── Texts #Needs update +│   ├── Transforms | +│   ├── Units | +│   ├── ViewControllers | +│   ├── VolumeRendering | +│   └── Volumes | +├── Slicer-Scripted #Needs update +│   ├── DataProbe | +│   ├── Editor | +│   ├── EditorLib | +│   ├── Endoscopy | +│   ├── LabelStatistics | +│   ├── PerformanceTests | +│   ├── SampleData | +│   ├── ScreenCapture | +│   └── SegmentEditor | +└── x11-libs #OK + └── gdk-pixbuf +``` + +### Development environments + +![SystoleOS utility boxes and 
development environments](systoleos_dev_envs.png) + +### Contributing back to Slicer + +Some of the work done for SystoleOS can be used to improve the Slicer code base and help on the future modularization of Slicer. We will coordinate with the Slicer core developers to include as much as possible in Slicer. + +```txt +baea9d1dc3 * origin/Systole-patches/Slicer ENH: Provide an install version ov vtkSlicerConfigure.h +11e3802dcc * ENH: Fix qt-loadable-modules installation dirs +016d426ec2 * ENH: Enable installation of SLicerBase header files +f242cb635c * ENH: Update SlicerInstallConfig +138aaed6e3 * ENH: Add templates infrastructure +de7ff279d1 * ENH: Installation and setup qSlicerExport.h.in +2dca449c7b * ENH: Remove extension/launcher cmake code from UseSlicer.cmake +97ba113555 * ENH: Add vtkAddon as a requirement in UseSlicer.cmake +27be7571e8 * ENH: Add CTK as requirement in UseSlicer.cmake +3e8d0cbaf6 * ENH: Make installed CMake files available +c49f8f1dff * ENH: Generate and Install SlicerConfig (install tree) +3781a3682c * ENH: Add variable install dirs for Libs dev files +01f68e9856 * ENH: Use slicer installation dirs for base dev components +ec5d1bc8e3 * ENH: Use CMake GNUInstallDirs in Slicer directories +63ff5e26b9 * ENH: Remove the 'App-real' suffix from Slicer executable +feb34841e8 * ENH: Install testing data only with testing support +5d8961c414 * ENH: Limit CPack on non-superbuild mode +e0cebde590 * ENH: Remove conditional code for old VTK +fc6ff2f72f * ENH: Make optional the use of Slicer ITK +cb2a0161ce * COMP: Adapt to new qRestAPI cmake +ae6e0617bd * COMP: Find Eigen (required) +70dc2afdb2 * COMP: Add vtk CommonSystem component as requirement +``` + +For reference, here is a discussion where this support can be used for the generation of a GNU/Linux Flatpak: https://discourse.slicer.org/t/interest-to-create-flatpak-for-3d-slicer-have-issue-with-guisupportqtopengl-not-found/16532 + +## Future work + +Our most pressing issue right now is to 
integrate all the core modules and release testing virtual machines and containers. After this, more software packages like MONAI Label, Plus Toolkit, TotalSegmentator, VMTK and Elastix will be integrated. Contributions welcome!! + +# Background and References +1. [SystoleOS project in PW37](https://github.com/NA-MIC/ProjectWeek/tree/master/PW37_2022_Virtual/Projects/SystoleOS) + + diff --git a/PW38_2023_GranCanaria/Projects/SystoleOS/systole.png b/PW38_2023_GranCanaria/Projects/SystoleOS/systole.png new file mode 100644 index 000000000..53fb25065 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SystoleOS/systole.png differ diff --git a/PW38_2023_GranCanaria/Projects/SystoleOS/systoleos_dev_envs.png b/PW38_2023_GranCanaria/Projects/SystoleOS/systoleos_dev_envs.png new file mode 100644 index 000000000..98012be15 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/SystoleOS/systoleos_dev_envs.png differ diff --git a/PW38_2023_GranCanaria/Projects/TTTAtlas/README.md b/PW38_2023_GranCanaria/Projects/TTTAtlas/README.md new file mode 100644 index 000000000..594b72706 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/TTTAtlas/README.md @@ -0,0 +1,152 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Development of Anatomy Atlases and Training Tools with 3D Slicer and Open Source Software + +## Key Investigators + +- Juan Ruiz (ULPGC) +- Idafen Santana (ULPGC) +- Mario Monzón (ULPGC) +- Aday Melián (ULPGC) +- Miguel Ángel Rodríguez (ULPGC) +- Pablo Castellano (ULPGC) +- Jose Carlos Mateo (ULPGC) +- Nayra Pumar +- Babacar Diao +- Christine Sokhna +- Magate Gueye +- Charles Diem +- Khedijetou Vilaly +- Bella Konan + +# Project Description + +With the help of 3D Slicer and open source software, anatomy atlases and training tools are becoming more accessible than ever. These tools allow medical professionals to gain a better understanding of the human body, from its organs and muscles to its bones and tissues. 
They also provide an opportunity for medical students to practice their skills in a virtual environment without risking any harm to real patients. + +This project aims to provide the tools and knowledge for generating a comprehensive set of atlases. Additionally, it will enable the development of innovative tools such as augmented reality (AR) applications and virtual reality (VR) simulations to further enhance the learning experience. + +## Objectives + +1. Objective A: Identify all resources and current state of open source atlases content so far and analyze the strength of Slicer 3D on this subject. + +2. Objective B: Select different open source atlas (OpenAnatomy and z-Anatomy as starting points) and define common needs for atlas creation. + +3. Objective C: Check and conclude what implementations or changes could improve our Virtual Dissection project and what is our best bet to use VR and AR with human anatomy. + +## Approach and Plan + +1. Research the state of the art of open source atlases meeting and discussing with previous atlas workers and get the current state and needs from different points of view. + +2. Compile, test and identify open source atlases, how they work and what features and options they provide. + +3. Check the inclusion of Slicer3D into the pipeline of open source atlases created for web access or standalone apps + +4. Identify potential features from the atlases that can be used or improved with little effort: VR, AR, Collab... + +## Progress and Next Steps + +1. Attended a conference by Michael Halle about Open Anatomy atlases and the [Open ABrowser](https://github.com/mhalle/oabrowser) open source project to know more about it + - Dedicated implementation of a WebGL project for easy access and user interactions + - Import directly Slicer3D content (MRML) to render and use on any device through the web + - Talked about the current state, next steps and challenges faced + +2.
Meeting & discussions with Nayra Pumar, researcher who has already worked in affordable custom atlases [PAPER: Affordable Custom Three-Dimensional Anatomy Atlases](https://ieeexplore.ieee.org/document/9033044) + + +3. Meet & discussion with [Babacar Diao](https://mt4sd.ulpgc.es/es/equipo/instituciones/babacar-diao/), physician who has created and used them as learning tools: + - They work with Slicer3D for making the creation of 3D Models from real world CT images, segmentation and they use Slicer software as the atlas itself. + + - This is an affordable way to create and apply on student classes realistic atlases based on real life humans + + - The drawback of this way of using atlas is that it needs a little formation in the users on Slicer3D usage + + - On the other hand this allows students to be able to improve and make better atlases from the previous work + + - In conclusion, he has found out by several classes and teaching sessions that the students' classifications improve while using atlases as learning tools compared to others that don't use it + +4. Technical deconstruction of an Atlas. We have divided the concept of digital atlas into these features/needs: + - 3D Content provider + - 3D Models ready or adaptable to be used into atlas + + - Content adaptation for the needs of the project + - Customized distribution (Taxonomy) + - How do we divide/distribute the anatomy? Ex: We might need the full heart object but we have it divided into all its parts in small pieces. There are several ways to divide an object + + - 3D Content adapter (For custom use in apps) + - 3D objects merge and retopologization in order to use it into the atlases fulfilling all the requisites and hardware specs.
+ + - Viewer (Engine/Renderer) + - The software used to render the final atlases and add features for users work with the anatomy + + - Languages + - Depending on the use we could have different language options or we may need only the native language + + - Data validation + - If we are working with anatomic tools for learning, we cannot allow bad information, we need a system to check every model and pieces in the human body is well named and well modeled. + + - Using Slicer3D we have the certain that there is no wrong content because it comes directly from a human, not 3d modeled. The errors could come from the segmentation part, but we keep the editable capabilities. + +6. We made a research of open source atlases and related content to anatomic learning tools and we have recopilated/developed this tools for each layer: + - 3D Contents provider + - [Slicer3D](https://www.slicer.org/) + - [Open anatomy](https://www.openanatomy.org/) + - [z-Anatomy](https://www.z-anatomy.com/) + - [BodyParts](https://lifesciencedb.jp/bp3d/) + + - Content adapter + - [BodyParts3D (Anatomography)](https://lifesciencedb.jp/bp3d/) + + - Languages + - [Terminologia Anatomica](https://ta2viewer.openanatomy.org/) + - [Wikipedia]() + - Custom medical translations (Depends on medic profile for each iteration) + + - Content customized distribution (Taxonomy) + - [BodyParts3D (Anatomography)](https://lifesciencedb.jp/bp3d/) + - We have already created a little pipeline to reorder and merge 3d pieces of the anatomy from different systems into bigger objects more handy + + - Viewer (Engine/Renderer) + - [Online 3D Viewer](https://github.com/kovacsv/Online3DViewer) - GLTF renderer + - [OABrowser](https://github.com/mhalle/oabrowser) - Uses Slicer3D Content + - We created some tests using z-Anatomy full body to check the power required on web rendering from mobile devices. (Image #4) + +7. 
We have been working last year on creating atlas and human based apps using Unity3D as learning tools. + - We have identified that the challenges we have faced are common challenges in this kind of content + - We have created some helping software to aid us to modify and adapt the anatomy for our needs using web technologies: + - Anatomic Tree Adapter: Web application to reorganize all z-Anatomy models as we need into our custom apps. (Image #3) + + - Human Viewer: Web application to render all z-Anatomy body to check the performance of Three.JS technology from mobile devices. (Image #4) + +8. Conclusions and next steps: + - When we started creating atlases, we decided to use Slicer with the OpenAnatomy only as exporter of all content for the use into Unity3D and Three.JS apps. + - We have found out during this week that Slicer3D is more useful than we valued on the early stages of our projects: + - It can be used as Atlas itself for teaching anatomy with successful cases as we talked to Babacar Diao + - It can create human anatomy models by CT segmentations and render them, then edit and polish to iterate and get the most realistic models of humans and then easily export to other engines or apps.
+ - VR and AR experiences are features supported by Slicer3D natively + - The amazing community behind it on continuous development makes it a reliable option to work in the future + - We will go deep into the previous works on atlases to improve our dissection project (AVRIR) with everything we have learned during the PW38 + +# Illustrations +- Image #1 - Meeting with Babacar Diao in NAMIC Project week 38 +![MeetingBabacar](https://user-images.githubusercontent.com/5611348/216622689-93eee25a-c7d1-44fc-bd5a-490e1379b417.jpeg) + +- Image #3 - Anatomic Tree Adapter for readapt human anatomy +AnatomicTree + +- Image #4 - Human Viewer - Three.JS performance test +WebHumanViewer + +- Image #5 - Segmentation by Babacar Diao +Capture d’eěcran 2023-02-03 aĚ 13 45 07 + +- Image #6 - Segmentation by Babacar Diao +Capture d’eěcran 2023-02-03 aĚ 13 45 20 + +# Background and References +- Last two years we have been working with anatomic 3d models to create educative experiences like VRAINS (VR Anatomic atlas - ULPGC) and AVRIR (Collaborative VR dissection - ULPGC) using OpenAnatomy and z-Anatomy content imported into Unity3D. + +- We have found several challenges with the adaptation of this kind of content for our objectives (Excess of polygons for target devices, too detailed anatomic distribution, language localization...). 
+ +- [The impacts of three-dimensional anatomical atlas on learning anatomy](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6449593/) + +- [Commercial anatomic dissection app](https://www.medicalholodeck.com/en/) diff --git a/PW38_2023_GranCanaria/Projects/TeethSegmentation/README.md b/PW38_2023_GranCanaria/Projects/TeethSegmentation/README.md new file mode 100644 index 000000000..d8b7e2c0d --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/TeethSegmentation/README.md @@ -0,0 +1,69 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Multi-stage dental segmentation using MONAI Label + +## Key Investigators + +- David García Mato (Ebatinca S.L., Las Palmas de Gran Canaria, Spain) +- Yucheng Tang (NVIDIA, USA) +- Andres Diaz Pinto (NVIDIA, UK) +- Daniel Palkovics (Semmelweis University, Budapest, Hungary) +- Csaba Pinter (Ebatinca S.L., Las Palmas de Gran Canaria, Spain) +- Attila Nagy (University of Szeged, Szeged, Hungary) +- Brianna Burton (3D Side, Belgium) +- Umang Pandey (Universidad Carlos III de Madrid, Spain) + +# Project Description + +A three-dimensional visualization of dento-alveolar structures can enhance the surgical planning process. However, no fully automated segmentation methods are currently available to generate realistic 3D virtual models of teeth, inferior alveolar nerves and alveolar bone. + +Example: manual segmentation of teeth and alveolar bone. + +drawing + +We have already tested segmentation and deepedit models in MONAI Label. Those models are good for single-label teeth segmentation or mandible segmentation. However, results are not optimal when trying to perform a multi-label segmentation where all teeth are correctly identified and segmented. + +## Objective + + + +1. Create MONAI pipeline for automatic segmentation of dental structures: teeth, mandible and inferior alveolar nerves. + +## Approach and Plan + + + +1. Implement multi-stage approach for teeth segmentation using MONAI Label pipelines. 
At least, two stages: (1) teeth localization and (2) teeth segmentation. +2. Develop model to segment mandible and inferior alveolar nerves. +3. Combine multi-stage teeth segmentation with mandible and nerve segmentation. + +## Progress and Next Steps + + + +1. Test Deepedit model for automatic segmentation of mandible and inferior alveolar nerves. + +2. Create and test pipeline for dental segmentation using only two stages: teeth localization + teeth segmentation + +3. Create and test pipeline for dental segmentation using three stages: teeth localization, teeth centroid computation and teeth segmentation. Pipeline based on vertebra pipeline with some modifications. + +# Illustrations + + + +Result: teeth segmentation using dental pipeline (2 stages) + +drawing + +Result: mandible and inferior alveolar nerves segmentation using DeepEdit. + +drawing + + +# Background and References
- Related project from 37th NA-MIC Project Week: [Multi-stage deep learning segmentation of teeth](https://projectweek.na-mic.org/PW37_2022_Virtual/Projects/MultistageTeethSegmentation/) + + diff --git a/PW38_2023_GranCanaria/Projects/TextureModelImport/README.md b/PW38_2023_GranCanaria/Projects/TextureModelImport/README.md new file mode 100644 index 000000000..3a4b7fe8e --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/TextureModelImport/README.md @@ -0,0 +1,74 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Automated map texture when importing the OBJ file into Slicer + +## Key Investigators + +- Chi Zhang (Seattle Children's Research Institute) +- Steve Pieper (Isomics, Inc.) +- A. Murat Maga (Seattle Children's Research Institute) +- Andras Lasso (The Perk Lab, Queen’s University) +- Sara Rolfe (Seattle Children's Research Institute) + +# Project Description + + +Automatically map the associated texture image to the obj file when importing it into Slicer without creating a volume node for the texture image. 
This can facilitate importing textured models acquired by photogrammetry into Slicer. The ultimate goal is to be able to access the OpenDroneMap (ODM) photogrammetric package via Slicer to facilitate the use of photogrammetry. + +## Objective + + + +When the obj file is imported into Slicer, Slicer will automatically call the Texture Model module from SlicerIGT to map the texture on the obj file without the need to import the texture image as a volumetric node and manually map it to the model using this module. + +## Approach and Plan + + + +1. Register a hook for the obj file type in the data import dialog (a similar approach suggested by Steve Pieper for loading nii file as either volume or segmentation (NIFTI file reader from SlicerDMRI extension: ([https://github.com/pieper/SlicerDMRI/blob/nifiio/Modules/Scripted/NIfTIFile/NIfTIFile.py](https://github.com/pieper/SlicerDMRI/blob/nifiio/Modules/Scripted/NIfTIFile/NIfTIFile.py)) +1. When the 'obj' option is selected in the data importing dialog, the 'Texture Model' functions will be called to automatically map texture to the model (The TextureModel module of SlicerIGT: [https://github.com/SlicerIGT/SlicerIGT/tree/master/TextureModel](https://github.com/SlicerIGT/SlicerIGT/tree/master/TextureModel) +1. Here is an example file [https://drive.google.com/file/d/1ZxJcx2nM-fgywA8KMm6JO0t7QJIcQR7O/view?usp=sharing](https://drive.google.com/file/d/1ZxJcx2nM-fgywA8KMm6JO0t7QJIcQR7O/view?usp=sharing)) + +## Progress and Next Steps + + + +1. The goal is basically reached, thanks to the help from Steve Pieper. The script `OBJFile.py` is incorporated in the forked SlicerMorph repository: https://github.com/chz31/SlicerMorph. Git clone the repository and use the Extension Wizard to install the SlicerMorph extension. + + +2. After that, the `OBJ textured model` option would be registered in the data dialog. 
Drag the OBJ into Slicer and select the `OBJ textured model` option. + +

+ +

+ + + +3. The mtl file (in the same directory) will then be automatically parsed to retrieve the texture image name. + +image + + + +4. The `ImageStacks` functions from SlicerMorph will then be called to import texture as a vector volumetric node and map to the model using `SetTextureImageDataConnection`. The texture node will then be deleted. + +

+image +image +

+

+

+ +# Illustrations + + + + +# Background and References + +Chi Zhang is working on a photogrammetry pipeline based on the open source package OpenDroneMap (ODM). Ultimately, the goal is being able to push and pull data between Slicer and ODM. + +A sample obj file with associated texture can be downloaded here: https://drive.google.com/file/d/1ZxJcx2nM-fgywA8KMm6JO0t7QJIcQR7O/view?usp=sharing diff --git a/PW38_2023_GranCanaria/Projects/US-guided_TrainingSystem/NAMIC_Demo_00.gif b/PW38_2023_GranCanaria/Projects/US-guided_TrainingSystem/NAMIC_Demo_00.gif new file mode 100644 index 000000000..94bee9ff8 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/US-guided_TrainingSystem/NAMIC_Demo_00.gif differ diff --git a/PW38_2023_GranCanaria/Projects/US-guided_TrainingSystem/NAMIC_Demo_01.gif b/PW38_2023_GranCanaria/Projects/US-guided_TrainingSystem/NAMIC_Demo_01.gif new file mode 100644 index 000000000..67bbcfe56 Binary files /dev/null and b/PW38_2023_GranCanaria/Projects/US-guided_TrainingSystem/NAMIC_Demo_01.gif differ diff --git a/PW38_2023_GranCanaria/Projects/US-guided_TrainingSystem/README.md b/PW38_2023_GranCanaria/Projects/US-guided_TrainingSystem/README.md new file mode 100644 index 000000000..6f8cabd17 --- /dev/null +++ b/PW38_2023_GranCanaria/Projects/US-guided_TrainingSystem/README.md @@ -0,0 +1,49 @@ +Back to [Projects List](../../README.md#ProjectsList) + +# Training system for US-guided lung interventions + +## Key Investigators + +- Natalia Arteaga Marrero (IACTEC, Santa Cruz de Tenerife, Spain) +- David García Mato (Ebatinca S.L., Las Palmas de Gran Canaria, Spain) +- Javier González Fernández (ITC, Santa Cruz de Tenerife, Spain) +- Jordan Ortega Rodríguez (IACTEC, Santa Cruz de Tenerife, Spain) +- Gara Ramos (IACTEC, Santa Cruz de Tenerife, Spain) + +# Project Description + +The aim of the project is to develop a training system that provides a quantitative report regarding the use of a low-cost lung phantom [1] in ultrasound-guided interventions, 
particularly for core needle biopsy (CNB). + +A phantom with several embedded solid inclusions was fabricated. These inclusions are colour coded; thus, a qualitative report can be used to indicate the success +of a CNB procedure. However, a quantitative analysis is required. For this reason, the development of a training system is planned. + +## Objective + +1. Objective A. Fabrication of dedicated phantom +2. Objective B. Setting up the system's tools +3. Objective C. Application development + +## Approach and Plan + +1. Phantom model --> Accurately replicated thoracic tissues (lung, ribs, and solid masses) providing a useful tool for training US-guided procedures +2. Tools connections via Optitrack and Plus Toolkit (Telemed/Clarius system and needle for biopsy) +3. Application development + +## Progress and Next Steps + +1. Fabrication of dedicated phantom --> Virtual and reconstructed model finished +2. Tracking fixtures for the required equipment --> Biopsy needle (Bard 22mm), Telemed MicrUs EXT-1H L12 Probe. Extra tools and some adjustments to be implemented +3. 3D Models of the equipment --> Biopsy needle (loaded and extended) and Telemed US system +4. Optitrack + US System setup --> Connection to SLICER via Plus Toolkit using Telemed US System. Issues to connect the Clarius US system to be solved +5. Preliminary tracking system working --> New calibrations to be performed after upgrading the fixtures + +# Illustrations + + +![](https://github.com/NA-MIC/ProjectWeek/blob/master/PW38_2023_GranCanaria/Projects/US-guided_TrainingSystem/NAMIC_Demo_00.gif) +![](https://github.com/NA-MIC/ProjectWeek/blob/master/PW38_2023_GranCanaria/Projects/US-guided_TrainingSystem/NAMIC_Demo_01.gif) + +# Background and References + +[1] Arteaga-Marrero, N.; Villa, E.; Llanos González, A.B.; Gómez Gil, M.E.; Fernández, O.A.; Ruiz-Alzola, J.; González-Fernández, J. 
+Low-Cost Pseudo-Anthropomorphic PVA-C and Cellulose Lung Phantom for Ultrasound-Guided Interventions. Gels 2023, 9, 74. https://doi.org/10.3390/gels9020074 diff --git a/PW38_2023_GranCanaria/README.md b/PW38_2023_GranCanaria/README.md new file mode 100644 index 000000000..ce95bee78 --- /dev/null +++ b/PW38_2023_GranCanaria/README.md @@ -0,0 +1,511 @@ +## Welcome to the web page for the 38th Project Week! + +[This event](https://projectweek.na-mic.org/PW38_2023_GranCanaria/) will take place from January 30 to February 3rd, 2023 in Gran Canaria, Spain. + +* Project Week 38 will be a hybrid event with a strong in-person component for the first time since 2020. +* The venue for in-person events is Hotel Cristina, Las Palmas, Gran Canaria (Spain). +* A block of rooms at [Hotel Cristina](https://www.dreamplacehotels.com/en/hotel-cristina/) is being held for PW attendees + * To make reservations use code NAMIC23 in email to grupos.mice@dreamplacehotels.com + * 40 rooms are being held for us at the rate of 108€ (single) and 135€ (double) until the end of November +* Please [register](https://forms.gle/sh9jGJLJdBm4us3E7) as early as possible, indicating whether you plan on participating in person. It will greatly help the organizing committee to estimate the number of participants we need to accommodate in Las Palmas. Registration fees for in-person attendees will be determined and collected later using a separate form by the local organizing team. +* **For those attending virtually**: there will be no registration fee, and zoom/discord links will be provided during preparation meetings. +* **For those attending in person**: a registration fee of 400 Euros will be charged to cover for the workshop venue, lunch and coffee breaks. Use [this form](https://www.fulp.es/inscripcion-namic) to register your payment. Please do so by Tuesday, Jan 17th 2023, as we need to estimate the number of participants to finalize the booking of the room. 
+ +If you have any questions, you can contact the [organizers](#organizers). + + + +## Before Project Week +1. Register [here](https://forms.gle/sh9jGJLJdBm4us3E7), it is free! +2. **If you plan to attend in person**, register you workshop fee of 400 Euros [here](https://www.fulp.es/inscripcion-namic). +3. Attend one or more [preparation meetings](#preparation-meetings) to present a project you intend to work on at PW, for which you are seeking collaborators or to join one of the projects proposed by others. +4. Join the [Discord server](https://discord.gg/yQsNVdVpS3) that will be used to communicate with your team during Project Week. Go to [this page](../common/Discord.md) for more info on the use of Discord during PW. +5. Consider joining the [MONAI Label Workshop](MONAILabel_Workshop.md) that will take place January 25th (the week before Project Week). + +## Preparation meetings +We hold weekly preparation meetings at 10am on Tuesdays, starting November 29, 2022. Please join at [this link](https://etsmtl.zoom.us/j/86211702920?pwd=TEl0ZTFDam90WVN5bjZhR05kNVRVZz09) if you have a project that you would like to present or work on during project week or to find out about projects you can join. + +## During PW +PW 38 floor plan + +* **Monday Jan 30th** + * In person attendees can start setting up their computers and material, meeting with their teams and doing a last pass at the project pages from 9am (Las Palmas time) in the Working area. + * Remote attendees are encouraged to do the same on Discord if in a time zone that makes it possible. +* **Project presentations** + * Start at 9am, EST (2pm in Las Palmas). + * Takes place in the main hall for in person attendees and is streamed on Zoom for remote attendees (same zoom link as for preparation meetings). + * Each team delegates a member to present their project in no more than 90 seconds using no other visual support than the project page on GitHub (no time to switch screen sharing). 
+* **Breakout sessions** + * Start every day at 11am EST (4pm in Las Palmas) + * Streamed on Zoom (same link as for project presentation and preparation meetings) +* **Work in project teams** + * Starts at 4am EST (9am in Las Palmas) on Monday + * Takes place in the Working area + * Extends throughout the week before and after breakout sessions (see calendar on the PW webpage) + * Communication between team members takes place on Discord, in the work area, or both depending on where the team members are located + * The CreateEvent function on Discord can be used to plan a meeting (and inform other PW attendees about it) +* **Project results presentation** + * Friday 9am EST, 2pm in Las Palmas. + * Same format as for project presentation + +## Agenda + +

+
+ + + + + + +[How to add this calendar to your own?](../common/Calendar.md) + +## Travel guide + +[Gran Canaria travel guide](GranCanaria_TravelGuide.md) + +## Breakout sessions +[Panel: clinical uses of 3D Slicer](Projects/ClinicalPanel-BreakoutSession.md) (Rudolf Bumm) + +[Developing and using anatomy atlases](Projects/BreakoutSession-Atlases.md) (Michael Halle and friends) + +[NCI Imaging Data Commons workshop/tutorial](https://docs.google.com/document/d/1HMmqVXSshEHf90Vu9LORaXVuoSpJzKxy7StrMBqfSdQ/edit?usp=sharing) (Andrey Fedorov and IDC team) + +[What's new in 3D Slicer Slides](https://docs.google.com/presentation/d/1x9mdOoNHmf8qM4Tz2SGjykdzrX9vJeJqvMEYATU90es/edit?usp=sharing) + +Kaapana workshop (Markus Bujotzek and Kaapana team) + +## Projects [(How to add a new project?)](Projects/README.md) +Categories based on project list of PW37, will be updated as we populate the list of projects. + +Presenter (assumed based on order + confirmed status) in **bold** + +### Early birds + +1. [DATSCAN Viewer implementation in OHIF](Projects/OHIF_DATSCAN/README.md) (Salim Kanoun, Alireza Sedghi, Celian Abadie, Sofien Sellamo) *[??]* + +1. [Cross study sychronizer for OHIF Crosshair](Projects/OHIF_SyncCrosshair/README.md) (Salim Kanoun, Alireza Sedghi, Celian Abadie, Sofien Sellamo) *[??]* + +1. [PolySeg representations for OHIF Viewer Segmentations](Projects/OHIF_PolySeg/README.md)(Alireza Sedghi) _Remote_ + +### VR/AR and Rendering +1. [SlicerVR - Restore Interactions](Projects/SlicerVRInteractions/README.md) (**Csaba Pintér**, Simon Drouin, Andrey Titov, Lucas Gandel, Jean-Christophe Fillion-Robin) *[On site]* + +1. [SlicerTMS](Projects/SlicerTMS/README.md) (**Loraine Franke**, Jax Luo, Yogesh Rathi, Lipeng Ning, Steve Pieper, Daniel Haehn) *[Remote]* + +1. [ARinSlicer](Projects/ARinSlicer/README.md) (**Alicia Pose**, Javier Pascau, Gabor Fichtinger, Andras Lasso...) *[On site]* + +1. 
[Map texture when importing OBJ file into Slicer](Projects/TextureModelImport/README.md) (**Chi Zhang**, Steve Pieper, A. Murat Maga, Andras Lasso, Sara Rolfe) *[Remote]* + +### IGT and Training +1. [Training system for US-guided lung interventions](Projects/US-guided_TrainingSystem/README.md) (**Natalia Arteaga**, David García, Javier González, Gara Ramos) *[On site]* + +1. [Fetal Ultrasound Simulation for Delivery Training](Projects/FetalUltrasoundSimulation/README.md) (**Felix von Haxthausen**, David García, Tolga-Can Çallar, José Carlos Mateo) *[On site]* + +1. [Slicer Liver](Projects/SlicerLiver/README.md) (**Gabriella D'Albenzio**, Ruoyan Meng, Geir A. Tangen, Ole V. Solberg, Rafael Palomar) *[On site]* + +1. [Slicer Maxillofacial Surgery](Projects/Slicer4MaxillofacialSurgery/README.md) (Miguel Ángel Rodriguez-Florido, Christian Buritica, Mauro Dominguez) *[On site]* + +1. [Slicer + IMSTK for low cost training setups](Projects/SlicerIMSTK/README.md) (**Sam Horvath**, Kevin Cleary, Karun Sharma) *[On site]* + +1. [Open Source Technologies for the Development of Clinical Simulation Centers](Projects/OpenSourceSimulationCenter/README.md) (**Juan Ruiz**, **Miguel Angel Rodriguez-Florido**, Idafen Santana, Mario Monzón, etc.) *[On site]* + +1. [Setting up medical imaging courses](Projects/CoursesMedicalImaging/README.md) (**Juan Ruiz**, Idafen Santana, Mario Monzon) *[On site]* + +1. [Development of Anatomy Atlases and Training Tools with 3D Slicer](Projects/TTTAtlas/README.md) (**Juan Ruiz**, Idafen Santana, Mario Monzón) *[On site]* + +1. [Integration of infrared, ultraviolet and hyperspectral sensors in Slicer via Plus Toolkit and OpenIGTLink.](Projects/MultiSpectralSensorIntegration/README.md) (**Francisco J. Marcano Serrano**) *[On site]* + +1. [Ibis in Slicer](Projects/IbisInSlicer/README.md) (**Étienne Léger**, Houssem Eddine Gueziri, Simon Drouinb) *[On site]* + +### Segmentation / Classification / Landmarking +1. 
[Lung CT Segmentation and Analysis](Projects/LungSegmentation/README.md) (**Rudolf Bumm**, Ron Kikinis, Raúl San José Estépar, Steve Pieper, Eserval Rocha jr., Andras Lasso, Curtis Lisle) *[On site]* + +1. [MONAI Label to MONAI bundle conversion](Projects/MONAILabel2bundle/README.md) (**Deepa Krishnaswamy**, Cosmin Ciausu, Nazim Haouchine, Andres Diaz-Pinto, Jesse Tetreault, Roya Hajavi, Stephen Aylward, Steve Pieper, Andrey Fedorov) *[On site]* + +1. [HOWTO: Using MONAI zoo bundle for prostate MRI cancer detection in IDC data](Projects/MONAI_IDC_PCa_detection/README.md) (**Cosmin Ciausu**, Deepa Krishnaswamy, Patrick Remerscheid, Tina Kapur, Sandy Wells, Andrey Fedorov, Khaled Younis) *[On site]* + +1. [Multi-stage dental segmentation using MONAI Label](Projects/TeethSegmentation/README.md) (**David García**, Yucheng Tang, Andres Diaz, Daniel Palkovics, Csaba Pinter, Attila Nagy, Brianna Burton) *[On site]* + +1. [Real-time ultrasound AI segmentation using Tensorflow and PyTorch models](Projects/RealTimeUltrasoundSegmentationAI/README.md) (**María Rosa Rodríguez**, Tamas Ungi, David García, Chris Yeung) *[On site]* + +1. [AtlasYEB_Plugin_WEB_API](Projects/AtlasYEB_Plugin_WEB_API/README.md) (**Sara Fdez Vidal,ICM**) *[On site]* + +1. [Automatic multi-anatomical skull structure segmentation of cone-beam computed tomography scans using 3D UNETR](Projects/AMASSS_CBCT/README.md)(**Luc Anchling** et al) *[Remote]* + +1. [Automated Standardized Orientation for Cone-Beam Computed Tomography (CBCT)](Projects/ASO_CBCT/README.md)(**Luc Anchling**, Nathan Hutin, Maxime Gillot, Baptiste Baquero, Jonas Bianchi, Antonio Ruellas, Felicia Miranda, Selene Barone, Marcela Gurgel, Marilia Yatabe, Najla Al Turkestani, Hina Joshi, Lucia Cevidanes, Juan Prieto) *[Remote]* + +1. 
[Automatic Standardize Orientation IOS](Projects/AutomaticStandardizeOrientation_IOS/README.md) (**Nathan Hutin**, Luc Anchling, Marcela Gruge, Felicia Miranda, Najla Al Turkestani, Selene Barone, Lucia Cevidanes, Juan Prieto) *[Remote]* + +1. [Automatic Landmark Identification in Cranio-Facial CBCT](Projects/ALI_CBCT/README.md) (**Luc Anchling**, Nathan Hutin, Maxime Gillot, Baptiste Baquero, Jonas Bianchi, Marcela Gurgel, Najla Al Turkestani, Marilia Yatabe, Lucia Cevidanes, Juan Prieto) *[Remote]* + +1. [Automated Landmarking Support](Projects/AutomatedLandmarkingSupport/README.md) (Sara Rolfe, Chi Zang, Murat Maga, Steve Pieper, Andras Lasso) *[??]* + + +### Quantification and Computation + +1. [Automatic Quantification 3D Components](Projects/AutomaticQuantitative3DCephalometrics/README.md) (**Nathan Hutin**, Luc Anchling, Baptiste Baquero, Maxime Gillot, Lucia Cevidanes, David Allemang, Jean-Christophe Fillion-Robin) *[Remote]* + +1. [MeshComparison](Projects/MeshComparison/README.md) (**Paolo Zaffino**, Maria Francesca Spadea, Michela Destito, Amerigo Giudice) *[On site]* + +1. [Electrophysiological biosignals in 3D Slicer](Projects/Electrophysiological_Biosignals_In_3DSlicer/README.md) (**Jordan Ortega Rodríguez**, Gara Ramos) *[On site]* + +1. [Analytic Registration Verification](Projects/AnalyticRegistrationVerification/README.md) (**Gerry Gralton**, Andy Huynh, Benjamin Zwick) *[On site]* + +1. [SlicerCBM](Projects/SlicerCBM/README.md) (**Benjamin Zwick**, Saima Safdar, Andy Huynh, Gerry Gralton, Adam Wittek, Karol Miller) *[On site]* + +1. [Brain segmentation for Long COVID study](Projects/LongCOVID/README.md)(**Zora Kikinis**)[On site] + +1. [SlicerElastix: update elastix version](Projects/SlicerElastixUpdate/README.md) (**Simón Oxenford**, Andras Lasso)[On site] + +### Cloud / Web +1. 
[How-to setup and run 3D Slicer on an AWS server instance step by step](Projects/SlicerCloud/README.md) (**Rudolf Bumm**, Steve Pieper, Gang Fu, Qing Liu) *[On site]* + +1. [MHub Integration](Projects/MHub_Integration/README.md) (**Leonard Nürnberg**, Dennis Bontempi, Andrey Fedorov) *[On site]* + +1. [3DSlicerHub](Projects/SlicerHub/README.md) (**Rafael Nebot**, Paula Moreno, Juan Ruiz, Idafen Santana) *[On site]* + +1. [Kaapana related experiments/discussions/collaboratons](Projects/Kaapana_overall/README.md) (**Andrey Fedorov**, Marco Nolden, Hans Meine, Klaus Kades) *[On site]* + +1. [DICOM Meta-Dashboard](Projects/MetaDashboard/README.md) (Hans Meine, Stefan Denner, Klaus Kades, Andrey Fedorov) *[On site]* + +1. [Connecting/Using Kaapana to Google Cloud/Google Health/Google FHIR](Projects/KaapanaConnectingKaapanaToGoogleCloudAndHealthAndFHIR/README.md) (**Jonas Scherer**, Andrey Fedorov) *[Remote]* + +1. [Kaapana: Data and model exchange across different sources](Projects/KaapanaDataAndModelExchangeAcrossDifferentSources/README.md) (**Benjamin Hamm**, Ünal Akünal, Markus Bujotzek, Klaus Kades, Andrey Fedorov) *[On site]* + +1. [Kaapana: Fast viewing and tagging of DICOM Images](Projects/KaapanaFastViewingAndTaggingOfDICOMImages/README.md) (**Stefan Denner**, Klaus Kades, Andrey Fedorov) *[On site]* + +1. [Kaapana: Integration of desktop apps](Projects/KaapanaIntegrationOfDesktopApps/README.md) (**Hanno Gao**, Klaus Kades, Andrey Fedorov) *[On site]* + +1. [Integration of clinical data into medical imaging pipelines](Projects/KaapanaClinicalData/README.md) (**Philipp Schader**, Andrey Fedorov) *[On site]* + +1. [FAIRification of medical imaging data and analysis tools](Projects/Metadata_IDC_HMC/README.md) (**Marco Nolden**, Andrey Fedorov) *[On site]* + +1. [Using VolView with images in Google Storage / IDC buckets](Projects/IDC_with_VolView/README.md) (**Andrey Fedorov**, Forrest Li, Stephen Aylward) *[On site]* + +1. 
[IDC Introduction and Tutorial](Projects/IDC_Tutorial/README.md) (**Andrey Fedorov**, Deepa Krishnaswamy, Cosmin Ciausu, Vamsi Thiriveedhi, Dennis Bontempi, Leonard Nuerenberg) *[On site]* + +1. [DICOM WSI: conversion, analysis workflow, accessing DICOM WSI in IDC](Projects/IDC_DICOM_WSI_workflow/README.md) (**Maximilian Fischer**, Andrey Fedorov, Marco Nolden, Philipp Schader, David Clunie, Daniela Schacherer, André Homeyer, Curtis Lisle) *[On site]* + + + +### Infrastructure +1. [ParameterNodeWrapper](Projects/ParameterNodeWrapper/README.md) (**Connor Bowley**, Sam Horvath, David Allemang) *[On site]* + +1. [SlicerPipelines](Projects/SlicerPipelines/README.md)(**Connor Bowley**, Sam Horvath) *[On site]* + +1. [Transitioning 3D Slicer to QSS Styling](Projects/SlicerQSS/README.md) (**Sam Horvath**, J-Christophe Fillion-Robin, Andras Lasso, Steve Pieper) *[On site]* + +1. [SystoleOS](Projects/SystoleOS/README.md) (**Rafael Palomar**, Steve Pieper) *[On site]* + +1. [3D Slicer Internationalization](Projects/3DSlicerInternationalization/README.md) (**Sonia Pujol**, Steve Pieper, Andras Lasso, Mamadou Camara, Mouhamed DIOP, Adama Wade, Mohamed Alalli Bilal, Ahmedou Moulaye Idriss, Yahya Tfeil, Adriana H. Vilchis González, Luiz Otavio Murta Junior, Attila Tanács, Attila Nagy) *[On site and online]* + +1. [Active Viewport](Projects/SlicerActiveViewport/README.md) (**Davide Punzo**, Andras Lasso) *[On site]* + +1. [Visual DICOM browser](Projects/SlicerVisualDICOMbrowser/README.md) (**Davide Punzo**, Andras Lasso) *[On site]* + +1. [Measurement Panel](Projects/SlicerMeasurementPanel/README.md) (**Davide Punzo**, Andras Lasso) *[On site]* + +1. [SlicerAstro Update](Projects/SlicerAstroUpdate/README.md) (**Davide Punzo**, Thijs van der Hulst) *[On site]* + +1. 
[DICOM Segmentation Optimization](Projects/DICOMSEG/README.md) (**Steve Pieper**, Andrey Fedorov, Andras Lasso, Marco Nolden, Hans Meine, Alireza Sedghi, Erik Ziegler, Markus Hermann, Chris Bridge, David Clunie) *[On site]* + +1. [Updating Batch Anonymizer](Projects/SlicerBatchAnonymize/README.md) (**Hina Shah**, Juan Carlos Prieto, Lucia Cevidanes) [Remote] + + +## Registrants + +Do not add your name to this list below. It is maintained by the organizers based on your registration. Register [here](https://forms.gle/sh9jGJLJdBm4us3E7) + +List of registered participants so far (names will be added here after processing registrations): + + + + +1. Rafael Palomar, Oslo University Hospital, Norway, (In-person, Confirmed) +1. Csaba Pinter, EBATINCA, Spain, (In-person, Confirmed) +1. Simon Drouin, École de technologie supérieure, Canada, (In-person, Confirmed) +1. Tina Kapur, Brigham and Women's Hospital, Harvard Medical School, USA, (Online) +1. Karol Miller, Intelligent Systems for Medicine Laboratory, The University of Western Australia, Australia, (In-person, Confirmed) +1. Andy Huynh, University of Western Australia, Australia, (In-person, Confirmed) +1. Sen Li, École de technologie supérieure, Canada, (Undecided) +1. Paolo Zaffino, Magna Graecia University of Catanzaro, Italy, (In-person, Confirmed) +1. Andrey Fedorov, BWH, USA, (In-person, Confirmed) +1. Steve Pieper, Isomics, Inc., USA, (In-person, Confirmed) +1. hans knutsson, Linköpings universitet, Sweden, (In-person, Confirmed) +1. Sonia Pujol, Brigham and Women's Hospital, Harvard Medical School, USA, (Online) +1. JUAN RUIZ-ALZOLA, UNIVERSIDAD DE LAS PALMAS DE GRAN CANARIA, Spain, (In-person, Confirmed) +1. Ron Kikinis, Harvard Medical School, USA, (In-person, Confirmed) +1. Carl-Fredrik Westin, Harvard Medical School, USA, (In-person, Confirmed) +1. Katie Mastrogiacomo, Brigham and Women's Hospital, USA, (In-person, Confirmed) +1. 
Mamadou Samba CAMARA, Cheikh Anta Diop University of Dakar, Senegal, (In-person, Confirmed) +1. Pape Mady THIAO, École militaire de santé de Dakar , Senegal, (In-person) +1. Alexandra Golby, Brigham and Women's Hospital/Harvard Medical School, USA, (In-person) +1. yahya tfeil tfeil, faculty of medicine , nouakchott university, Mauritania, (In-person) +1. Javier Pascau, Universidad Carlos III de Madrid, Spain, (In-person, Confirmed) +1. Idafen Santana Perezz, ULPGC, Spain, (In-person, Confirmed) +1. David García Mato, Ebatinca S.L., Spain, (In-person, Confirmed) +1. Alicia Pose Díez de la Lastra, Universidad Carlos III de Madrid, Spain, (In-person, Confirmed) +1. Miguel Angel Rodriguez-Florido, Medical Tech and Audiovisual Research Group (GTMA) & Chair of Medical Technology (CTM), ULPGC, Spain, (In-person, Confirmed) +1. Gabor FICHTINGER, Queen's University, Canada, (In-person, Confirmed) +1. Luděk Hynčík, University of West Bohemia, Czechia, (Online) +1. Souleymane Diao, Cheikh Anta Diop University, Senegal, (Online) +1. Mouhamed DIOP, Cheikh Anta DIOP University of Dakar, Senegal, (In-person, Confirmed) +1. Gabriella d' Albenzio, Oslo University Hospital, Norway, (In-person, Confirmed) +1. Dwijkumar Mistry, 3D Surgical, India, (Online) +1. Ruoyan Meng, Norwegian University of Science and Technology, Norway, (In-person, Confirmed) +1. Ahmedou Moulaye IDRISS, Faculty of Medicine / Nouakchott Universty, Mauritania, (In-person) +1. Francesca Spadea, Karlsruhe Institute of Technology, Germany, (In-person, Confirmed) +1. Rudolf Bumm, Kantonsspital Graubünden, Switzerland, (In-person, Confirmed) +1. Raul San Jose, Brigham and Women's Hospital, USA, (In-person, Confirmed) +1. Kanoun Salim, Pixilib, France, (Undecided) +1. Abadie Celian, Pixilib, France, (Undecided) +1. Simon Oxenford, Charité , Germany, (In-person, Confirmed) +1. Andre , IOC VET, Brazil, (Online) +1. Felix von Haxthausen, Unversity of Lübeck, Germany, (In-person, Confirmed) +1. 
Tamas Ungi, Queen's University, Canada, (Online) +1. Andres Diaz-Pinto, NVIDIA, UK, (Online) +1. Michela Destito, University Magna Graecia of Catanzaro, Italy, (In-person, Confirmed) +1. Attila Nagy, University of Szeged, Hungary, (In-person, Confirmed) +1. Zachary Baum, University College London, UK, (Undecided) +1. Jordan Ortega Rodríguez, Instituto de Astrofísica de Canarias, Spain, (In-person, Confirmed) +1. Pablo Rubén, Universidad de Las Palmas de Gran Canaria, Spain, (In-person, Confirmed) +1. Yamilet Rivero López, Universidad de Las Palmas de Gran Canaria, Spain, (In-person, Confirmed) +1. Joshua García Montagut, Grupo de investigación en fabricación integrada y avanzada (ULPGC), Spain, (In-person, Confirmed) +1. Mario Monzón, Universidad de Las Palmas de Gran Canaria, Spain, (In-person, Confirmed) +1. Rubén Paz, Universidad de las Palmas de Gran Canaria, Spain, (In-person, Confirmed) +1. Ben Zwick, The University of Western Australia, Australia, (In-person, Confirmed) +1. Gara Ramos, IAC, Spain, (In-person, Confirmed) +1. Davide Punzo, Freelancer, France, (In-person, Confirmed) +1. Michael Halle, Brigham and Women's Hospital, USA, (In-person, Confirmed) +1. Gerry Gralton, University of Western Australia, Australia, (In-person, Confirmed) +1. Natalia Arteaga-Marrero, Instituto de Astrofísica de Canarias (IAC) , Spain, (In-person, Confirmed) +1. Davide Punzo, Freelancer, France, (In-person, Confirmed) +1. Vamsi Krishna Thiriveedhi, Brigham and Women's Hospital (BWH), USA, (Online) +1. Lucia Cevidanes, University of Michigan, USA, (Online) +1. Luc Anchling, University of Michigan, USA, (Online) +1. Nathan Hutin, University of Michigan, USA, (Online) +1. Sam Horvath, Kitware, USA, (In-person, Confirmed) +1. Andrey Titov, École de technologie supérieure , Canada, (In-person, Confirmed) +1. Kizzy Scott, Brigham and Women's Hospital, USA, (In-person, Confirmed) +1. Rafael Nebot Medina, Instituto Tecnológico de Canarias, Spain, (In-person, Confirmed) +1. 
Paula Victoria, instituto Tecnológico de Canarias, Spain, (In-person, Confirmed) +1. Alireza Sedghi, OHIF, Canada, (Online) +1. Andrey Titov, École de technologie supérieure, Canada, (In-person, Confirmed) +1. Sara Rolfe, Seattle Children's Research Institute, USA, (Undecided) +1. Geir Arne Tangen, SINTEF, Norway, (In-person, Confirmed) +1. Yaying Shi, University of North Carolina, USA, (Online) +1. Haythem Guermazi, Faculté de médecine de pharmacie et d'odonto-stomatologie de nouakchott, Mauritania, (In-person) +1. Étienne Léger, Montreal Neurological Institute, Canada, (In-person, Confirmed) +1. Theodore Aptekarev, Slicer Community, Montenegro, (In-person, Confirmed) +1. Gang Fu, AWS, USA, (Online) +1. Roya Khajavibajestani, BWH, USA, (Undecided) +1. Ole Vegard Solberg, SINTEF, Norway, (In-person, Confirmed) +1. Santhosh Parampottupadam, German Cancer Research Center, Germany, (In-person) +1. Brianna Burton, 3D Side, Belgium, (Undecided) +1. Cosmin Ciausu, Brigham and Women's Hospital -- Imaging Data Commons, USA, (In-person, Confirmed) +1. Charles DeLorey, Boston University/Brigham and Women's Hospital, USA, (Online) +1. Marco Nolden, German Cancer Research Center (DKFZ), Germany, (In-person, Confirmed) +1. Fernandez Vidal, ICM, France, (In-person, Confirmed) +1. Maria Sofia Sappia, Radboudumc, Netherlands, (Online) +1. Yucheng Tang, Nvidia, USA, (Online) +1. Chris Yeung, Queen's University, Canada, (Online) +1. Prodipta Guha, The University of Melbourne, Australia, (Online) +1. Daniel Haehn, University of Massachusetts Boston, USA, (Online) +1. Dennis Bontempi, BWH/AIM, USA, (In-person, Confirmed) +1. Leonard Nürnberg, BWH / AIM, USA, (In-person, Confirmed) +1. Loraine Franke, University of Massachusetts Boston, USA, (Online) +1. Ami Hashemi, MGH/HMS, USA, (Online) +1. Piotr Woznicki, University of Würzburg, Germany, (Online) +1. Andras Lasso, Queen's University, Canada, (In-person, Confirmed) +1. Connor Bowley, Kitware, Inc, USA, (In-person, Confirmed) +1. 
Rafe McBeth, University of Pennsylvania, USA, (Undecided) +1. Linmin Pei, Frederick National Laboratory for Cancer Research, USA, (Online) +1. William Wells, BHW, USA, (In-person, Confirmed) +1. Sara Fernandez Vidal, ICM, France, (In-person, Confirmed) +1. Sara Fernandez Vidal, Paris Brain Institute, ICM, France, (In-person, Confirmed) +1. Zora Kikinis, Mass General Brigham , Boston, USA, (In-person, Confirmed) +1. Fidèle AGOSSOU, LEMACEN , Benin, (Online) +1. Jeremiah Richard, Augius LTD, UK, (Online) +1. Deepa Krishnaswamy, Brigham and Women's Hospital, USA, (In-person, Confirmed) +1. Hans Meine, Fraunhofer MEVIS, Germany, (In-person, Confirmed) +1. MARTA LATORRE MIGUEZ, ULPG, Spain, (In-person, Confirmed) +1. Attila Tanács, University of Szeged, Hungary, (In-person, Confirmed) +1. Pablo Sergio Castellano Rodríguez, Universidad de Las Palmas de Gran Canaria, Spain, (In-person, Confirmed) +1. Andrea Mihaly, ULPGC, Spain, (In-person, Confirmed) +1. María Rosa Rodríguez Luque, Universidad de Las Palmas de Gran Canaria , Spain, (In-person, Confirmed) +1. Jose Carlos Mateo Pérez, ULPGC, Spain, (In-person, Confirmed) +1. Nikolaos Makris, Massachusetts General Hospital, USA, (In-person) +1. Aday Melián Carrillo, ULPGC - GTMA, Spain, (In-person, Confirmed) +1. Marta Kersten, Concordia University, Canada, (In-person, Confirmed) +1. Connor Bowley, Kitware, Inc., USA, (In-person, Confirmed) +1. Nicole Delgado, Booz Allen Hamilton, USA, (Online) +1. Brandon Konkel, Booz Allen Hamilton, USA, (Online) +1. Hanno Gao, DKFZ, Germany, (In-person, Confirmed) +1. Robabeh Salehiozoumchelouei, Instituto de Astrofísica de Canarias (IAC), Spain, (Online) +1. Nayra Pumar, Ebatinca, Spain, (Undecided) +1. Stefan Denner, German Cancer Research Center (DKFZ), Germany, (In-person, Confirmed) +1. Ünal Akünal, German Cancer Research Institute (DKFZ), Germany, (In-person, Confirmed) +1. Benjamin Hamm, German Cancer Research Center (DKFZ), Germany, (In-person, Confirmed) +1. 
Klaus Kades, German Cancer Research Center, Germany, (In-person, Confirmed) +1. Umang Pandey, Universidad Carlos III de Madrid (UC3M), Madrid, Spain, (In-person, Confirmed) +1. Yogesh Rathi, Harvard Medical School, USA, (In-person) +1. Trinity Urban, Clario, USA, (Online) +1. Francisco J. Marcano Serrano, Universidad de La Laguna, Spain, (In-person, Confirmed) +1. Philipp Schader, German Cancer Research Center (DKFZ) Heidelberg, Germany, (In-person, Confirmed) +1. Ralf Floca, German Cancer Research Center (DKFZ), Germany, (Online) +1. Carlo Rondinoni, University of Sao Paulo, Brazil, (Online) +1. Adriana H. Vilchis González, Universidad Autónoma del Estado de México, Mexico, (Online) +1. Juan Carlos Avila Vilchis, Universidad Autónoma del Estado de México, Mexico, (Online) +1. Carley Tillett, Curtin University, Australia, (Online) +1. Rebeca Villarroel Ramírez, Universidad de la Laguna, Spain, (In-person, Confirmed) +1. Luiz Murta, Universidade de São Paulo, Brazil, (Online) +1. Alberto Cuadrado Hernández, University of Las Palmas de Gran Canaria, Spain, (In-person, Confirmed) +1. Álvaro Navarro González , University of Las Palmas de Gran Canaria , Spain, (In-person, Confirmed) +1. Markus Bujotzek, German Cancer Research Center (DKFZ) Heidelberg, Germany, (In-person, Confirmed) +1. Gauthier DOT, AP-HP, France, (Online) +1. Elanchezhian Somasundaram, Cincinnati Children's, USA, (Online) +1. Chi Zhang, Seattle Children's Research Institute, USA, (Online) +1. RODRIGO BASILIO, Radical Imaging , Brazil, (Online) +1. Saeed Arbabi, University Medical Center Utrecht, Netherlands, (Undecided) +1. Maximilian Fischer, German Cancer Research Center DKFZ, Germany, (Undecided) +1. Kyle Sunderland, Queen's University, Canada, (Online) +1. Ghulam Rasool, Moffitt Cancer Center and Research Institute , USA, (Online) +1. Suraj Pai, AIM, Brigham and Womens Hospital, USA, (Online) +1. Ibrahim Hadzic, Mass General Brigham, Harvard Medical School, Maastricht University, USA, (Online) +1. 
Diana Alejandra Mendoza Mora , Universidad Autónoma del Estado de México , Mexico, (Online) +1. Li-Wei Yang, National Taiwan University, Taiwan, (Online) +1. Maica Fernández, ULPGC, Spain, (In-person, Confirmed) +1. Allen Tannenbaum, Stony Brook University, USA, (Online) +1. Marina Elistratova Elistratova, Universidad de Las Palmas de Gran Canaria, Spain, (In-person, Confirmed) +1. Nadya Shusharina, Massachusetts General Hospital, USA, (Online) +1. Tamas Heffter , N/A, Hungary, (Online) +1. Khaled Younis, Philips, USA, (Online) +1. Rebecca Hisey, Queen's University, Canada, (Online) +1. Pedro Moreira, Brigham and Women's Hospital, USA, (Online) +1. Jakob Wasserthal, University Hospital Basel, Switzerland, (In-person) +1. Glauco Caurin, University of Sao Paulo, Brazil, (Online) +1. Eserval Rocha Junior, Sao Paulo University, Brazil, (Online) +1. Djennifer Madzia-Madzou , Utrecht Medical Centre, Netherlands, (Online) +1. Rahul Kumar, Oslo University Hospital, Norway, (Online) +1. Mohamed Alalli BILAL, Université Cheikh Anta Diop de Dakar, Senegal, (Online) +1. Fryderyk Kögl, Technical University of Munich, Germany, (Online) +1. Carine Cindy Nguefack Tonleu, Ecole de Technologie Supérieure, Canada, (Online) +1. Tamas Ungi, Queen's University, Canada, (Online) +1. Javier González-Fernández, Instituto Tecnológico de Canarias, Spain, (In-person, Confirmed) +1. Maximilian Fischer, German Cancer Research Center DKFZ, Germany, (In-person, Confirmed) +1. Daniela Schacherer, Fraunhofer MEVIS, Germany, (Online) +1. Jenna Kim, University of Massachusetts Boston, USA, (Online) +1. Kiran Sandilya , University of Massachusetts Boston, USA, (Online) +1. Kunal Jain, University of Massachusetts Boston, USA, (Online) +1. Mahsa Geshvadi, UMass Boston, USA, (Online) +1. Daniel Palkovics, Semmelweis University, Hungary, (Online) +1. Keno Bressem, Brigham and Womens Hospital, USA, (Online) +1. Nielsen , Selfim, Brazil, (Online) +1. 
VIANNEY MUNOZ-JIMENEZ, Universidad Autónoma del Estado de México , Mexico, (Online) +1. Papa Alioune Cisse , Universidad Assane Seck de Ziguinchor , Senegal, (In-person, Confirmed) +1. Youssou Faye, Universidad Assane Seck de Ziguinchor , Senegal, (In-person, Confirmed) +1. Ousmane Dia , Ministerio de salud, Senegal, (In-person, Confirmed) +1. Ablaye Tacko Diop , Hospital Fann de Dakar , Senegal, (In-person, Confirmed) +1. Oumar Kane , Hospital Fann de Dakar , Senegal, (In-person, Confirmed) +1. Idy Diop, Universidad Cheikh Anta Diop , Senegal, (In-person, Confirmed) +1. Adama Faye , Universidad Cheikh Anta Diop , Senegal, (In-person, Confirmed) +1. Christine Sokhna Thiandoum, Universidad Cheikh Anta Diop , Senegal, (In-person, Confirmed) +1. Babacar Diao , Hospital Militar de Ouakam Dakar, Senegal, (In-person, Confirmed) +1. Magatte Gaye , Hospital General Idrissa Pouye Dakar, Senegal, (In-person, Confirmed) +1. Charles Dieme , Hospital Aristide Le Dantec Dakar, Senegal, (In-person, Confirmed) +1. Sidi El Wafi Sid El Wavi, Universidad de Nouakchott Al Aasriya , Mauritania, (In-person, Confirmed) +1. Mohamed Vadel Deida , Universidad de Nouakchott Al Aasriya , Mauritania, (In-person, Confirmed) +1. Cheikh Sidi Ethmane Kane , Universidad de Nouakchott Al Aasriya , Mauritania, (In-person, Confirmed) +1. Aliou Barry , Universidad de Nouakchott Al Aasriya , Mauritania, (In-person, Confirmed) +1. Fatimetou Mohamed Saleck , Universidad de Nouakchott Al Aasriya , Mauritania, (In-person, Confirmed) +1. Mouhamedi Bah, Escuela De Salud Pública de Rosso, Mauritania, (In-person, Confirmed) +1. Ahmed Dhahara Kane , Universidad de Nouakchott Al Aasriya , Mauritania, (In-person, Confirmed) +1. Joaquin Olivares, Universidad de Córdoba, Spain, (Online) +1. Mauro Ignacio Dominguez, Independent, Argentina, (Online) +1. Jonas Scherer, German Cancer Research Center (DKFZ), Germany, (Online) +1. Curtis Lisle, KnowledgeVis, LLC, USA, (Online) +1. 
Bartosz Włodarczyk, Polish-Japanese Academy of Information Technology, Poland, (Online) +1. Alexandru Dorobanțiu, Lucian Blaga University of Sibiu, Romania, (Online) +1. Zdravko Marinov, Karlsruhe Institute of Technology, Germany, (Online) +1. Juan de León Luis, Hospital Gregorio Marañón, Spain, (In-person, Confirmed) +1. Houssem Gueziri, MNI / McGill University , Canada, (Online) +1. Ulrike Wagner, Frederick National Laboratory for Cancer Research, USA, (Online) +1. Heather Selby, Stanford, USA, (Online) +1. MARIANA ALVAREZ CARVAJAL, UNIVERSIDAD AUTONOMA DEL ESTADO DE MEXICO, Mexico, (Online) +1. Osku Sundqvist, Planmeca Oy, Finland, (Online) +1. Daniel Fernández, Universidad Autónoma del Estado de México, Mexico, (Online) +1. Artur Banach, Brigham and Women’s Hospital, USA, (In-person, Confirmed) +1. Valeria Gómez Valdes , Universidad Autónoma del Estado de México, Mexico, (Online) +1. Abigail Mercado Ponciano, Universidad Autónoma del Estado de México, Mexico, (Online) +1. B. Natterson-Horowitz, UCLA and Harvard, USA, (Online) +1. ENRIQUE HERNANDEZ LAREDO, UNIVERSIDAD AUTONOMA DEL ESTADO DE MEXICO, Mexico, (Online) +1. Ron Alkalay, Beth Israel Deaconess Medical Center, USA, (In-person, Confirmed) +1. Varun Agarwal, Rohilkhand Medical College, India, (Online) +1. Christopher Bridge, Massachusetts General Hospital, USA, (Online) +1. Marie Ndiaye, Universidad Assane Seck de Ziguinchor , Senegal, (In-person) +1. Mame Diarra SY , Universidad Assane Seck de Ziguinchor , Senegal, (In-person) +1. Mame Abdoulaye Gueye, Hospital Fann de Dakar - Ministerio de la Salud , Senegal, (In-person) +1. Ndeye Mareme Sougou, Universidad Cheikh Anta Diop , Senegal, (In-person) +1. Khedijetou Vilaly , Hospital Mère Enfant de Nouakchott , Mauritania, (In-person) +1. Edmee Clemence Mansilla, Ministerio de Enseñanza técnica, formación Profesional y Aprendizaje, Côte d'Ivoire, (In-person) +1. 
Estelle Tcheple Tuo, Ministerio de Enseñanza técnica, formación Profesional y Aprendizaje, Côte d'Ivoire, (In-person) +1. Delphine Bella Epse Konan, Centro de Formación Profesional Socio-Sanitaria Agboville, Côte d'Ivoire, (In-person) +1. Paul E. Neumann, Dalhousie University, Canada, (Online) +1. Hina Shah, UNC Chapel Hill, USA, (Online) +1. Badiaa AIT AHMED, University of Las Palmas de Gran Canaria, Spain, (Online) +1. Celestino Lopes De Barro, Universidad de Cabo Verde, Cabo Verde, (In-person, Confirmed) +1. Mateus Neves Andrade, Universidad de Cabo Verde, Cabo Verde, (In-person) +1. José Olavo Da Paz Teixeira, Universidad de Cabo Verde, Cabo Verde, (In-person, Confirmed) +1. Hagi Anderson Lima Lopes, Universidad de Cabo Verde, Cabo Verde, (In-person) +1. Tolga-Can Çallar, Institue for Robotics and Cognitive Systems (University of Lübeck), Germany, (Online) +1. Gabriel Kwiecinski Antunes, WebKriativa, Brazil, (Online) +1. Francisco Guerrero Aranda, Student University of Granada - Spain, Spain, (Online) +1. David Emanuel Luksic, LARALAB, Germany, (Online) +1. Nicola Martini, Yunu Inc., Italy, (Online) +1. Tamu Tsinesh, Ukh, Germany, (Online) +1. Eduardo Fares , University of São Paulo , Brazil, (Online) +1. Reihaneh Teimouri, Concordia, Canada, (Online) +1. Eduardo Fares, University of São Paulo, Brazil, (Online) +1. Keyvan Farahani, National Cancer Institute, USA, (Online) +1. David Molony, Georgia Heart Institute, USA, (Online) +1. Juan Carlos Avila Vilchis, Universidad Autónoma del Estado de México, Mexico, (Online) +1. David Molony, Georgia Heart Institute, USA, (Online) +1. Hui Liu, Shanghai 6th Hospital, China, (Online) +1. Dan Rukas, Mass General Hospital, USA, (Online) +1. Laura Connolly , Queens university , Canada, (Online) +1. 
Sonia Pujol, Brigham and Women's Hospital, Harvard Medical School, USA, (Online) + + + +## Statistics + +Participation statistics + +## Organizers +### Local organizing committee +Juan Ruiz-Alzola, PhD, Professor of Imaging Technologies, director of the Grupo de Tecnología Médica y Audiovisual (GTMA), [Instituto Universitario de Investigaciones Biomédicas y Sanitarias (IUIBS)](https://www.iuibs.ulpgc.es/), [Universidad de Las Palmas de Gran Canaria (ULPGC)](https://www.ulpgc.es/) + +Idafen Santana-Pérez, PhD, Project Manager and Research Fellow at Grupo de Tecnología Médica y Audiovisual (GTMA), [Instituto Universitario de Investigaciones Biomédicas y Sanitarias (IUIBS)](https://www.iuibs.ulpgc.es/), [Universidad de Las Palmas de Gran Canaria (ULPGC)](https://www.ulpgc.es/) + +### Global Project Week organizing committee +* [@tkapur](https://github.com/tkapur) ([Tina Kapur, PhD](http://www.spl.harvard.edu/pages/People/tkapur)), +* [@drouin-simon](https://github.com/drouin-simon) ([Simon Drouin, PhD](https://drouin-simon.github.io/ETS-web//)) +* [@rafaelpalomar](https://github.com/rafaelpalomar) ([Rafael Palomar, PhD](https://www.ntnu.edu/employees/rafaelp)) +* [@piiq](https://github.com/piiq) ([Theodore Aptekarev](https://discourse.slicer.org/u/pll_llq)) +* [@sjh26](https://github.com/sjh26) Sam Horvath + +## History +Please read about our experience in running these events since 2005: [Increasing the Impact of Medical Image Computing Using +Community-Based Open-Access Hackathons: the NA-MIC and 3D Slicer Experience](http://perk.cs.queensu.ca/sites/perkd7.cs.queensu.ca/files/Kapur2016.pdf). 
diff --git a/PW38_2023_GranCanaria/pw38-floor-plan.png b/PW38_2023_GranCanaria/pw38-floor-plan.png new file mode 100644 index 000000000..f2c93bfa7 Binary files /dev/null and b/PW38_2023_GranCanaria/pw38-floor-plan.png differ diff --git a/PW38_2023_GranCanaria/statistics.svg b/PW38_2023_GranCanaria/statistics.svg new file mode 100644 index 000000000..6d431c5df --- /dev/null +++ b/PW38_2023_GranCanaria/statistics.svg @@ -0,0 +1,16883 @@ + + + + + + + + 2023-06-09T12:26:19.427250 + image/svg+xml + + + Matplotlib v3.5.2, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git 
a/PW39_2023_Montreal/BreakoutSessions/AMPSCZ/README.md b/PW39_2023_Montreal/BreakoutSessions/AMPSCZ/README.md new file mode 100644 index 000000000..1891fe0e3 --- /dev/null +++ b/PW39_2023_Montreal/BreakoutSessions/AMPSCZ/README.md @@ -0,0 +1,25 @@ +## Overview + +The Accelerating Medicines Partnership (AMP®) program is a public-private partnership between the National Institutes of Health (NIH), the U.S. Food and Drug Administration (FDA), the European Medicines Agency (EMA), pharmaceutical and life science companies, non-profits and other organizations. The overarching aim of the AMP program is to improve our understanding of disease pathways and identify new and better targets for treatment. + +The AMP Schizophrenia (SCZ) program was launched in 2020 to address the critical need for more effective treatments for people with schizophrenia and related mental health conditions. + +## The AMP SCZ Program Approach + +The aims of this 5-year $99.4 million global effort include developing tools that identify the early stages of risk for schizophrenia and predicting the likelihood of progression to psychosis and other outcomes. A related aim is to develop tools that can help researchers identify new targets for drug-based treatments that can be tested in clinical trials. + +The AMP SCZ program is managed by the Foundation for the National Institutes of Health (FNIH). More detailed information about the AMP SCZ program is available on the FNIH website. + +## Study design + +This is a non-interventional study examining clinical trajectories and predictors of outcomes in the clinical high risk (CHR) population. + +Clinical assessments will be collected monthly for the first year, and then at 18 and 24 months and at the point of conversion to psychosis. + +The biomarker measures (imaging, EEG and event-related potentials, fluid-based biomarkers, cognitive assessments, and speech sampling) will be collected at baseline and at two months after study entry. 
Digital assessments (actigraphy as well phone app-based data collection) will be collected daily for the first year. The collection of these biomarkers over time will validate their use and efficacy in the CHR population to establish early indicators of pharmacologic treatment efficacy. + +Cognitive assessments will be collected longitudinally at six, 12 and 24 months and at conversion to psychosis. Unbiased machine language and AI approaches will be used to derive algorithms that predict clinical endpoints: conversion to psychosis (primary clinical endpoint); remission of the CHR state (secondary clinical endpoint), and non-conversion/non-remission (secondary clinical endpoint). These approaches will allow the selection of enriched populations to help improve success in developing pharmacologic treatments. + +The goal of AMP® SCZ is to facilitate drug development for the CHR population. + +https://www.ampscz.org diff --git a/PW39_2023_Montreal/BreakoutSessions/IGT/README.md b/PW39_2023_Montreal/BreakoutSessions/IGT/README.md new file mode 100644 index 000000000..f686d2b17 --- /dev/null +++ b/PW39_2023_Montreal/BreakoutSessions/IGT/README.md @@ -0,0 +1,11 @@ +# IGT Breakout Session + +## Organizers +- Tamas Ungi (Queen's) +- Junichi Tokuda (BWH) + +## Agenda +- NousNav (Colton Barr, BWH, Queens) +- Live Ultrasound/3D Reconstruction (Tamas Ungi, Queen's) +- SlicerROS2 (Junichi Tokuda) +- diff --git a/PW39_2023_Montreal/BreakoutSessions/RenderingBreakout/README.md b/PW39_2023_Montreal/BreakoutSessions/RenderingBreakout/README.md new file mode 100644 index 000000000..f966ca16e --- /dev/null +++ b/PW39_2023_Montreal/BreakoutSessions/RenderingBreakout/README.md @@ -0,0 +1,256 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/BreakoutSessions/RenderingBreakout/Readme.html +- /PW39_2023_Montreal/BreakoutSessions/RenderingBreakout/README.html + +project_title: The future of rendering in VTK and Slicer + +key_investigators: +- name: 
Simon Drouin + affiliation: École de Technologie Supérieure + Country: Canada + +- name: Steve Pieper + affiliation: Isomics, Inc. + country: USA + +- name: Murat Maga + affiliation: University of Washington + country: USA + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +- name: Sara Rolfe + affiliation: Seattle Children's Research Institute + country: USA + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware, Inc. + country: USA + +- name: Stephen Aylward + affiliation: Kitware, Inc. + country: USA + +- name: Rafael Palomar + affiliation: NTNU + country: Norway + +- name: Sankhesh Jhaveri + affiliation: Kitware, Inc. + country: USA + +- name: Matt McCormick + affiliation: Kitware, Inc. + country: USA + +- name: Forrest Li + affiliation: Kitware, Inc. + country: USA + +- name: Lucas Gandel + affiliation: Kitware SAS + country: USA + +- name: Jaswant Panchumarti + affiliation: Kitware, Inc. + country: USA + +- name: Shreeraj Jadhav + affiliation: Kitware, Inc. + country: USA + +- name: Tom Birdsong + affiliation: Kitware, Inc. + country: USA + +--- + +# Description + +The goal of this breakout session is to gather all parties interested in the future of rendering in VTK and Slicer, present ongoing development by Kitware and others and discuss potential future directions and clinical and biomedical needs. + +## During the Breakout Session + +Links and notes are organized at [https://hackmd.io/Mq81LxbYTfqrwOBRjxrb6Q](https://hackmd.io/Mq81LxbYTfqrwOBRjxrb6Q). It is a markdown based document we can collaboratively & interactively edit. + +Once the breakout session is over, we will contribute the information back to this page. See [Notes](#notes) below. + +## Topics + +* VTK Evolution + * Status of the replacement for OpenGL (WebGPU) + * Integration of VTK and VTK.js (common shaders?) 
+* Rendering + * Global illumination + * Support for high resolution volumes + * Support for shared graphics contexts + * Support for GPU pre-processing of volumes and meshes + * Creation of an experimental rendering module: + * Support for multiple volumes/surfaces handled by the same pipeline + * Modifiable shaders + * Multipass rendering + * Arbitrary number of transfer functions + * Better support for animation and high-resolution rendering +* AR-VR + * New Slicer Mixed-Reality module (for Hololens remoting) + * Support for OpenXR in Slicer Virtual Reality module + * Status of WebXR in vtk.js + +# Notes + +## VTK C++: WebGPU + +_Contact: Sankhesh Jhaveri @ Kitware_ + +WebGPU effort in VTK aims to provide a future-proof rendering backend as an alternative to OpenGL. + +* VTK’s data model and visualization pipeline will have no architectural changes. +* For the most part, there will be no frontend user-facing changes to the rendering classes either. Applications would still have to instantiate `vtkRenderWindow`, `vtkRenderer`, `vtkActor`, etc. +* There will be changes with respect to how platform-specific windows are created. These windows no longer need to be tied to the graphics backend i.e. no need for `vtkXOpenGLRenderWindow`, etc. +* For advanced users of VTK who modify rendering logic, use shader replacements, etc. there will be *significant* changes. This should be expected, IMO. + +Experimental: +* RenderingWebGPU: https://docs.vtk.org/en/latest/modules/vtk-modules/Rendering/WebGPU/README.html +* Draft: WebGPU: Native graphics backend. See https://gitlab.kitware.com/vtk/vtk/-/merge_requests/10239 + +Questions: +* multi-volume rendering + * Will it be the default ? In the OpenGL backend, there were two backends. 
+ * vtkMultiVolume issues + * `SetVisibility` issue + * `UserMatrix` not taken into account for the first volume (issue) (works in `TestGPURayCastMultiVolumeOverlapping`) + * Cropping + * Shading + * Only shows up in Composite w/ Shading, but nothing in MIP (Error: Shader failed to compile in `raycastervs.glsl`) +* context sharing ? + * Goal is to reduce the memory footprint by avoiding having duplicate of teture memory (e.g multiple 3D views of the same volume data) + * Availability of result of compute shader for rendering. +* Compute shaders + * WebGPU ? + +Notes: +* [moltengl](https://moltengl.com/moltengl/): Applications built for OpenGL ES 2.0 can use MoltenGL to run on top of Metal, +* Unity allows to easily compose complex rendering pipeline. +* RenderingWebGPU: + * The shader is the center piece + * Possibly use [SPIRV-Reflection](https://github.com/KhronosGroup/SPIRV-Reflect) to decipher shader inputs, their attributes/dimensionality and expose a VTK interface to populate shader bindings with vtkImageData, vtkDataArray, etc. + +* Compute Shader + * https://github.com/Punzo/SlicerAstro/tree/master/vtkOpenGLFilters + * Simon will share details about experiment for implementing compute shader + +* AI and WebGPU for training ? + * Rendering into texture ? + * https://virtualgl.org/ + * Idea would be to support Differential Rendering + * https://towardsdatascience.com/differentiable-rendering-d00a4b0f14be + * + +* Review meeting ? 
+ +### WebAssembly + +_Contact: Jaswant Panchumarti @ Kitware_ + +* Slides: [WASM/WebGPU in VTK](https://docs.google.com/presentation/d/1Nl0TVa55616QKCSHP54BoYBvByMKe6lIUl6IFZqSeJo/edit#slide=id.p) + +* Docs: [Getting Started / Using WebAssembly](https://docs.vtk.org/en/latest/getting_started/index.html#using-webassembly) + +* Leverage efforts done in the context of https://wasm.itk.org/ + +## VTK C++: OpenXR + +_Contact: Lucas Gandel @ Kitware_ + +Improvement roadmap for `OpenXR` and `OpenXR Remoting` support in VTK: + +* OpenXR controller model support: + * `vtkOpenXRManager` already supports for querying the controller models in a buffer (see [here](https://gitlab.kitware.com/vtk/vtk/-/blob/master/Rendering/OpenXR/vtkOpenXRManager.cxx#L297)) + * The buffer should be forwarded to vtkGLTFReader using vtkResourceStream/vtkResourceParser + * Need to add vtkResourceStream/vtkResourceParser support to vtkGLTFReader (see [PLYReader](https://gitlab.kitware.com/vtk/vtk/-/merge_requests/10224/diffs) ) +* OpenXR Remoting hand interaction improvements + * The current interactor style does not allow for accessing different poses from a single Move3D event (see discussions in [this MR](https://gitlab.kitware.com/vtk/vtk/-/merge_requests/9595#note_1255954)) + * Possible options are: + * Add support for storing additional poses (maybe as additional TrackedDevice) + * Add support for [hands joints tracking](https://registry.khronos.org/OpenXR/specs/1.0/html/xrspec.html#_conventions_of_hand_joints) and implement basic gestures recognition +* Add support for [scene understanding](https://registry.khronos.org/OpenXR/specs/1.0/html/xrspec.html#XR_MSFT_scene_understanding) in OpenXR Remoting, to occlude the VTK scene with the real world. + +## SlicerVirtualReality + +* Are we ready to transition to OpenXR ? 
+ * Before doing so, we would need to generalize the function [vtkOpenVRRenderWindow::GetOpenVRPose()](https://github.com/Slicer/VTK/blob/9bde2b3fa9887a801e3eec686ce591072986977f/Rendering/OpenVR/vtkOpenVRRenderWindow.cxx#L546-L558) currently specific to `Rendering/OpenV` and only available in `Slicer/VTK` + +## SlicerMixedReality + +_Contact: Jean-Christophe Fillion-Robin @ Kitware_ + +See https://github.com/KitwareMedical/SlicerMixedReality/pull/2 + +Question: What to do once we start working on adding `OpenXR` support to `SlicerVirtualReality`. + + +## vtk.js: Rendering Non-uniform image series + +_Contact: Shreeraj Jadhav @ Kitware_ + +See https://docs.google.com/presentation/d/1mrMe8w2G5hgRan0KzdwqrxgKGLfyR-h3mM7Kj5KYz4c/edit#slide=id.p + + +## vtk.js: Interactive, in-browser cinematic volume rendering of medical images + +_Contact: Stephen Aylward & Forrest Li @ Kitware_ + +> The diversity and utility of cinematic volume rendering (CVR) for medical image visualisation have grown rapidly in recent years. At the same time, volume rendering on augmented and virtual reality systems is attracting greater interest with the advance of the WebXR standard. 
+ +See https://doi.org/10.1080/21681163.2022.2145239 + +See https://volview.kitware.com/ + +## vtk.js: WebXR + +_Contact: Tom Birdsong @ Kitware_ + +* Blog: [VTK.js Transforms Web-based Visualization with Immersive Virtual and Augmented Reality](https://www.kitware.com/vtk-js-transforms-web-based-visualization-with-immersive-virtual-and-augmented-reality/) +* Blog: [Holograms Over the Web: Kitware Extends vtk.js to Support Looking Glass Factory’s Displays](https://www.kitware.com/holograms-over-the-web-kitware-extends-vtk-js-to-support-looking-glass-factorys-displays/) +* Example and FAQs: https://kitware.github.io/vtk-js/docs/develop_webxr.html + * [Supported devices](https://kitware.github.io/vtk-js/docs/develop_webxr.html#What-mixed-reality-devices-are-supported-by-VTK-js) + * [Feature Support Roadmap](https://github.com/Kitware/vtk-js/issues/2571) + +## VTK & ITK interoperability + +Web: +* ITK IO are compiled to WASM and re-used in vtk.js based web application +* ITK-WASM is a building block for VTK-WASM + +OME-Zarr: +* https://www.biorxiv.org/content/10.1101/2023.02.17.528834v1 +* https://github.com/InsightSoftwareConsortium/itkwidgets/blob/main/examples/integrations/zarr/OME-NGFF-Brainstem-MRI.ipynb + +Slicer + Large Image Rendering: + +* We currently do not have any readily available implementation in Slicer, from a high level, the idea would be to: + - support streaming dataset at different resolutions and updating the rendering accordingly. + - leverage ITK+Zarr integration and ensure information flow all the way through the VTK pipeline + - Consolidate ITKVTKGlue modules + +* Improve ITK module: ITKIOOMEZarrNGFF. 
See https://github.com/InsightSoftwareConsortium/ITKIOOMEZarrNGFF + +# Illustrations + + + +# Background and References + + +* [WebGPU in VTK](https://www.kitware.com/vtk-webgpu-on-the-desktop/) +* [WebGPU in Slicer](https://github.com/pieper/SlicerWGPU) from [Project Week 37](https://projectweek.na-mic.org/PW37_2022_Virtual/Projects/SlicerWGPU/). diff --git a/PW39_2023_Montreal/Projects/3DMedicalRegistrationAndSegmentationWithElastixAndMonaiLabel/README.md b/PW39_2023_Montreal/Projects/3DMedicalRegistrationAndSegmentationWithElastixAndMonaiLabel/README.md new file mode 100644 index 000000000..130d02eb4 --- /dev/null +++ b/PW39_2023_Montreal/Projects/3DMedicalRegistrationAndSegmentationWithElastixAndMonaiLabel/README.md @@ -0,0 +1,80 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: 3D Medical Registration and Segmentation with Elastix and MONAI Label +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Konstantinos Ntatsis + affiliation: Leiden University Medical Center + country: the Netherlands + +- name: Andres Diaz-Pinto + affiliation: NVIDIA & King's College London + country: United Kingdom + +--- + +# Project Description + + + +This project aims to investigate the application of **itk-elastix** (a python wrapping of Elastix) for image registration in combination with **MONAI Label** for segmentation/classification. Depending on the time/people availability, we will work in one or more sub-projects. + +*Initial sub-project:* +We will starty by training a single modality MONAI Label model on Elastix-aligned brain images (T1, T2, FLAIR, etc) using [SynthSeg](https://github.com/BBillot/SynthSeg) as the source of annotations. SynthSeg is a tensorflow-based deep learning segmentation tool for brain MRIs. It consists of a generative network that produces the synthetic images and a 3D U-Net trained to do the segmentation. 
The only input (training data) is the training labels so no real images are used. + +We will use SynthSeg to produce annotations as “ground truth” on a publicly available dataset like BRATS (multimodal + non-healthy brains) or OASIS (temporal/monomodal + healthy brains). Elastix will be used for the co-registration of the different modalities or temporal images and achieve segmentation via registration. + +*Other possible sub-projects:* +* Extend the [whole brain segmentation model](https://github.com/Project-MONAI/model-zoo/tree/dev/models/wholeBrainSeg_Large_UNEST_segmentation) available in the Model Zoo, Use Elastix to perform affine registration of the data in the MNI305 space. +* Compare registration performance between cross-modal registration (CT-MRI) versus intra-modal registration via synthesised MRI (MRI_syn - MRI). MONAI for the synthesis and elastix for the registration. What would a suitable dataset be? +* Train MONAI Label model for automatic landmark identification in e.g. lung images ([dataset](https://med.emory.edu/departments/radiation-oncology/research-laboratories/deformable-image-registration/index.html)) . Landmarks can be used either to assist registration with elastix OR elastix can be used to validate the landmark accuracy. 3D Slicer can be used to visualize the landmarks and ease the qualitative evaluation. +* ... any other idea that is interesting to people, feel free to propose it! + +## Objective + + + +1. Working code, jupyter notebooks, any other artifacts etc that demonstrate the combination of itk-elastix and MONAI Label. They will be helpful for users that would like to solve similar problems. + +## Approach and Plan + + + +1. Configure and run Elastix +2. Setup and run MONAI Label +3. Make sure they work together nicely (e.g. output of Elastix should be suitable for MONAI, or the reverse) +4. Improve the results (a bit) +5. 
Polish and store the code/documentation/results so that they are helpful for future generations + +## Progress and Next Steps + + + +1. Preliminary registration of the BRATS dataset. Several details need to be sorted out still. +2. ... + +# Illustrations + + + +Example of the unregistered images for a subject in the BRATS dataset: + +![example_unregistered_brats](https://github.com/NA-MIC/ProjectWeek/assets/45266491/dc3aef02-140f-4bb4-964b-4a996cd31caa) + + +# Background and References + + + +* Elastix repo: +* itk-elastix (python wrapping): +* MONAI Label: +* itk-torch-bridge: https://docs.monai.io/en/latest/data.html#module-monai.data.itk_torch_bridge diff --git a/PW39_2023_Montreal/Projects/3DSlicerForLatinAmerica/README.md b/PW39_2023_Montreal/Projects/3DSlicerForLatinAmerica/README.md new file mode 100644 index 000000000..d742510bc --- /dev/null +++ b/PW39_2023_Montreal/Projects/3DSlicerForLatinAmerica/README.md @@ -0,0 +1,155 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/3DSlicerForLatinAmerica/README.html + +project_title: 3D Slicer for Latin America +category: Infrastructure +presenter_location: Online + +key_investigators: + +- name: Sonia Pujol + affiliation: Brigham and Women's Hospital + country: Harvard Medical School, USA + +- name: Steve Pieper + affiliation: Isomics, Inc. 
+ country: USA + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +- name: Adriana Herlinda Vilchis Gonzalez + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Luiz Murta + affiliation: Universidade de São Paulo + country: Brazil + +- name: Lucas Sanchez Silva + affiliation: Universidade de São Paulo + country: Brazil + +- name: João Pedro Alves Januário + affiliation: Universidade de São Paulo + country: Brazil + +- name: Douglas Samuel Gonçalves + affiliation: Universidade de São Paulo + country: Brazil + +- name: Enrique Hernandez Laredo + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Diana Alejandra Mendoza Mora + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Mariana Alvarez-Carvajal + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Gael Garcia + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Valeria Gómez Valdes + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Abigail Mercado Ponciano + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Nubia Sofía González Casanova + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Victor Manuel Montaño Serrano + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Vianney Muñoz Jiménez + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Juan Carlos Avila Vilchis + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Aída García Limas + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Daniel Enrique Fernández García + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Abigail Mercado Ponciano + affiliation: Universidad Autónoma del Estado de México + 
country: Mexico + + +--- + +# Project Description + + + +The goal of this project is to leverage 3D Slicer's internationalization infrastructure to localize the software into Spanish and Portuguese and to develop a novel software infrastructure for tutorial localization. + +## Objective + + + +1. To identify members of the Latin American community interested in 3D Slicer activities in Spanish and in Portuguese +2. To run daily translation hackathons at PW39 +3. To translate the Slicer Language Packs tutorial to Spanish and Portuguese + +## Approach and Plan + + + +### Slice Internationalization Breakout session: + +* Monday, June 12, 2-4 pm EST + +Zoom link for Slicer Internationalization Breakout session: https://etsmtl.zoom.us/j/86060017076?pwd=NmVkb2ovckh6Y3ZjQzZxSUtXU09tZz09#success + +### Daily Slicer internationalization sessions with members of the Slicer community + +Zoom link for daily Slicer Internationalization sessions: https://etsmtl.zoom.us/j/86060017076?pwd=NmVkb2ovckh6Y3ZjQzZxSUtXU09tZz09#success + +* Tuesday, June 13, 9:30-10 am EST +* Wednesday, June 14, 9:30-10:30 am EST +* Thursday, June 15, 11am-12 pm EST + +## Progress and Next Steps + + + +The "how to use" web page of Slicer Language Packs extension has been translated to Latin american Spanish and Brazilian Portuguese + +[Español Latinoamericano](https://github.com/Slicer/SlicerLanguagePacks/blob/main/HowToUse_es_419.md) + +[Português Brasileiro](https://github.com/Slicer/SlicerLanguagePacks/blob/main/HowToUse_pt-br.md) + +# Illustrations + +![example_es](https://user-images.githubusercontent.com/15926896/245937609-71f70b88-ec46-4f57-8ad0-d11045e5234e.png) +![example_pr](https://user-images.githubusercontent.com/15926896/245937613-29d649f4-d9c7-4620-9ea4-b4e597ff1a61.png) + + +# Background and References + + + +*No response* diff --git a/PW39_2023_Montreal/Projects/3DSlicerInternationalization/PW39_SlicerInternationalization.png 
b/PW39_2023_Montreal/Projects/3DSlicerInternationalization/PW39_SlicerInternationalization.png new file mode 100644 index 000000000..bb69e55b7 Binary files /dev/null and b/PW39_2023_Montreal/Projects/3DSlicerInternationalization/PW39_SlicerInternationalization.png differ diff --git a/PW39_2023_Montreal/Projects/3DSlicerInternationalization/PW39_SlicerInternationalization_2.png b/PW39_2023_Montreal/Projects/3DSlicerInternationalization/PW39_SlicerInternationalization_2.png new file mode 100644 index 000000000..d0613003e Binary files /dev/null and b/PW39_2023_Montreal/Projects/3DSlicerInternationalization/PW39_SlicerInternationalization_2.png differ diff --git a/PW39_2023_Montreal/Projects/3DSlicerInternationalization/README.md b/PW39_2023_Montreal/Projects/3DSlicerInternationalization/README.md new file mode 100644 index 000000000..845f27934 --- /dev/null +++ b/PW39_2023_Montreal/Projects/3DSlicerInternationalization/README.md @@ -0,0 +1,141 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/3DSlicerInternationalization/README.html + +project_title: 3D Slicer Internationalization +category: Infrastructure +presenter_location: Online + +key_investigators: + +- name: Sonia Pujol + affiliation: Brigham and Women's Hospital + country: Harvard Medical School, USA + +- name: Steve Pieper + affiliation: Isomics, Inc. 
+ country: USA + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +- name: Mamadou Camara + affiliation: Université Cheikh Anta Diop + country: Senegal + +- name: Ahmedou Moulaye IDRISS + affiliation: Faculté de Médecine, de Pharmacie et d’Odonto-Stomatologie, Université de Nouakchott + country: Mauritania + +- name: Mouhamed Diop + affiliation: Université Cheikh Anta Diop + country: Senegal + +- name: Adama Rama Wade + affiliation: Université Cheikh Anta Diop de Dakar + country: Senegal + +- name: Mohamed Alalli Bilal + affiliation: Université Cheikh Anta Diop de Dakar + country: Senegal + +- name: Idrissa Seck + affiliation: Université Cheikh Anta Diop de Dakar + country: Senegal + +- name: Papa ibra NDIAYE + affiliation: Ecole Superieure Polytechnique, Université Cheikh Anta Diop de Dakar + country: Senegal + +- name: Pape Malick GUEYE + affiliation: Université Cheikh Anta Diop de Dakar + country: Senegal + +- name: Fatou Bintou NDIAYE + affiliation: Université Cheikh Anta Diop de Dakar + country: Senegal + +- name: Attila Nagy + affiliation: University of Szeged + country: Hungary + + +--- + +# Project Description + + + +The project aims to develop a novel software infrastructure to enable the localization of 3D Slicer into multiple languages. 
+ +## Objective + + + +To identify members of the global Slicer community interested in new Slicer activities in their language + +To create a list containing the top 10 Slicer modules to translate for new Slicer Internationalization contributors + +To create a new module translation lookup table for the Language Packs Extension tutorial + +## Approach and Plan + + + +### Slice Internationalization Breakout session: + +* Monday, June 12, 2-4 pm EST + +Zoom link for Slicer Internationalization Breakout session: https://etsmtl.zoom.us/j/86060017076?pwd=NmVkb2ovckh6Y3ZjQzZxSUtXU09tZz09#success + +### Daily Slicer internationalization sessions with members of the Slicer community + +Zoom link for all Slicer Internationalization sessions: https://etsmtl.zoom.us/j/86060017076?pwd=NmVkb2ovckh6Y3ZjQzZxSUtXU09tZz09#success + +* Tuesday, June 13, 9:30-10:30 am EST +* Wednesday, June 14, 9:30-10:30 am EST +* Thursday, June 15, 11:00-12:00 pm EST + +## Progress and Next Steps + + + +* Daily Slicer Internationalization sessions with over 20 participants from Africa, Latin America, North America and Europe + +* Added the Module translation lookup table to the Slicer Language Packs repository. Click [here](https://github.com/Slicer/SlicerLanguagePacks/blob/main/Module%20translation%20lookup%20table.md) to view it! + +* [Excel worksheet with language selection](https://github.com/Slicer/SlicerLanguagePacks/releases/tag/TranslationResources) + +* Find Text tool of the Language Packs Extension. 
Click [here](https://github.com/Slicer/SlicerLanguagePacks/blob/main/TranslationGuidelines.md#:~:text=Find%20Text%20tool%20of%20the%20Language%20Packs%20Extension) + + + +# Illustrations + +## Project meetings + +PW39_SlicerInternationalization + +![SlicerPW39-2](https://github.com/NA-MIC/ProjectWeek/assets/1847492/84fd8116-4e49-497a-a8ec-87c291f49c22) + +* Based on our discussions, we realized there's an interest in using localized widgets in Slicer virtual reality environments. Thanks to Csaba Pinter and Simon Drouin we were able to make rapid prototypes for French and Korean! + +## Videos + + + + + +* More information on the VR widget developments [on their project page](https://projectweek.na-mic.org/PW39_2023_Montreal/Projects/SlicerVRInteraction/). + +# Background and References + + + +*No response* diff --git a/PW39_2023_Montreal/Projects/AI-EnhancedVirtualResectionsforImprovedSlicer-LiverSurgicalPlanning/README.md b/PW39_2023_Montreal/Projects/AI-EnhancedVirtualResectionsforImprovedSlicer-LiverSurgicalPlanning/README.md new file mode 100644 index 000000000..17f842ed5 --- /dev/null +++ b/PW39_2023_Montreal/Projects/AI-EnhancedVirtualResectionsforImprovedSlicer-LiverSurgicalPlanning/README.md @@ -0,0 +1,58 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/AI-EnhancedVirtualResectionsforImprovedSlicer-LiverSurgicalPlanning/README.html + +project_title: AI-Enhanced Virtual Resections for Improved Slicer-Liver Surgical Planning +category: IGT and Training +presenter_location: In-person + +key_investigators: +- name: Gabriella d'Albenzio + affiliation: The Intervention Centre (OUS) + country: Norway + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +- name: Rafael Palomar + affiliation: The Intervention Centre (OUS) + country: Norway +--- + +# Project Description + +The primary aim of this project is to utilize artificial intelligence (AI) to enhance the 
surgical planning of Slicer-Liver through the generation of virtual resections. The initial focus is on employing AI for liver resection planning, specifically using complex anatomical information obtained from CT and/or MRI scans, such as the hepatic and portal veins, as well as the liver parenchyma. The objective is to train a model capable of generating optimal resections in the form of parametric surfaces, while also providing control points that can be adjusted to modify the suggested plan. To achieve this, two distinct deep learning approaches can be explored: + +**Proposal 1 - SplineNet:** Instead of parametrizing a set of points as a spline patch, which can introduce errors due to noise, sparsity, and non-uniform sampling, this proposal suggests employing a neural network to directly predict control points. SplineNet, a neural network referenced in this project, takes the boundary 3D points of a liver segment as input and produces a fixed-size grid of control points, yielding more robust results. + +**Proposal 2 - Multimodal deep learning for generating liver resection suggestions:** This proposal involves a two-step process. Firstly, the boundary 3D points of a liver segment and the 3D CT volume, along with anatomical segmentations, are processed by modality-specific feature extraction networks (CNN and PointNet) independently, to identify regional and geometric features for each modality. Subsequently, the modality-based features are fed into a siamese architecture consisting of cross-modal attention blocks, which capture local features and establish their global correspondence across modalities. Finally, a recurrent neural network (RNN) block is utilized to extract the control points, which can be adjusted by the surgeon to modify the suggested plan. + +## Objective + +1. Engage in collaborative discussion with other members to assess the practicality of implementing the two networks within a clinical environment. +2. 
Develop a dedicated Slicer module to effectively apply trained models for liver resection surfaces. + +## Approach and Plan + +- Utilizing the [ABC dataset](https://deep-geometry.github.io/abc-dataset/) for training purposes. +- Employing SPLINet and a multimodal deep learning network for parametric surface reconstruction on the ABC dataset. +- Drawing inspiration from @ungi to develop a Slicer module with PyTorch implementation of trained models. + +## Progress and Next Steps + +- SPLINet has been trained using ABC data. +- The user has the ability to modify the number of control points, which serves as a parameter. +- The output predictions can be uploaded into Slicer as a GridSurface. +- ToDO: Train and test the model on liver resection surfaces and develop a Slicer module with PyTorch implementation. + +# Illustrations +![image_00064](https://github.com/dalbenzioG/ProjectWeek/assets/75131750/4acb105b-5cc3-4002-960f-2fbe549f6208) +![image_00060](https://github.com/dalbenzioG/ProjectWeek/assets/75131750/03d72a88-b9b5-404a-a866-c8f751f4f8f5) + +# Background and References +[Saiti, E., and T. Theoharis. "Multimodal registration across 3D point clouds and CT-volumes." Computers & Graphics 106 (2022): 259-266.](https://www.sciencedirect.com/science/article/pii/S0097849322001121) +[Sharma, Gopal, et al. "Parsenet: A parametric surface fitting network for 3d point clouds." Computer Vision–ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part VII 16. 
Springer International Publishing, 2020.](https://graphics.stanford.edu/courses/cs348n-22-winter/PapersReferenced/ParSeNet%20A%20Parametric%20Surface%20Fitting%202003.12181.pdf) diff --git a/PW39_2023_Montreal/Projects/AmpsczCollaborationSpaceTutorials/README.md b/PW39_2023_Montreal/Projects/AmpsczCollaborationSpaceTutorials/README.md new file mode 100644 index 000000000..423dc0b7c --- /dev/null +++ b/PW39_2023_Montreal/Projects/AmpsczCollaborationSpaceTutorials/README.md @@ -0,0 +1,84 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/AmpsczCollaborationSpaceTutorials/README.html + +project_title: AMPSCZ Collaboration Space Tutorials +category: Quantification and Computation +presenter_location: In-person + +key_investigators: + +- name: Sylvain Bouix + affiliation: ÉTS + country: Canada + +- name: Tina Kapur + affiliation: BWH + country: USA + +- name: Ofer Pasternak + affiliation: BWH + country: USA + +- name: Nora Penzel + affiliation: MGH + country: USA + +- name: Kevin Cho + affiliation: BWH + country: USA + +- name: Ameneh Asgari-Targhi + affiliation: BWH + country: USA + +--- + +# Project Description + + + +The AMPSCZ project allows consortium researchers to access an AWS workspaces virtual desktop with direct access to the AMPCZ data lake hosted at the NIMH data archive (NDA). +This project will consist of generating R and Python notebooks to illustrate how to access and analyze datasets using this collaboration space. + +## Objective + + + +1. Objective A. Build python and R notebooks showing how to access and interact with AMPSCZ data + +## Approach and Plan + + + +1. Build Python and R notebooks to access the data lake +2. Build cross-instrument data analyses of tabular data +3. Build example of loading and inspecting raw non-tabular data (e.g., MRI data with Slicer). + +## Progress and Next Steps + + + +1. Installed RStudio and all associated packages on AWS Linux. +2. 
Installed Python and associated packages on AWS Linux. +3. Installed Slicer on AWS Linux. +4. Some issues with MesaGl version on AWS Linux prevented us from getting SlicerJupyter to work. +5. Ran out of time installing FSL. + +# Illustrations + + +![R markdown](image.png) +![R markdown1](image1.png) +![R markdown2](image2.png) + +# Background and References + + + +*No response* diff --git a/PW39_2023_Montreal/Projects/AmpsczCollaborationSpaceTutorials/image.png b/PW39_2023_Montreal/Projects/AmpsczCollaborationSpaceTutorials/image.png new file mode 100644 index 000000000..bdceb751c Binary files /dev/null and b/PW39_2023_Montreal/Projects/AmpsczCollaborationSpaceTutorials/image.png differ diff --git a/PW39_2023_Montreal/Projects/AmpsczCollaborationSpaceTutorials/image1.png b/PW39_2023_Montreal/Projects/AmpsczCollaborationSpaceTutorials/image1.png new file mode 100644 index 000000000..7ae0759cf Binary files /dev/null and b/PW39_2023_Montreal/Projects/AmpsczCollaborationSpaceTutorials/image1.png differ diff --git a/PW39_2023_Montreal/Projects/AmpsczCollaborationSpaceTutorials/image2.png b/PW39_2023_Montreal/Projects/AmpsczCollaborationSpaceTutorials/image2.png new file mode 100644 index 000000000..6447dbc53 Binary files /dev/null and b/PW39_2023_Montreal/Projects/AmpsczCollaborationSpaceTutorials/image2.png differ diff --git a/PW39_2023_Montreal/Projects/AmpsczFirstDataReleaseDocumentation/README.md b/PW39_2023_Montreal/Projects/AmpsczFirstDataReleaseDocumentation/README.md new file mode 100644 index 000000000..b8dd44965 --- /dev/null +++ b/PW39_2023_Montreal/Projects/AmpsczFirstDataReleaseDocumentation/README.md @@ -0,0 +1,75 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/AmpsczFirstDataReleaseDocumentation/README.html + +project_title: AMPSCZ First Data Release Documentation +category: Quantification and Computation +presenter_location: In-person + +key_investigators: + +- name: Sylvain Bouix + affiliation: ÉTS + 
country: Canada + +- name: Tina Kapur + affiliation: BWH + country: USA + +- name: Ameneh Asgari-Targhi + affiliation: BWH + country: USA + +--- + +# Project Description + + +The Accelerating Medicines Partnership (AMP®) program is a public-private partnership between the National Institutes of Health, the U.S. Food and Drug Administration, the European Medicines Agency, pharmaceutical and life science companies, non-profit and other organizations. The AMP Schizophrenia (SCZ) program was launched in 2020 to address the critical need for more effective treatments for people with schizophrenia and related mental health conditions. +The project aims to recruit and test over 2,000 individuals at clinical high risk for psychosis using a longitudinal multi-modal protocol involving imaging, EEG and event-related potentials, fluid-based biomarkers, cognitive assessments, and speech sampling). The collection of these biomarkers over time will validate their use and efficacy in the CHR population to establish early indicators of pharmacologic treatment efficacy. + +The AMPSCZ project will have its first public data release in July and we want to finalize documentations and "customer-facing" material. + +## Objective + + + +1. Objective A. Generate documentation for the AMPSCZ data release. + +## Approach and Plan + + + +1. Describe specific steps of **what you plan to do** to achieve the above described objectives. + +## Progress and Next Steps + + + +1. Drafted a first version of the description of the project. +2. Collected documentation material for Smartphone Data and Penn Cognitive Battery. +3. Reviewed NIMH Data Archives (NDA) webpages structure with Tina. +4. Started description of clinical measures (Nora) +5. TODO: provide wireframe for NDA site +6. TODO: collect material for other data types (EEG, MRI, Clinical Tests, A/V recording) + +# Illustrations + + + +## TOC from the main manual. 
+![Manual](Screenshot%202023-06-14%20at%202.21.06%20PM.png) + +## Example description of a cognitive test. +![PennCNB Example](Screenshot%202023-06-14%20at%202.17.37%20PM.png) + +# Background and References + + + +*No response* diff --git a/PW39_2023_Montreal/Projects/AmpsczFirstDataReleaseDocumentation/Screenshot 2023-06-14 at 2.17.37 PM.png b/PW39_2023_Montreal/Projects/AmpsczFirstDataReleaseDocumentation/Screenshot 2023-06-14 at 2.17.37 PM.png new file mode 100644 index 000000000..eebbfd721 Binary files /dev/null and b/PW39_2023_Montreal/Projects/AmpsczFirstDataReleaseDocumentation/Screenshot 2023-06-14 at 2.17.37 PM.png differ diff --git a/PW39_2023_Montreal/Projects/AmpsczFirstDataReleaseDocumentation/Screenshot 2023-06-14 at 2.21.06 PM.png b/PW39_2023_Montreal/Projects/AmpsczFirstDataReleaseDocumentation/Screenshot 2023-06-14 at 2.21.06 PM.png new file mode 100644 index 000000000..5bc121b69 Binary files /dev/null and b/PW39_2023_Montreal/Projects/AmpsczFirstDataReleaseDocumentation/Screenshot 2023-06-14 at 2.21.06 PM.png differ diff --git a/PW39_2023_Montreal/Projects/ArInSlicer/README.md b/PW39_2023_Montreal/Projects/ArInSlicer/README.md new file mode 100644 index 000000000..2d00f78fc --- /dev/null +++ b/PW39_2023_Montreal/Projects/ArInSlicer/README.md @@ -0,0 +1,119 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: AR in Slicer +category: VR/AR and Rendering +presenter_location: In-person + +key_investigators: + +- name: Alicia Pose Díez de la Lastra + affiliation: Universidad Carlos III de Madrid + country: Madrid, Spain + +- name: Simon Drouin + affiliation: 'École de Technologie Supérieure ' + country: Montreal , Canada + +- name: Jose Carlos Mateo Pérez + affiliation: 'Universidad de Las Palmas de Gran Canaria' + country: Gran Canaria , Spain + +- name: Pablo Sergio Castellano Rodríguez + affiliation: 'Universidad de Las Palmas de Gran Canaria' + country: Gran Canaria , Spain + +--- + +# Project Description + + + +Microsoft HoloLens 
2 has demonstrated to be an excellent device in many clinical applications. They are mainly used to display 3D patient-related virtual information overlaid to the real world. However, its processing capacity is quite limited, so developing complex applications that require medical image processing is quite convoluted. + +A good solution could be to perform the difficult computations on a specialized software on a computer (i.e. 3D Slicer) and send them in real time to HoloLens 2 so that it can focus solely on visualization. +Up to date, there has been a lack of software infrastructure to connect 3D Slicer to any augmented reality (AR) device. + +During the last year, [Universidad Carlos III de Madrid](https://igt.uc3m.es/augmented-reality/) (Madrid, Spain) and Perk Lab in Queen's University have worked together to develop a novel connection approach between Microsoft HoloLens 2 and 3D Slicer using OpenIGTLink. + +The results of that work are publicly available at [this GitHub repository](https://github.com/BSEL-UC3M/HoloLens2and3DSlicer-PedicleScrewPlacementPlanning). + +The current solution is implemented in a 3 elements system. It is composed by A Microsoft HoloLens 2 headset, the Unity software, and the 3D Slicer platform. +The HoloLens 2 application is not directly built on the device, but remotely transferred from Unity in real time using Holographic Remoting. + +![image](https://github.com/NA-MIC/ProjectWeek/assets/66890913/6be8aff6-c4e8-48f1-a5ce-dfebff0dc0df) + +## Objective + + + +Evaluate the transferability of the aforementioned project to other AR devices. Specifically, we'll focus on the VARJO XR-3 headset. +![Varjo headset](https://github.com/NA-MIC/ProjectWeek/assets/66890913/d731d842-0809-466f-b676-bf9d728f911e) + +## Approach and Plan + + + +1. Connect Varjo headset to Unity. +2. Find a way to remotely render information from Unity to the headset. +3. 3D Slicer creates an OpenIGTLink server. +4. 
Unity, containing the AR application, creates an OpenIGTLink client that connects to the server. +5. Currently, when the application is executed in the Unity editor, it starts sending and receiving messages from 3D Slicer. Simultaneously, it wirelessly streams the app to Microsoft HoloLens 2 using Holographic Remoting. Try to replicate the same with Varjo. + +## Progress and Next Steps + + + +So far, everything works for HoloLens 2. Our current application transfers geometrical transform and image messages between the platforms. +It displays CT reslices of a patient in the AR device. The user wearing the glasses can manipulate the CT plane to see different perspectives. +The application was build for pedicle screw placement planning. + +![20221213_161232_HoloLens](https://user-images.githubusercontent.com/66890913/212931527-035baf4c-4799-4d83-9c60-b8a0f839547e.jpg) + +Our main goal for this week is to replicate the exact same application in the new device. + +# Illustrations + + + +*No response* + +# Background and References + + + +Check out our app in [this GitHub repository](https://github.com/BIIG-UC3M/HoloLens2and3DSlicer-PedicleScrewPlacementPlanning). +This repository contains all the resources and code needed to replicate our work in your computer. +You can also have a look at [this demo](https://www.youtube.com/watch?v=35WiSceP94Q&t=2s) of the functioning of the app on HoloLens 2. + +Transfer of geometrical transforms from HoloLens 2 to 3D Slicer: + +![MovingSpine_GIF](https://user-images.githubusercontent.com/66890913/214097820-96b9f875-4651-4efd-879b-831eb88b7b07.gif) + +Transfer of images from 3D Slicer to HoloLens 2: + +![MovingCT_GIF](https://user-images.githubusercontent.com/66890913/214097469-17a1aa1a-2768-4f73-8c12-bb4ab7d393f0.gif) + + +# Results +It worked! 
+ + + + + + +# Conclusions +The system seems to be easily transferable to any AR or VR headset as long as it can be connected to Unity, either via wifi or with a cable diff --git a/PW39_2023_Montreal/Projects/AutomatedRegistrationCBCT/README.md b/PW39_2023_Montreal/Projects/AutomatedRegistrationCBCT/README.md new file mode 100644 index 000000000..c43f4d344 --- /dev/null +++ b/PW39_2023_Montreal/Projects/AutomatedRegistrationCBCT/README.md @@ -0,0 +1,102 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: Automated Registration of Cone-Beam Computed Tomography +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: +- name: Anchling Luc + affiliation: University of Michigan + +- name: Nathan Hutin + affiliation: University of Michigan + country: France + +- name: Marcela Grugel + affiliation: University of Michigan + country: USA + +- name: Selene Barone + affiliation: University of Michigan + country: USA + +- name: Felicia Miranda + affiliation: University of Michigan + country: USA + +- name: Sophie Roberts + affiliation: University of Melbourne + country: Australia + +- name: Juan Prieto + affiliation: University of North Carolina + country: USA + +- name: Lucia Cevidanes + affiliation: University of Michigan + country: USA +--- + +# Project Description + + +Automated clinical decision support systems rely on accurate analysis of three-dimensional (3D) medical and dental images to assist clinicians in diagnosis, treatment planning, intervention, and assessment of growth and treatment effects. However, analyzing 3D images requires orientation and registration, which are tedious and error-prone tasks. + +This project proposes two novel tools that can automatically perform the orientation and registration of 3D Cone-Beam Computed Tomography (CBCT) scans with high accuracy. Our work aims to reduce the sources of error in the 3D image processing workflow by automating these operations. 
These methods combine classical algorithmic approaches and AI-based models trained and tested on de-identified CBCT volumetric images. + +The registration method is based on an automatic tool [AMASSS](https://github.com/DCBIA-OrthoLab/SlicerAutomatedDentalTools) to perform a segmentation of the different regions of reference (described [here](#illustrations)) used for the regional voxel-based registration + +Our code is available [here](https://github.com/lucanchling/areg) + +The different methods for automatic orientation and registration of 3D CBCT scans rely on a combination of algorithmic and deep-learning techniques to perform both the orientation and the registration automatically. It also uses work that our group of researchers has already developed. Our Python-based algorithm requires multiple libraries for the different image-processing tasks accomplished throughout the proposed method: SimpleITK \cite{Lowekamp2013-jt}, VTK \cite{Schroeder2006-ab}, SimpleElastix \cite{SimpleElastix}. To implement these tools, we also used the Medical Open Network for Artificial Intelligence (MONAI) library, which is a PyTorch-based framework for medical image analysis. + +## Objective + + + +1. Continue to develop the Slicer tool +1. Deploy AREG CBCT in the Slicer extension *SlicerAutomatedDentalTools* + +## Approach and Plan + + +1. Find a method to perform the Automatic Registration of CBCT +1. Implement this method with a python script +1. Use the previous work done on developing other Slicer Extensions to develop the AReg extension for Slicer + +## Progress and Next Steps + + + +1. Method and script working +1. Slicer Extension created and progress made on developing it +1. Deploy AReg tool to the *SlicerAutomatedDentalTools* +1. 
Validating the tool via method paper + +# Illustrations + + +## Comparison between the current and the proposed workflow +![Workflow](https://github.com/lucanchling/ProjectWeek/assets/72148963/a6617e85-df6e-426f-ab4a-eef322453e7e) + +## Different regions of reference (comparison between the full segmentation and the mask) +![MaskComparison](https://github.com/lucanchling/ProjectWeek/assets/72148963/7312a43f-8b00-4513-bf75-0cf1a363b310) + +## Example of Cranial Base Registration +![AREGCBCTExample](https://github.com/lucanchling/ProjectWeek/assets/72148963/66574b8d-a9b0-465a-a5ef-4206bb2d84dd) + +## Screenshot of the User Interface of the developed extension +![AREG](https://github.com/lucanchling/ProjectWeek/assets/72148963/23200b88-21f2-4538-afdf-3dc757454efb) + +# Background and References + + diff --git a/PW39_2023_Montreal/Projects/AutomaticQuantitative3DCephalometrics/README.md b/PW39_2023_Montreal/Projects/AutomaticQuantitative3DCephalometrics/README.md new file mode 100644 index 000000000..7bf691a4f --- /dev/null +++ b/PW39_2023_Montreal/Projects/AutomaticQuantitative3DCephalometrics/README.md @@ -0,0 +1,97 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: Automatic Quantification 3D Components +category: Quantification and Computation +presenter_location: In-person + +key_investigators: +- name: Nathan Hutin + affiliation: University of Michigan + +- name: Luc Anchling + affiliation: University of Michigan + country: France + +- name: Baptiste Baquero + affiliation: University of Michigan + country: France + +- name: Maxime Gillot + affiliation: University of Michigan + country: France + +- name: Lucia Cevidanes + affiliation: University of Michigan + country: USA + +- name: David Allemang + affiliation: University of Michigan + country: USA + +- name: Jean-Christophe Fillion-Robin + affiliation: University of Michigan + country: USA +--- + +# Project Description +The Automatic 
Quantification 3D Components (AQ3DC) tool was developed during NA-MIC Project Week 37 and is now available in the Slicer Q3DC extension. + +The Automatic Quantification 3D Components (AQ3DC) aims to provide a user-friendly automated tool that decreases user time for extraction of quantitative +image analysis features. +AQ3DC is a Slicer extension to automatically compute lists of measurements selected by users for a single case or a whole +study sample, at one or more time points. +The current implementation is aimed at automatic computation of 3D components like distances (AP, RL and SI) +between points, points to line, midpoint between two points or angles (Pitch, Roll and Yaw), interpretation of directionality, which can be further extended to any type of desired computation/quantitative image analysis. The design of the user interface is currently aimed at quantification of craniofacial dental, skeletal and soft tissue structures. + +- Project link : https://github.com/DCBIA-OrthoLab/Q3DCExtension + + + +## Objectives + +1. Rename functions and variables +2. Create documentation for developers +3. Find bugs + + +## Progress and Next Steps +### Progress +1. Resolved a few issues +2. Renamed most of the functions + + +### Next Steps + +1. Improve readability of the code +2. Create documentation for developers + + + + + +# Illustrations + + +# 1. Slicer Interface +![Screenshot from 2022-06-30 18-31-37](https://user-images.githubusercontent.com/83285614/176789715-f90c3ea5-faf6-4e49-bdf3-2683b18ce375.png) + +# 2. List of measurements exported. +![Screenshot from 2022-06-30 18-29-01](https://user-images.githubusercontent.com/83285614/176789814-29e76874-1060-4681-bbe3-a4853975f510.png) + +# 3. Results of the computation for the full list of measurements for a sample of patients. +![Screenshot from 2022-06-30 19-01-23](https://user-images.githubusercontent.com/83285614/176792428-d5c3cb6f-4e56-45c0-95e2-fb24798453a8.png) + +# 4. Skeletal measurement sign meanings. 
+![skeletal_measurement](https://user-images.githubusercontent.com/83285614/176794349-fa99dcc8-bdf7-4518-ba8e-01451ebf05d8.jpeg) + +# 5. Linear measurement sign meanings. +![linear_measurement](https://user-images.githubusercontent.com/83285614/176794371-c87e7cba-8242-4149-bbda-5e67e28859cc.jpeg) + +# 6. Angular measurement sign meanings. +![angular_measurement](https://user-images.githubusercontent.com/83285614/176794405-c1e283e6-bad2-4da5-b777-991e93c419ce.jpeg) diff --git a/PW39_2023_Montreal/Projects/AutomaticRegistration_IOS/README.md b/PW39_2023_Montreal/Projects/AutomaticRegistration_IOS/README.md new file mode 100644 index 000000000..f5652f49e --- /dev/null +++ b/PW39_2023_Montreal/Projects/AutomaticRegistration_IOS/README.md @@ -0,0 +1,108 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: Automatic Registration Intra Oral Scan +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: +- name: Nathan Hutin + affiliation: University of Michigan + +- name: Luc Anchling + affiliation: University of Michigan + country: France + +- name: Lucia Cevidanes + affiliation: University of Michigan + country: USA + +- name: Selene Barone + affiliation: University of Catanzaro + country: Italy + +- name: Juan Prieto + affiliation: University of North Carolina + country: USA + +- name: Jonas Bianchi + affiliation: University of the Pacific + country: USA + +- name: Marcela Gurgel + affiliation: University of Michigan + country: USA + +- name: Najla Al Turkestani + affiliation: University of Michigan + country: USA + +- name: Felicia Miranda + affiliation: University of Sao Paulo + country: Brazil + +- name: Denise Curado + affiliation: University of Michigan + country: USA + +- name: Kinjal Mavani + affiliation: University of Michigan + country: USA + +- name: Margaret Eason + affiliation: University of Michigan + country: USA 
+ +- name: Aron Aliaga del Castillo + affiliation: University of Michigan + country: USA + +--- + + +# Project Description +This project proposes a tool to automatically register intra-oral scans of the upper jaw. The method can register growing and non-growing patients. +The registration method is based on a neural network to create a region of interest on the palate, and ICP (Iterative Closest Point) to register. +The neural network has been trained with extraction and non-extraction cases, and growing and non-growing patients, to obtain a robust neural network. +We will leave the option to the users to also register the mandible by applying the transformation matrix of the maxilla to the mandible. + +The actual code is on this [repository](https://github.com/HUTIN1/ALIDDM/tree/refactoring/py/Palete/CNN). + +## Objective +1. Start to implement automatic registration of IOS in Slicer +2. Deploy Areg + +## Approach and Plan +1. Find a method to perform Automatic Registration of IOS +2. Train a neural network to create a region of interest +3. Implement automatic registration for region of interest + + +## Progress and Next Steps +### Progress +1. Method and script working +2. User Interface + +### Next Steps +1. Implement the method in [Areg](https://github.com/lucanchling/AREG) +2. 
Deploy Areg in [SlicerAutomatedDentalTools](https://github.com/DCBIA-OrthoLab/SlicerAutomatedDentalTools) to be available to all users + +# Illustrations + + +![Screenshot from 2023-06-09 11-45-14](https://github.com/NA-MIC/ProjectWeek/assets/72212416/8f2ee89a-9801-4f60-ace8-a7778779c009) + +Region of interest made by the neural network + +![Screenshot from 2023-06-09 13-48-29](https://github.com/NA-MIC/ProjectWeek/assets/72212416/90cc7bb6-995b-4046-84c3-5ac118abc04c) + +Legend: +- pink: initial scan +- blue: clinician registration +- yellow: automatic registration diff --git a/PW39_2023_Montreal/Projects/ChatidcNavigatingDicomAndIdcUsingNaturalLanguage/README.md b/PW39_2023_Montreal/Projects/ChatidcNavigatingDicomAndIdcUsingNaturalLanguage/README.md new file mode 100644 index 000000000..117f5c618 --- /dev/null +++ b/PW39_2023_Montreal/Projects/ChatidcNavigatingDicomAndIdcUsingNaturalLanguage/README.md @@ -0,0 +1,72 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: 'ChatIDC: Navigating DICOM and IDC using Natural Language' +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Justin Johnson + affiliation: Department of Radiology, Brigham and Women’s Hospital, Boston, MA + country: USA + +- name: Suraj Pai + affiliation: Department of Radiology, Brigham and Women’s Hospital, Boston, MA + country: USA + +- name: Andrey Fedorov + affiliation: Department of Radiology, Brigham and Women’s Hospital, Boston, MA + country: USA + +--- + +# Project Description + + + +ChatIDC is a natural language interface tool for exploring the rich ecosystem of DICOM tags and IDC. It is intended to filter and download highly specific cohorts of imaging data and discover relevant information pertaining to the DICOM standard, IDC documentation, and data that consists of DICOM tags. 
+ +## Objective + + + +The goal of this project is to reduce some technical barriers for clinical researchers to filter and download highly specific cohorts of imaging data. As a result, the project is poised to make the retrieval of data more efficient and encourage the widespread adoption of the platforms in which it is integrated. + +For IDC, you can currently filter cohorts by some of the most common tags with sliders and buttons but this eventually has a limit when the researcher has to gather data that is highly tailored to their use case, which may be highly compositional and utilises more esoteric DICOM Tags. When the number of filter parameters is too large, manual selection and query construction may become infeasible if you are not an expert in both DICOM and SQL. + +## Approach and Plan + + + +We will prepare a list of queries to motivate and test the development of the project. The list will contain “free text request” and the matching SQL query. We will work with IDC/SQL domain “experts” to confirm that SQL queries on this list are both syntactically and semantically correct. This list will be shared at the end of the project week. + +We will implement semantic searching for DICOM tags based on the user's input that is then used for the pretext in the language model. We will work with IDC/DICOM experts to confirm that this curated list is meaningful and comprehensive. This list will be shared at the end of the project week. + +We plan to document our current experience and recommendations to what prompts users should use to improve the quality of the responses generated by the existing LLM interfaces. +We will document our experience observing syntactic accuracy of generated queries to motivate future development (ie, what worked, what didn’t work, what can be fixed with refinements to the prompt, what can be improved with the approach used in the text2cohort project). 
+ +We would like to conduct interviews with the AI developers attending project week to gather the list of requests/ideas for queries that the users would like to see addressed. + +## Progress and Next Steps + + + +*No response* + +# Illustrations + + + + + +# Background and References + + + +*No response* diff --git a/PW39_2023_Montreal/Projects/CreateAgatstonCardiacScoringModule/README.md b/PW39_2023_Montreal/Projects/CreateAgatstonCardiacScoringModule/README.md new file mode 100644 index 000000000..4a3ed4c0d --- /dev/null +++ b/PW39_2023_Montreal/Projects/CreateAgatstonCardiacScoringModule/README.md @@ -0,0 +1,63 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: Create Agatston Cardiac Scoring Module +category: Quantification and Computation +presenter_location: In-person + +key_investigators: + +- name: Curtis Lisle + affiliation: KnowledgeVis + country: USA + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +--- + +# Project Description + + + +The algorithm for calculating Agatston Cardiac scoring (a clinical way to measure arterial occlusion around the heart) was previously written for an older version of Slicer by Hans Johnson and Jessica Forbes. Andras updated the algorithm and his script was recently tested by members of the community. This clinical module would be more useful if a Slicer Extension is built so the Agatston scoring is available for clinicians. This project is a start to creating a Slicer Extension. + +## Objective + +- Objective A. Describe **what you plan to achieve** in 1-2 sentences. + Start constructing a Slicer Extension to run the existing Agatston scoring algorithm. 
+ +## Approach and Plan + - Create an Extension stub using the Extension Wizard + - Refactor the Python code to fit in the extension + - Update the GUI elements and description to guide the user in preparing data + - Test the Extension + - Work with the Slicer core community to publish the Agatston Cardiac Scoring Extension + +## Progress and Next Steps + + - Reviewed existing algorithm and Andras re-written version + - Acquired reference cardiac scan with corresponding Agatston score for testing + - Extension Wizard was easy to get started, though comment text and acknowledgement text entered in the GUI was lost + - **there is still a runtime error, but we are close** + +# Illustrations + +Below is the module interface with the debugging buttons from the Extension Wizard still showing. A sample image is loaded +![GUI-Image](https://data.kitware.com/api/v1/file/648a9d9a488633cbb1275cda/download) + +# Background and References + +Sample Masked Image as input: + +Existing Algorithm to refactor: + + +Andras' updated algorithm on GIST: + + +A recent update to interpreting Agatston scoring: + diff --git a/PW39_2023_Montreal/Projects/DeepLearningModelForBLineDetectionInLungUltrasoundVideosUsingCrowdsourcedLabels/README.md b/PW39_2023_Montreal/Projects/DeepLearningModelForBLineDetectionInLungUltrasoundVideosUsingCrowdsourcedLabels/README.md new file mode 100644 index 000000000..34a2e067a --- /dev/null +++ b/PW39_2023_Montreal/Projects/DeepLearningModelForBLineDetectionInLungUltrasoundVideosUsingCrowdsourcedLabels/README.md @@ -0,0 +1,80 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: Deep learning model for B-line detection in lung ultrasound videos using crowdsourced + labels +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Mike Jin + affiliation: Brigham and Women's Hospital + country: USA + +- name: Tamas Ungi + affiliation: Queen's University + country: Canada + +- name: Colton 
Barr + affiliation: Queen's University + country: Canada / Brigham and Women's Hospital, USA + +- name: Ameneh Asgari-Targhi + affiliation: Brigham and Women's Hospital + country: USA + +- name: Tina Kapur + affiliation: Brigham and Women's Hospital + country: USA + +--- + +# Project Description + + + +Automated B-line detection in lung ultrasound videos has been demonstrated before, most recently by [Lucassen 2023](https://pubmed.ncbi.nlm.nih.gov/37276107/). However, acquiring the many labels necessary can be a resource-intensive process, limited by the availability of expert clinicians capable of producing high-quality labels. Recently, gamified crowdsourcing with a new quality control mechanism and built-in learning for labelers has been demonstrated to be capable of producing annotations on lung ultrasound videos comparable in quality to expert clinicians (as well as analogous results for EEG and skin lesion classification tasks), which can greatly shorten the time required to acquire high-quality labels for model training. Though these crowd labels have been shown to have expert-level quality, it has yet to be demonstrated whether crowd-produced labels are capable of training high-performance models. + +## Objective + + + +1. Train a deep learning model to classify lung ultrasound videos as having B-lines or having no B-lines. + +## Approach and Plan + + + +1. Create a data file associating all 3000+ clips with filepath, crowd label, and expert labels (for those that have expert labels). +2. Adapt the model (ResNet(2+1)D-18 or similar pretrained model) and training procedure used in [Lucassen 2023](https://pubmed.ncbi.nlm.nih.gov/37276107/) to train a new model on a new crowd-labeled dataset of 3000+ lung ultrasound videos from 500 patients. +3. Evaluate the model performance and compare to previously reported model performance for ultrasound video classification of B-line presence. + +## Progress and Next Steps + + + +1. 
De-identified and masked 3000+ lung ultrasound clips +2. Uploaded 3000+ clips with standard filename format to a GPU cluster. +3. Crowd-labeled all 3000+ lung ultrasound clips using 193 clips from \~70 patients for crowd training. + +*** PW39 progress *** +1. Lots of helpful discussions about model selection and handling varying input size +2. Tried two different existing CNN + RNN solutions, but thwarted by hardware/environment setup/version compatibility issues. +3. First time seeing functionality of 3D Slicer in greater depth, and was able to demo DiagnosUs annotation collection platform to some folks. + +# Illustrations + + + +*No response* + +# Background and References + + + + diff --git a/PW39_2023_Montreal/Projects/DefiningAndPrototypingLabelmapSegmentationsInDicomFormat/README.md b/PW39_2023_Montreal/Projects/DefiningAndPrototypingLabelmapSegmentationsInDicomFormat/README.md new file mode 100644 index 000000000..124e3dc8b --- /dev/null +++ b/PW39_2023_Montreal/Projects/DefiningAndPrototypingLabelmapSegmentationsInDicomFormat/README.md @@ -0,0 +1,86 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: Defining and Prototyping Labelmap Segmentations in DICOM Format +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Chris Bridge + affiliation: MGH/Harvard + country: USA + +- name: Steve Pieper + affiliation: Isomics, Inc. + country: USA + +- name: David Clunie + affiliation: PixelMed + country: USA + +- name: Andrey Fedorov + affiliation: BWH/Harvard + country: USA + +--- + +# Project Description + + + +The DICOM Segmentation format is used to store image segmentations in DICOM format. Using DICOM Segmentations, which use the DICOM information model and can be communicated over DICOM interfaces, has many advantages when it comes to deploying automated segmentation algorithms in practice. 
However, DICOM Segmentations are criticized for being inefficient, both in terms of their storage utilization and in terms of the speed at which they can be read and written. This is in comparison to other widely-used segmentation formats within the medical imaging community such as NifTi and NRRD. + +While improvements in tooling may alleviate this to some extent, there appears to be an emerging consensus that changes to the standard are also necessary to allow DICOM Segmentations to compete with other formats. One of the major reasons for poor performance is that in segmentation images containing multiple segments (sometimes referred to as "classes"), each segment must be stored as an independent set of binary frames. This is in contrast to formats like NifTi and NRRD that store "labelmap" style arrays in which a pixel's value represents its segment membership and thus many (non-overlapping) segments can be stored in the same array. While the DICOM Segmentation has the advantage that it allows for overlapping segments, in my experience the overwhelming majority of segmentations consists of non-overlapping segments, and thus this representation is very inefficient when there are a large number of segments. + +The goal of this project is to gather a team of relevant experts to formulate changes to the standard to address some issues with DICOM Segmentation. We will focus primarily on "Labelmap" style segmentations and issues surrounding frame compression. Other objectives for further discussion include simplifying per-frame metadata. Although we do not speak for the DICOM standards committee, we hope to put forward a complete proposal that can be considered by the committee. Ideally, the proposal will be backed by multiple interoperable implementations of the proposed objects and demonstrations of their value in reducing object size and complexity. 
+ +The proposal for this project received a considerable amount of constructive feedback from the community: [#643](https://github.com/NA-MIC/ProjectWeek/issues/643) + +@pieper @fedorov @dclunie + +## Objective + + + +1. Put forward a proposal for changes to the DICOM Segmentation object that addresses the needs of the medical image computing community + +## Approach and Plan + + + +1. Gather relevant experts to discuss and appraise potential changes to the DICOM standard for Segmentations +2. Compile a full proposal based on the resulting consensus from the team +3. Implement prototypes of the new proposed objects in the highdicom (python) and dcmjs (javascript) libraries +4. Use the prototype implementations to demonstrate the advantages of the proposed changes on realistic data (e.g. in terms of file size, read/write times) + +## Progress and Next Steps + + + +1. Solicited feedback and items for discussion on proposal #643 +2. Completed a full implementation of LABELMAP creation in highdicom (see [draft pull request](https://github.com/ImagingDataCommons/highdicom/pull/234)). Planar imaging, 3D imaging and tiled slide images are supported. +3. Using an example segmentation of 98 organs/tissues from TotalSegmentator, we got the following sizes: + - Original BINARY segmentation: 385MB + - LABELMAP Compressed with JP2k 6.7MB + - LABELMAP Compressed with JPEG-LS 1.9MB + - LABELMAP Compressed with RLE 4.4MB + - LABELMAP Uncompressed 152MB + +# Illustrations + + + +image + +* Prototype DICOM SEG LABELMAP from highdicom loaded in Slicer using pydicom. 
+ +# Background and References + + + +*No response* diff --git a/PW39_2023_Montreal/Projects/DockerBasedSystemToAssessChallengeSubmissions/README.md b/PW39_2023_Montreal/Projects/DockerBasedSystemToAssessChallengeSubmissions/README.md new file mode 100644 index 000000000..eda2c3a15 --- /dev/null +++ b/PW39_2023_Montreal/Projects/DockerBasedSystemToAssessChallengeSubmissions/README.md @@ -0,0 +1,151 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: Docker-based system to assess challenge submissions +category: Infrastructure +presenter_location: Online + +key_investigators: + +- name: Roya Khajavibajestani + affiliation: Brigham and Women’s Hospital + country: USA + +- name: Erik Ziegler + affiliation: Yunu + country: Netherlands + +- name: Ron Kikinis + affiliation: Harvard Medical School + country: USA + +- name: Steve Pieper + affiliation: Isomics, Inc. + country: USA + +--- + +# Project Description + + + +Project Description: + +Our project is focused on developing a Docker-based submission mechanism for challenge participants. To maintain fairness and make sure that the test set is not used in the training process, the test set will not be released to the participants. Instead, participants will be required to containerize their methods using Docker and submit their Docker containers for evaluation. + +Docker provides an excellent solution for running algorithms in isolated environments known as containers. In our project, we will leverage Docker to create a container that replicates the participants' pipeline requirements and executes their inference script. By encapsulating the entire environment within a container, we can ensure consistent execution and reproducibility. 
+ +## Objective + + + +- Create a sample docker container for submission +- Create an evaluation mechanism on the challenge website +- Create documentation, guidelines, and tutorial for participants + +## Approach and Plan + + + +- Design the docker container, input/output mechanism, requirements, and methods to perform inference using a subset of the validation set. +- Create an evaluation mechanism on the challenge website +- Create a sample submission docker for the test phase and test it on the challenge website +- Create documentation to publish in phase 2 of the challenge. + +## Progress and Next Steps + + +We created a baseline algorithm to assist participants with their submissions. +We used evalutils to develop a code template that participants can customize with their specific algorithms. +We will work with Grand Challenges to create the input and output interface standards for participants which aids us in creating clear instructions on how to format and provide the necessary data. +Participants have to follow the guidelines for building their Docker containers. We will link to the guideline on the original [challenge website](https://lnq2023.grand-challenge.org/). + +**Evaluator container**: + +For generating the project structure we will use evalutils. +Evalutils contains a project generator based on CookieCutter that we can use to generate the boilerplate for our evaluation. + +We will also generate our project with Docker by running a container and sharing our current user id: +```` +docker run -it --rm -u `id -u` -v $(pwd):/usr/src/myapp -w /usr/src/myapp python:3 bash -c "pip install evalutils && evalutils init evaluation LNQ2023" +```` + +Either of these commands will generate a folder called LNQ2023 with everything we need to get started. + +The .gitattributes file at the root of the repository specifies all the files which should be tracked by git-lfs. 
By default all files in the ground truth and test directories are configured to be tracked by git-lfs, but they will only be registered once the git lfs extension is installed on my system and the git lfs install command has been issued inside the generated repository. + +The structure of the project will be: + +```` +└── LNQ2023 + ├── build.sh # Builds your evaluation container + ├── Dockerfile # Defines how to build your evaluation container + ├── evaluation.py # Contains your evaluation code - this is where you will extend the Evaluation class + ├── export.sh # Exports your container to a .tar file for use on grand-challenge.org + ├── .gitattributes # Define which files git should put under git-lfs + ├── .gitignore # Define which files git should ignore + ├── ground-truth # A folder that contains your ground truth annotations + │ └── reference.csv # In this example the ground truth is a csv file + ├── README.md # For describing your evaluation to others + ├── requirements.txt # The python dependencies of your evaluation container - add any new dependencies here + ├── test # A folder that contains an example submission for testing + │ └── submission.csv # In this example the participants will submit a csv file + └── test.sh # A script that runs your evaluation container on the test submission +```` +evaluation.py. is the file where we will extend the Evaluation class and implement the evaluation for our challenge. In this file, a new class has been created, and it is instantiated and run with: + +```` +if __name__ == "__main__": + LNQ2023().evaluate() +```` +This is all that is needed for evalutils to perform the evaluation and generate the output for each new submission. 
+ +# Background and References + + +The generated code for segmentation tasks: +```` +class Myproject(ClassificationEvaluation): + def __init__(self): + super().__init__( + file_loader=SimpleITKLoader(), + validators=( + NumberOfCasesValidator(num_cases=2), + UniquePathIndicesValidator(), + UniqueImagesValidator(), + ), + ) + + def score_case(self, *, idx, case): + gt_path = case["path_ground_truth"] + pred_path = case["path_prediction"] + + # Load the images for this case + gt = self._file_loader.load_image(gt_path) + pred = self._file_loader.load_image(pred_path) + + # Check that they're the right images + assert self._file_loader.hash_image(gt) == case["hash_ground_truth"] + assert self._file_loader.hash_image(pred) == case["hash_prediction"] + + # Cast to the same type + caster = SimpleITK.CastImageFilter() + caster.SetOutputPixelType(SimpleITK.sitkUInt8) + gt = caster.Execute(gt) + pred = caster.Execute(pred) + + # Score the case + overlap_measures = SimpleITK.LabelOverlapMeasuresImageFilter() + overlap_measures.Execute(gt, pred) + + return { + 'ASSD': overlap_measures.GetASSD(), + 'DiceCoefficient': overlap_measures.GetDiceCoefficient(), + } + +```` +The next step is Building and testing, exporting the evaluation container and working on the Algorithm container. 
diff --git a/PW39_2023_Montreal/Projects/EfficientHandlingAndProgressiveLoadingOfCompressedMultiframeDicomImages/README.md b/PW39_2023_Montreal/Projects/EfficientHandlingAndProgressiveLoadingOfCompressedMultiframeDicomImages/README.md new file mode 100644 index 000000000..9047fd2f8 --- /dev/null +++ b/PW39_2023_Montreal/Projects/EfficientHandlingAndProgressiveLoadingOfCompressedMultiframeDicomImages/README.md @@ -0,0 +1,116 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: Efficient Handling and Progressive Loading of Compressed Multiframe DICOM Images +category: Cloud / Web +presenter_location: Online + +key_investigators: + +- name: Ozge Yurtsever + affiliation: Stanford + country: USA + +- name: Emel Alkim + affiliation: Stanford + country: USA + +- name: Steve Pieper + affiliation: Isomics, Inc. + country: USA + +- name: Alireza Sedghi + affiliation: Accolade Imaging, Inc. + country: Canada +--- + +# Project Description + + + +Loading compressed multiframe DICOM images as a whole causes frequent browser crashes, particularly on Microsoft machines. This issue arises due to the large file size of the DICOM images, exceeding the browser's memory capacity. + +The browser's rendering engine attempts to load the entire file into memory, due to the significant size of these images, the browser can quickly exhaust its allocated memory, leading to crashes or unresponsive behavior. + +This issue affects both ePAD and OHIF with the latest WADO-loader version. Note that the crash can be reproduced in the current OHIF v3 version, so it has impact on projects including IDC. + +## Objective + + + +Initiate a discourse about the methodologies for saving, storing, and reading DICOM data, and explore strategies for optimizing the handling of compressed multiframe images to achieve enhanced efficiency and avoid browser crashing. + + + +## Approach and Plan + + +* Confirm that the DICOM data is valid. 
Issues from dciodvfy seem unrelated: +``` +Warning - Missing attribute or value that would be needed to build DICOMDIR - Study ID +USMultiFrameImage +Warning - Unrecognized defined term for value 1 of attribute +Error - Missing attribute Type 2C Conditional Element= Module= +Warning - Unrecognized defined term for value 1 of attribute +``` +* Create a deidentified dataset to reproduce the data. Original US data has burned in patient info, so blank pixel data will be substituted. +* Done: https://github.com/emelalkim/sampledata/releases/tag/large_multiframe +* Explore changing the code so that instead of loading the entire DICOM file at once, the image loading process can be modified to load the image in smaller chunks or frames progressively. This approach may allow the browser to handle smaller portions of the image, reducing the memory burden and enhancing overall stability. + +## Progress and Next Steps + + + +After conducting further tests, we discovered an additional observation regarding the uncompressed images. It became evident that even when working with uncompressed images, if the image size exceeds a certain threshold (500 MB), OHIF crashes almost instantly, within a matter of seconds. + +This finding underscores the importance of addressing the performance and stability concerns, not only for handling compressed images but also for handling larger uncompressed images. It emphasizes the need to optimize the OHIF viewer and its underlying components like cornerstoneWADOImageLoader and dicomParser to ensure robust performance across a wide range of image sizes. + +We have made significant progress in addressing various issues related to the OHIF viewer and its underlying components: + +* _OHIF Build with Proposed PR for Uncompressed Images:_ We successfully created an OHIF build incorporating the proposed pull request (PR) for uncompressed images. Previously, the demo ultrasound series would cause immediate failure and crash the browser. 
However, the proposed fix effectively resolved this issue, allowing smooth loading and scrolling through a multiframe image of approximately 600 MB without any problems. + +PR link: + +* _Handling Compressed Images:_ + * To address the issue with compressed images, we adapted a solution approach inspired by the provided PR link. While the original solution focused on uncompressed images, we applied a similar method within the dicomParser library to handle compressed images. Initially, the attempted solution did not yield the desired outcome. However, after further work and refinement, we were able to fix the issue and submit a PR to dicomParser. + * _Successful Testing of Proposed PR with Compressed Images:_ We have successfully built and tested the proposed PR in ePAD using compressed images. This implementation resolved the crashing issue associated with compressed images, ensuring stable functionality. + * _Deidentify Sample Dataset:_ Although we made efforts to create a deidentified sample dataset, we encountered challenges in blacking out the pixels. We are actively working on finding a solution to overcome this obstacle. + * Configuration of dicomparser's sharedCopy Method: Alireza suggested making the dicomParser's sharedCopy method configurable with a useCopy option. This enhancement would provide other applications and users utilizing dicomparser with the flexibility to choose whether they want to use a copy or not. We will diligently work on implementing this suggestion and update the PR accordingly. + +Ticket link: + +PR Link: + +* _Stress Testing OHIF Viewer and cornerstoneWADOLoader:_ As part of our testing, we performed a stress test to evaluate the limits of the OHIF viewer and the cornerstoneWADOLoader. Despite the implementation of the proposed changes that significantly improved performance, we encountered an ongoing challenge with the OHIF Viewer crashing when loading large datasets. 
It is important to note that this issue does not appear to be specific to either compressed or uncompressed data, as it is reproducible with both types. Upon further investigation, we identified the following key issues: + + * _Crashing when trying to load large data folder:_ Attempting to load a folder larger than 3GB resulted in the viewer failing to load and crashing immediately. It is important to note that the crash occurs while trying to index the folder, and it is likely due to the cache size limitation, which is currently set to 2GB. + * _Crashing when trying to load a large multiframe after loading a couple of series: After a certain number of multiframe images have been opened, the browser crashes during the image loading process. To reproduce this issue, please follow the steps outlined below: + 1. Open the local OHIF page + 2. Load the sample deidentified multiframe + 3. Open the page again in the same browser tab + 4. Load the sample deidentified multiframe + 5. Repeat steps _c_ and _d_ seven or eight times + 6. After a certain number of iterations, the browser will crash during the image loading phase. + +# Illustrations + + + +Crash screenshot ![crash-image](https://github.com/NA-MIC/ProjectWeek/assets/9955081/9f80cbd7-cfa7-4c54-934c-9d165fe38e1a) +Sample multiframe loaded successfully with suggested improvements ![multiframe-uncompressed](multiframe_uncompressed.png) + +# Background and References + + + +Uncompressed ultrasound image + +Unfortunately we couldn't deidentify the compressed ultrasound images. 
+ +Related libraries: + + diff --git a/PW39_2023_Montreal/Projects/EfficientHandlingAndProgressiveLoadingOfCompressedMultiframeDicomImages/multiframe_uncompressed.png b/PW39_2023_Montreal/Projects/EfficientHandlingAndProgressiveLoadingOfCompressedMultiframeDicomImages/multiframe_uncompressed.png new file mode 100644 index 000000000..c4b7e4413 Binary files /dev/null and b/PW39_2023_Montreal/Projects/EfficientHandlingAndProgressiveLoadingOfCompressedMultiframeDicomImages/multiframe_uncompressed.png differ diff --git a/PW39_2023_Montreal/Projects/ExtensionForRecurrentLungInfections/README.md b/PW39_2023_Montreal/Projects/ExtensionForRecurrentLungInfections/README.md new file mode 100644 index 000000000..9108f06f6 --- /dev/null +++ b/PW39_2023_Montreal/Projects/ExtensionForRecurrentLungInfections/README.md @@ -0,0 +1,61 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/ExtensionForRecurrentLungInfections/README.html + +project_title: extension for recurrent lung infections +category: Segmentation / Classification / Landmarking +presenter_location: Online + +key_investigators: + +- name: Pape Mady Thiao + affiliation: école militaire de santé de Dakar + country: Sénégal + +- name: Sonia Pujol + affiliation: Brigham and Women's Hospital, Harvard Medical School + country: USA + +--- + +# Project Description + + + +The objective is to create an extension capable of identifying lung lesions of different ages following repetitive infections. + +## Objective + + + +In order to use the extension in pulmonology to correlate a recent symptomatology with an X-ray image having images related to old infections + +## Approach and Plan + + + +1\. Collect radiographs of 2 groups of patients (A with an ongoing infection, B having recovered from an infection but with sequelae images) +2\. Compare the Hounsfield units of the different lesions and create a threshold for the 2 groups.
+ +## Progress and Next Steps + + + +Gathering of X-Ray images of patients that meet the study criteria + +# Illustrations + + + +Not yet + +# Background and References + + + +*No response* diff --git a/PW39_2023_Montreal/Projects/FacialExpressionFeatureExtractionForVideoInterviews/README.md b/PW39_2023_Montreal/Projects/FacialExpressionFeatureExtractionForVideoInterviews/README.md new file mode 100644 index 000000000..bc542e454 --- /dev/null +++ b/PW39_2023_Montreal/Projects/FacialExpressionFeatureExtractionForVideoInterviews/README.md @@ -0,0 +1,81 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/FacialExpressionFeatureExtractionForVideoInterviews/README.html + +project_title: AMP SCZ Facial expression feature extraction for video interviews +category: Quantification and Computation +presenter_location: In-person + +key_investigators: + +- name: Eduardo Castro + affiliation: IBM Research + country: USA + +- name: Kevin Cho + affiliation: BWH + country: USA + +- name: Ofer Pasternak + affiliation: BWH + country: USA + +- name: Guillermo Cecchi + affiliation: IBM Research + country: USA + +--- + +# Project Description + + + +Put together code that 1) performs facial expression feature extraction for video interviews stored on a data aggregation server, and 2) transfers them to a local directory. It would be based on existing scripts for facial expression feature extraction and an existing data management tool. This project is part of the AMP SCZ program, an initiative for early detection of risk for schizophrenia (https://www.ampscz.org). + +## Objective + + + +1. Objective 1: Adapt our existing code for facial expression analysis to extract features through a proper video pipeline, including running this task for upcoming videos in the data aggregation server. +2. Objective 2: Adapt our data management tool to incorporate the files generated by this pipeline for data transfer. + +## Approach and Plan + + +1. 
Discuss how the data management tool (Lochness) retrieves data from the aggregate server. +2. Define facial expression features of interest. +3. Set up the facial expression analysis code to be run as a proper pipeline. + +## Progress and Next Steps + +Since we could only stay at project week for a couple of days and this was our first face-to-face interaction as a group, we mainly focused on figuring out specifics of the project that are difficult to fully grasp via zoom meetings. Some of the points being discussed were: + +- How Lochness is currently set up to copy processed information from the aggregation server to the server at Brigham and Women's Hospital. +- Defined formatting of the filenames where extracted features would be stored and their path structure. +- Decided to process only videos that were compliant with the Standard Operating Procedures (SOPs). +- Decided to include a log file with the list of frames that were not successfully processed for each video. +- Made a decision about what information not to include in the generated feature extraction csv files (no dates, filenames or facial landmarks). +- Decided to enforce a face confidence threshold (faces detected with less than 0.6 confidence would be discarded). +- Decided to include an extra feature with the number of detected faces per frame (sanity check to flag interviews incorrectly recorded in speaker mode, not following SOPs). + + +We started to incorporate those adjustments in the code and will continue to work on this after project week. Other considerations that will be taken into account for future work are: +- Adjust the code to run in batch mode only for those interviews that have not been processed yet. +- Figure out how to cron this job so that it runs continuously.
+ +# Illustrations + + + +*No response* + +# Background and References + + + +Facial expression code: +Data Management tool: diff --git a/PW39_2023_Montreal/Projects/GPUNonlinearRegistration/README.md b/PW39_2023_Montreal/Projects/GPUNonlinearRegistration/README.md new file mode 100644 index 000000000..66c281c70 --- /dev/null +++ b/PW39_2023_Montreal/Projects/GPUNonlinearRegistration/README.md @@ -0,0 +1,54 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: GPU Nonlinear Registration +category: IGT and Training +presenter_location: In-person + +key_investigators: + +- name: Houssem Gueziri + affiliation: Montreal Neurological Institute and Hospital + country: Canada + +- name: Mohammadreza Eskandari + affiliation: Montreal Neurological Institute and Hospital + country: Canada +--- + +# Project Description + +This project aims to add a multimodal nonlinear registration plugin to Slicer. This work extends the functionality of [GPU Rigid Registration project](https://github.com/NA-MIC/ProjectWeek/blob/master/PW35_2021_Virtual/Projects/GPURigidRegistration/README.md) (which was initially part of [Ibis Neuronav](http://ibisneuronav.org/)). This project is based on [Multi-Modal Image Registration Based on Gradient Orientations of Minimal Uncertainty](https://ieeexplore.ieee.org/abstract/document/6298013). +## Objective + +1. Be able to nonlinearly register a source image to a target image from different modalities + +## Approach and Plan + +1. Figure out what needs to be changed compared to rigid registration +2. Add required components +3. Test + +## Progress +1. Theoretical foundation of the method has been investigated +2. Previous implementation has been reviewed +3. Successfully integrated CPU implementation of non-rigid registration in Ibis + +## Next steps +1. Replace metric value computation with OpenCL code + +# Background and References + +[1] De Nigris, D., et al., 2014.
SymBA: Diffeomorphic Registration Based on Gradient Orientation Alignment and Boundary Proximity of Sparsely Selected Voxels. In Biomedical Image Registration: 6th International Workshop, WBIR 2014, London, UK, July 7-8, 2014. Proceedings 6 (pp. 21-30). [link](https://link.springer.com/chapter/10.1007/978-3-319-08554-8_3) +- http://ibisneuronav.org +- https://github.com/IbisNeuronav/Ibis + +# Illustrations + +Before registration: +![image](https://github.com/NA-MIC/ProjectWeek/assets/8172629/27d88c0c-8694-4548-9ea9-f1c2f3a88517) + +After registration: +![image](https://github.com/NA-MIC/ProjectWeek/assets/8172629/e1a8e139-5244-4293-99b6-57284d0fe513) diff --git a/PW39_2023_Montreal/Projects/GeneralModelRegistrationAndMergingTool/README.md b/PW39_2023_Montreal/Projects/GeneralModelRegistrationAndMergingTool/README.md new file mode 100644 index 000000000..09096d3b3 --- /dev/null +++ b/PW39_2023_Montreal/Projects/GeneralModelRegistrationAndMergingTool/README.md @@ -0,0 +1,91 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: General model registration and merging tool +category: VR/AR and Rendering +presenter_location: Online + +key_investigators: + +- name: Chi Zhang + affiliation: Seattle Children's Research Institute + country: USA + +- name: Arthur Porto + affiliation: Louisiana State University + country: USA + +- name: Sara Rolfe + affiliation: Seattle Children's Research Institute + country: USA + +- name: Murat Maga + affiliation: University of Washington + country: USA + +--- + +# Project Description + + + +We are working on developing a general purpose model registration tool in Slicer. At this moment, I developed a simple test module () using rigid registration functions (RANSAC + ICP) from Open3D and new ITK-based ALPACA module. This can allow people to test registration for purposes such as ALPACA automated landmarking. 
+ +We are thinking about expanding this module into its own system for other purposes related to model registrations. One purpose is to register and align models that represent different parts of an object with overlapping area, and fuse them together. This could be useful for some purposes. For example, it would allow align and fuse models acquired from different angles, such as different parts of an object acquired by photogrammetry techniques. It would also allow virtual fossil reconstruction, which is usually done using commercial software such as Geomagic Studio. + +## Objective + + + +1. Develop a general purpose model registration tool in Slicer. Adding more utilities, such as a parameter adjustment tab. +2. Add new functions for other purposes related to model registration. At this moment, we are thinking about how to align models that represent different parts of an object and fuse them together. This could be useful for photogrammetry and virtual fossil reconstruction. + +## Approach and Plan + + + +1. Add parameter adjustment tab for the current test version +2. Merge registered models that represent different parts of an object into one. One way to aid the alignment is allow users to place a few matching landmarks on two or more models. + +## Progress and Next Steps + + + +1. Current GUI testing version is here: . It implements rigid registration functions (RANSAC + ICP) and new ITK-based ALPACA module. +Screenshot 2023-06-14 at 4 34 06 PM + + +2. Testing the performance of the new ITK-based rigid registration function for more cases. +3. Currently, we are implementing CPD affine registration from the pycpd package. +4. Adding more new registration features in the future. + +# Illustrations + + +Screenshot 2023-06-05 at 10 35 41 AM +Screenshot 2023-06-05 at 10 35 52 AM + +Screenshot 2023-06-05 at 10 36 04 AM + + +These are the models acquired by photogrammetry from two angles. The yellow one has no top, and the red one has no bottom. 
Rigid registration from Open3D can align them pretty well, though not perfect. + +Screenshot 2023-06-05 at 10 24 06 AM + +A sample virtual reconstruction in Geomagic Studio. The skull missed a part at the right side. The yellow part is the mirror image of the counter part at the left side. + +# Background and References + + + +Current testing version is here: . It uses rigid registration functions (RANSAC + ICP) from Open3D and new ITK-based ALPACA module. + +ALPACA module (including the ITK version) repository: + +ALPACA tutorial: + +Open3D rigid registration utilized in ALPACA: diff --git a/PW39_2023_Montreal/Projects/HistologyAiModelsImportedIntoIdc/README.md b/PW39_2023_Montreal/Projects/HistologyAiModelsImportedIntoIdc/README.md new file mode 100644 index 000000000..6a4ffa28c --- /dev/null +++ b/PW39_2023_Montreal/Projects/HistologyAiModelsImportedIntoIdc/README.md @@ -0,0 +1,101 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/HistologyAiModelsImportedIntoIdc/README.html + +project_title: Histology AI model annotations imported into IDC +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Curtis Lisle + affiliation: KnowledgeVis + country: USA + +- name: Daniela Schacherer + affiliation: MEVIS + country: Germany + +- name: David Clunie + affiliation: PixelMed + country: USA + +- name: Maximillian Fischer + affiliation: DKFZ + country: Germany + +- name: Andrey Fedorov + affiliation: Brigham and Women's Hospital + country: USA + +- name: Chris Bridge + affiliation: Brigham and Women's Hospital + country: USA + +--- + +# Project Description + + + +This project focuses on importing whole slide image (WSI) histology images and trained deep learning models into the Imaging Data Commons for access by others. We have developed tissue-level segmentation models for detecting subtypes of rhabdomyosarcoma (RMS) in whole slides. 
Our project is releasing WSIs and the corresponding models trained on the slide images. + +This project will test reading DICOM-WSI imagery (including compression) and focus on how to write out model segmentation results as DICOM-WSI annotations for upload to IDC. We also have classification and regression models, so we need to decide how to write non-imagery classification results as DICOM, as well. + +## Objective + + + +* Write out model segmentation image results as DICOM-WSI Segmentation or Parametric Map objects +* Test models on sample DICOM-WSI images +* Determine where to how to store regression and classification model results as DICOM annotations + +## Approach and Plan + + + +* Verify the algorithms run on DICOM-WSI source images (including compression) +* Understand the semantics associated with DICOM Segmentation and Parametric Map objects (there is a lot to learn here. DICOM is powerful, but comes with some complexity). +* Start with example workflow code from Max. He generated a DICOM parametric map to save the output of his model. +* Write output formatter code to generate proper DICOM for single class and multi-class segmentation images +* Test output annotations in Slim viewer + +## Progress and Next Steps + + + +* Used Kitware's Large-Image package for reading DICOM images. +* Executed Rhabdomyosarcoma detection model (reading DICOM-WSI images) +* Daniela and Chris from the IDC program were a great help with how to navigate DICOM. The HighDicom library is designed nicely. +* Thanks to David Clunie for explaining DICOM segmentation objects +* After many tries, we generated a multi-class segmentation map (a BINARY segmentation type in DICOM) +* We Previewed the image and the matching segmentation in the Slim viewer running on my laptop + + +# Illustrations + + + +Here is the model output drawn as an RGB pseudocolor image. 
Each tissue class determined by the +model is given a different color: +![colorimage](https://data.kitware.com/api/v1/file/648a8497488633cbb1275cbd/download) + +Here is the stained pathology slide image and the model output written as a DICOM segmentation image and +overlayed in the Slim viewer developed by the IDC program. The viewer is zoomed into the right part of the image: + +![dicomimage](https://data.kitware.com/api/v1/file/648a87dd488633cbb1275cc3/download) + +# After Project Week +We want to extend this algorithm to address segmentation images of the same extent, but different resolution than the original image. For this week, we kept the same resolution between the source image and the segmentation image generated by the model. + +# Background and References + + + +- starting example code from Max and Chris Gorman: https://github.com/maxfscher/DICOMwsiWorkflow.git +- Repository containing the model execution code and the dicom output logic, based on the HighDicom library: [Github Link](https://github.com/knowledgevis/rms-infer-code-standalone) diff --git a/PW39_2023_Montreal/Projects/IbisInSlicer/README.md b/PW39_2023_Montreal/Projects/IbisInSlicer/README.md new file mode 100644 index 000000000..cd9f1f5f3 --- /dev/null +++ b/PW39_2023_Montreal/Projects/IbisInSlicer/README.md @@ -0,0 +1,47 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: Ibis in Slicer +category: IGT and Training +presenter_location: In-person + +key_investigators: + +- name: Houssem Gueziri + affiliation: Montreal Neurological Institute and Hopital + country: Canada + +- name: Étienne Léger + affiliation: Montreal Neurological Institute and Hopital + country: Canada + +- name: Simon Drouin + affiliation: École de technologie supérieur + country: Canada +--- + +# Project Description + +Continuing the trend set in the [GPU Rigid Registration 
project](https://github.com/NA-MIC/ProjectWeek/blob/master/PW35_2021_Virtual/Projects/GPURigidRegistration/README.md), the purpose of this project is to port functionalities from the [Ibis Neuronav](http://ibisneuronav.org/) platform to 3D Slicer to increase compatibility between the two systems. During this week, we will focus on the HardwareModule of Ibis, which handles reading hardware set configuration files and creating scene objects and OpenIGTLink connectors accordingly. + +## Objective + +1. Final Objective. Be able to read Ibis configuration files from Slicer to produce an equivalent scene. + +## Approach and Plan + +1. Assess which classes need to be ported. +2. Port/wrap/reimplement necessary components. +3. Test + +## Progress +1. Use generic MRMLNode to encapsulate tool/device properties (e.g., Calibration transform, Tool transform, Mask, etc.) +2. Start with a Python implementation (module?) +3. Possibility of using slicer scenes to share configurations + +# Background and References + +- http://ibisneuronav.org +- https://github.com/IbisNeuronav/Ibis diff --git a/PW39_2023_Montreal/Projects/ImproveTciaBrowserExtension/README.md b/PW39_2023_Montreal/Projects/ImproveTciaBrowserExtension/README.md new file mode 100644 index 000000000..52eda4c4d --- /dev/null +++ b/PW39_2023_Montreal/Projects/ImproveTciaBrowserExtension/README.md @@ -0,0 +1,76 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/ImproveTciaBrowserExtension/README.html + +project_title: Improve TCIA Browser extension +category: Infrastructure +presenter_location: Online + +key_investigators: + +- name: Justin Kirby + affiliation: Frederick National Laboratory for Cancer Research + country: USA + +- name: Adam Li + affiliation: Georgetown University + country: USA + +- name: Andrey Fedorov + affiliation: Brigham and Women's Hospital + country: USA + +--- + +# Project Description + + + +[The Cancer Imaging Archive 
(TCIA)](https://www.cancerimagingarchive.net/) is an NCI-funded service which de-identifies and publishes cancer imaging datasets. The imaging data are organized as “collections” or "analysis result" datasets defined by a common disease (e.g. lung cancer), image modality or type (MRI, CT, digital histopathology, etc) or research focus. An emphasis is made to provide supporting data related to the images such as patient outcomes, treatment details, genomics and expert analyses where available. + +[TCIA Browser](https://github.com/QIICR/TCIABrowser) is an extension that lets users easily download and import TCIA data into 3D Slicer. This project seeks to improve the TCIA Browser extension for 3D Slicer by updating it to leverage [TCIA-Utils](https://github.com/kirbyju/tcia_utils) to access TCIA's APIs. + +## Objective + + + +The major improvements we'd like to address with TCIA Browser include: + +## Approach and Plan + + + +1. Identify locations in the code that use the older API to download or query data and update them to leverage TCIA-Utils functions such as downloadSeries(), getCollections(), getPatients(), getStudies() and getSeries(). +2. Implement a new feature to support logging in to TCIA Browser using the getToken() function in TCIA-Utils. +3. Review the existing metadata fields in the Browser GUI. Perform queries of the TCIA database to determine how often these fields are populated. +4. Discuss and agree on other available metadata fields that may be useful to Slicer users. Run queries to find out how often they're populated. Include external sources from NCI's Cancer Research Data Commons that may include genomic, proteomic and clinical data on the same subjects that TCIA hosts. +5. Update the GUI with a "Download TCIA Manifest" button and leverage the TCIA-Utils downloadSeries() function with the input_type = "manifest" option to pass the path of a \*.TCIA manifest file as the series_data parameter. 
+ +## Progress and Next Steps + + + +1. Identified the code that used the old API and updated them to use the new API. +2. Added and removed some columns/metadata fields within the browser widget. +3. Set the default cache option to off. +4. Next Step: Implement the login function; Update the new API further to use the tcia_utils modules directly; Implement the manifest file download function. + +*For anyone interested, click [here](https://github.com/QIICR/TCIABrowser/pull/46) to view the PR* + +# Illustrations + + +Screenshot 2023-06-15 at 10 09 48 + +*No response* + +# Background and References + + + +*No response* diff --git a/PW39_2023_Montreal/Projects/IntegrationOfHapticDeviceIn3DSlicerForLumbarPuncture/README.md b/PW39_2023_Montreal/Projects/IntegrationOfHapticDeviceIn3DSlicerForLumbarPuncture/README.md new file mode 100644 index 000000000..32aa8ded4 --- /dev/null +++ b/PW39_2023_Montreal/Projects/IntegrationOfHapticDeviceIn3DSlicerForLumbarPuncture/README.md @@ -0,0 +1,113 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/IntegrationOfHapticDeviceIn3DSlicerForLumbarPuncture/README.html + +project_title: Integration of Haptic Device in 3D Slicer for Lumbar Puncture +category: IGT and Training +presenter_location: In-person + +key_investigators: + +- name: Pablo Sergio Castellano Rodríguez + affiliation: Universidad de Las Palmas de Gran Canaria + country: Spain + +- name: Jose Carlos Mateo Pérez + affiliation: Universidad de Las Palmas de Gran Canaria + country: Spain + +- name: Juan Bautista Ruiz Alzola + affiliation: Universidad de Las Palmas de Gran Canaria + country: Spain + +--- + +# Project Description + + + +The main objective of the project is to integrate the haptic device Touch 3D Systems into 3D Slicer through an OpenIGTLink connection with the Unity platform. Slicer To Touch is the 3D Slicer module that contains the scene with the 3D models of the spine and the needle. 
This module has an interface where the user can configure the number, position and value of the resistances to be exerted by the haptic device. These values will be included in a .json file that will later be transferred to Unity, which will process this data and configure the forces of the haptic device within the Unity environment. Finally, through the OpenIGTLink connection bridge, a real-time connection will be created, where the transformations and the resistances of the haptic device will be shared with the 3D Slicer scene. This idea comes from a project for a lumbar puncture training system that makes use of this device, but the body tissues, their location and thickness are generic. This way you can make a segmentation of a real patient's back with its own characteristics and practice the lumbar puncture before doing it with the patient. Due to the way it works, it could be used in other procedures. + +## Objective + + + +1. Create a module that with the help of Unity and OpenIGTLink allows us to interact with a back model of a real patient obtained by segmentation of medical images. In this way we can train the lumbar puncture on the model of a real patient feeling the resistance in the body tissues. +2. Automate the process of creating resistances on segmentation-generated models so that clinicians can easily perform lumbar puncture and other procedures with a sense of realism. + +## Approach and Plan + + + +1. Creation of the 3D Slicer module with fields to enter the number of resistances, positions and values. +2. Generate a .json file with all the information entered in the module. +3. Create a Unity project with a script that reads the generated .json file and creates a scene with the resistances in that position and with those values. +4. Connect Unity to 3D Slicer through OpenIGTLink to send the transforms and see the needle movements in 3DSlicer. +5. 
Generate an executable application from the Unity project with a simple look and feel that does the procedure automatically so that the clinical user finds it easy to use and does not have to deal with the Unity interface. +6. Do a documentation search of other procedures to check that the project works correctly in them. We are looking for other clinical procedures for which this project may be useful and for which there is information available. + +## Progress and Next Steps + + + +Steps that are already done: + +1. Create a 3d model of a real patient back from segmentation of medical images. + +![Screenshot (33)](https://github.com/NA-MIC/ProjectWeek/assets/117910171/98f78b7b-61c5-451c-9277-9b432ca00f41) + + +2. Indicate with MarkUps the tissues which we want to feel. + +![Screenshot (34)](https://github.com/NA-MIC/ProjectWeek/assets/117910171/b8117833-3df2-45f1-a7d4-f93dd315458c) + + +3. Include in SlicerToTouch module the number of resistances, positions and force value for each tissue. This module generates a json file with all the information. + +![Screenshot (43)](https://github.com/NA-MIC/ProjectWeek/assets/117910171/988cbfc0-0ba2-4ea1-b11a-9f279c83adb8) + +![configfile](https://github.com/NA-MIC/ProjectWeek/assets/117910171/a58ca86b-ffcc-413d-8638-8f1234e16c2e) + + +4. Using Unity in the background we read that file and automatically a new scene is created with the haptic materials. Also, at the same time, it sends the transform of the haptic device to Slicer by OpenIGTLink, so you can see a needle moving. + +![2023-06-14-16-09-55-Trim](https://github.com/NA-MIC/ProjectWeek/assets/117910171/34822062-8a61-4ade-b346-e6e5a6d8dee3) + + + +What we are actually working on: + +1.
Integration of Hololens 2 for the visualization of the scene: +![20230614-224421-HoloLens-Trim-Tr (1)](https://github.com/NA-MIC/ProjectWeek/assets/117910171/d66de98b-a539-4407-a489-fdf2aa398b0f) + + +Next steps: + +1. Include metrics in order to analyze the procedure. +2. Restrict the movement to just one axis once you are inside the back model. +# Illustrations + + + +3D Slicer Module in which you enter the resistances (left) and the .json file with the information of these resistances (right)(Picture1.png) + +![Picture1](https://github.com/NA-MIC/ProjectWeek/assets/134281471/02e28cdd-11dc-4f3c-b714-1c7164456f05) + +Unity interface after reading the information from the .json file with the resistances created in the positions and the needle as a visual mesh of the haptic device (left) and script that makes it work (right) (Picture2.png) + +![Picture2](https://github.com/NA-MIC/ProjectWeek/assets/134281471/e4ac4786-0ae6-442e-b068-8808591c1e99) + +# Background and References + + + +* Real-Time integration between Microsoft HoloLens 2 and 3D Slicer. (Alicia Pose Diez de la Lastra) + + +* OpenIGTLink-Unity. 
+ diff --git a/PW39_2023_Montreal/Projects/LiveTrackedUltrasoundProcessingWithPytorch/README.md b/PW39_2023_Montreal/Projects/LiveTrackedUltrasoundProcessingWithPytorch/README.md new file mode 100644 index 000000000..23d1a06f6 --- /dev/null +++ b/PW39_2023_Montreal/Projects/LiveTrackedUltrasoundProcessingWithPytorch/README.md @@ -0,0 +1,76 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/LiveTrackedUltrasoundProcessingWithPytorch/README.html + +project_title: Live tracked ultrasound processing with PyTorch +category: IGT and Training +presenter_location: In-person + +key_investigators: + +- name: Tamas Ungi + affiliation: Queen's University + +- name: Rebecca Hisey + affiliation: Queen's University + +- name: Róbert Szabó + affiliation: Queen's University / Óbuda University + +- name: Colton Barr + affiliation: Queen's University / BWH + +- name: Tina Kapur + affiliation: Brigham and Women's Hospital + +--- + +# Project Description + + + +Our past code for training and deploying ultrasound segmentation in real time was based on TensorFlow. Example project: + + +The goal for this project week is to provide a new open-source implementation using PyTorch and modern AI tools like MONAI and wandb. A Slicer module will also be provided to deploy trained AI on recorded or live ultrasound streams. + +## Objective + + + +1. Export annotated ultrasound+tracking data for training +2. Example code for training +3. Slicer module to use trained models on ultrasound data in Slicer + +## Approach and Plan + + + +1. All data processing and training code will be here: +2. Slicer module will be here: + +## Progress and Next Steps + + + + +1. Model training and testing is implemented in this repository: +2. Successfully used RunNeuralNet from DeepLearnLive to run a trained PyTorch segmentation model on live ultrasound data. OpenIGTLink data transfer is a good way to run AI models in parallel with Slicer. 
https://github.com/SlicerIGT/aigt/tree/master/DeepLearnLive/RunNeuralNet +3. Need to do precise performance estimation to see the limit of frame rate we can handle from an ultrasound scanner. Also need to explore the effect of AI model size on accuracy and performance. + +# Illustrations + + +![2023-06-15_09-18-01](https://github.com/NA-MIC/ProjectWeek/assets/2071850/ed7e2adc-f23b-4785-ab39-4e33b3466968) + + +# Background and References + + + +*No response* diff --git a/PW39_2023_Montreal/Projects/LongitudinalModelOfPsychosisConversion/README.md b/PW39_2023_Montreal/Projects/LongitudinalModelOfPsychosisConversion/README.md new file mode 100644 index 000000000..4816398c3 --- /dev/null +++ b/PW39_2023_Montreal/Projects/LongitudinalModelOfPsychosisConversion/README.md @@ -0,0 +1,83 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/LongitudinalModelOfPsychosisConversion/README.html + +project_title: AMP SCZ Longitudinal model of psychosis conversion +category: Quantification and Computation +presenter_location: In-person + +key_investigators: + +- name: Pablo Polosecki + affiliation: IBM Research + country: USA + +- name: Nora Penzel + affiliation: MGH + country: USA + +- name: Ofer Pasternak + affiliation: BWH + country: USA + +- name: Guillermo Cecchi + affiliation: IBM Research + country: USA + +--- + +# Project Description + + + +This project is part of the AMP SCZ program, an initiative for early detection of risk for schizophrenia(). + +A key goal in AMPSCZ is to predict which patients that present initially mild or sub-threshold symptoms will eventually develop psychosis. Most predictive models are based on data acquired on their first medical visit (the baseline visit). An important question is how much is gained by following patients over time (longitudinal data). 
In this project we will implement predictive models that make use of this longitudinal information for psychosis prediction. We will focus on implementing a type of models called "joint models", which incorporate time-varying predictors into well known survival analyses. + +https://www.ampscz.org/ + +## Objective + + + +1. Objective A. Implement a Python-based version of longitudinal models adapted for common best practices in machine learning (separate train/test, scikit-learn compatible methods). +2. Objective B. Quantify the advantage of longitudinal models vs baseline predictors in a legacy dataset. + +## Approach and Plan + + + +1. Write a python wrapper, using rpy2, for the R library JM that implements longitudinal analysis. +2. Use synthetic and legacy datasets to test the predictions. +3. Use python libraries such as lifelines or scikit-survival to implement survival analysis with baseline predictions only. +4. Implement permutation tests in time to assess the significance of prediction improvements due to longitudinal change. + +## Progress and Next Steps + + +Our progress was somewhat less than it could have been otherwise since we could only stay for the first half of the week. Nevertheless we made significant progress on the aspects that benefited the most from in-person interaction: understanding the complex dataset we are working with. +1. We first spent time understanding the coding of different events in the dataset, including a number of inconsistencies that were revealed as we dived into the data. We learned that psychosis conversion events are only recorded in special visits, and that their date does not coincide with the conversion date. The latter is coded in a separate field and followed a different date format. +2. We followed an incremental strategy to decide if longitudinal information from a preselected set of clinical measures provided additional prognostic information relative to baseline alone. + 1.
We attempted the simplest models using linear classifiers based on baseline and follow-up clinical measures to get a rough estimate of predictive power with longitudinal data. + 2. We built and tested baseline models using pre-selected baseline variables from the literature. + 3. We combined the baseline models with the baseline and follow-up clinical measures to get a rough estimate of their combined predictive power. +3. We started implementing the "joint modelling" approach that combines standard survival analyses with linear mixed effects modeling. + +We learned that a significant number of converters (close to 40%) do so before there is a chance for a follow-up visit. This complicates the comparison between baseline and follow-up predictions. We also saw little advantage of combining baseline and follow-up information. The mild benefits of follow-up information resided in the follow-up values alone and not their change relative to baseline. +Now that we have converged on an understanding of the dataset and the measures to be used for modeling, the next steps involve estimations using "joint modeling". We hope to also create a python wrapper to these R packages that can be generally useful. A subsequent step would include multivariate modeling of trajectories.
+ + + +# Background and References + + + +* [Joint modeling in R](https://github.com/drizopoulos/JM) +* [sci-kit survival](https://scikit-survival.readthedocs.io/en/stable/index.htmll) +* [lifelines](https://lifelines.readthedocs.io/en/latest/index.html) +* [rpy2](https://rpy2.github.io/doc/v3.5.x/html/index.html) diff --git a/PW39_2023_Montreal/Projects/LungSegmentation/README.md b/PW39_2023_Montreal/Projects/LungSegmentation/README.md new file mode 100644 index 000000000..e8abb7b50 --- /dev/null +++ b/PW39_2023_Montreal/Projects/LungSegmentation/README.md @@ -0,0 +1,95 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/LungSegmentation/README.html + +project_title: 3D Slicer Lung CT Segmentation and Analysis +category: Segmentation / Classification / Landmarking +presenter_location: Online + +key_investigators: +- name: Rudolf Bumm + affiliation: KSGR + country: Germany + +- name: Steve Pieper + affiliation: Isomics, Inc. + country: USA + +- name: Andras Lasso + affiliation: Queens University + country: Canada +--- + +# Project Description + +This is a follow-up to previous 3D Slicer lung CT segmentation PW projects. + + +## Objective + +To improve the LungCTAnalysis extension analysis in 3D Slicer, + +which is frequently used (40 runs per day) + +![image](https://github.com/NA-MIC/ProjectWeek/assets/18140094/51840d88-e21f-489e-9943-e292ea8994b9) + +![image](https://github.com/NA-MIC/ProjectWeek/assets/18140094/ee0a6b06-9647-44b7-be68-84bbd04c4256) + + +the following steps could be taken: + +1. Improve **vessel segmentation** + +2. Develop a better concept for lung **segment (sublobar) segmentation** in 3D Slicer + +3. **Identify tumors** belonging to segments and consider safety margins + +4. Suggest resection of segments which include nutritive vessel resection for **neighboring tumors** + +5. **Differentiate pulmonary arteries and veins** reliably + +6. 
Work on current **OpenSourceCOVID publication** + +## Approach and Plan + +Work on a dedicated Angio-CT. + + +## Progress and Next Steps + +We analyzed the usage statistics of Lung CT Analyzer and decided to keep the mechanism. + +Lung CT Segmenter + +The most expedient way to obtain a precise surgical planning result at the moment involves the use of combined airway segmentation and vessel volume rendering, utilizing the centerblock technique. This method requires minimal manual intervention. We have chosen to enhance the division of vascular structures within the vessel mask by eliminating the hilar structures, where there's a prominent overlay of vessels. Subsequently, we employ a 'grow-from-seeds' analysis which promises improved accuracy. The VMTK centerline analysis, however, appears to be a less preferable option. This is largely due to its lack of efficiency in differentiating between pulmonary arteries and vessels. + +Lung CT Analyzer + +The primary constraint of the computational method appears to be its sensitivity to the selected threshold values. Even minor variances in well-calibrated clinical CTs can lead to appreciable differences in the final outcome, affecting about 20-30% of cases. The likely source of this issue is the use of rigid threshold values, which inherently makes the classification highly responsive to the specific threshold set. Andras Lasso has proposed mitigating this sensitivity through the application of soft classifiers. We intend to follow this approach, potentially augmenting it with AI pattern detection. 
+ +# Illustrations + +![image](https://github.com/NA-MIC/ProjectWeek/assets/18140094/a5b9aa50-3f4f-4a70-9edb-d90346a918c2) + +Fig 1: Pulmonary vesselmask + +![image](https://github.com/NA-MIC/ProjectWeek/assets/18140094/8760d090-c003-4a67-a0be-bba3c17fc677) + +Fig 2: Combined airway segmentation and vessel volume rendering, centerblock technique + +![image](https://github.com/NA-MIC/ProjectWeek/assets/18140094/c8e5f155-5c03-4b94-9983-0fb6850ae7a1) + +Fig 3: Centerline analysis with VMTK + +# Background and References + +Lung CT Analyzer + +https://github.com/rbumm/SlicerLungCTAnalyzer + +From Voxels to Prognosis: AI-Driven Quantitative Chest CT Analysis Forecasts ICU Requirements in 78 COVID-19 Cases (preprint) + +https://doi.org/10.21203/rs.3.rs-3027617/v3 diff --git a/PW39_2023_Montreal/Projects/MONAIBundleIntegrationTutorial/README.md b/PW39_2023_Montreal/Projects/MONAIBundleIntegrationTutorial/README.md new file mode 100644 index 000000000..ea2636706 --- /dev/null +++ b/PW39_2023_Montreal/Projects/MONAIBundleIntegrationTutorial/README.md @@ -0,0 +1,185 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/MONAIBundleIntegrationTutorial/README.html + +project_title: HOW TO use MONAI bundle to integrate models from MONAI model ZOO +category: Segmentation / Classification / Landmarking +presenter_location: Online + +key_investigators: +- name: Rudolf Bumm + affiliation: KSGR + country: Germany + +- name: Andres Dias-Pinto + affiliation: NVIDIA + country: USA + +- name: Andras Lasso + affiliation: Queens University + country: Canada +--- + +# Project Description + +NVIDIA AI-assisted annotation (AIAA) is no longer actively maintained and MONAI bundle has been established to load a wide selection of pre-trained models for radiology and pathology. 
+ + +## Objective + + +Our objective is to provide a detailed step-by-step description on how to use MONAI bundle for this task. + +## Step-by-Step Description + +Setting up an AWS EC2 Windows server in the cloud: +The detailed process [is described here](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/SlicerCloud/). + +Install MONAILabel + +Do a “cd $home”, to the place where the MONAILabel folder will be created. + +Firstly, begin by uninstalling the existing MONAILabel and Monai installations, because as of 6/2023 we need a special MONAI version to make this work. +This is accomplished by running the following commands in the terminal: + + pip uninstall monailabel + pip uninstall monai + +Then, follow our outlined procedures for setting up MONAILabel from scratch. +Please refer to [these instructions](https://projectweek.na-mic.org/PW37_2022_Virtual/Projects/MONAILabelLung/MONAILabel_Installation.html), they can be found on our Project Week webpage 1. + +Follow these guidelines until reaching the command to set the MONAILabel script paths: + + $Env:PATH += ";C:\Users\yourname\MONAILabel\monailabel\scripts" + +After setting the paths, the next step is to install Monai in a special version (this may change in the future and will be adapted): + + pip install monai==1.2.0 + +After a “cd $home”, where the MONAILabel folder is located, the start_server command can be issued: + + monailabel start_server --app MONAILabel/sample-apps/monaibundle --studies c:/Data/Task06_Lung/imagesTr --conf models lung_nodule_ct_detection + + +possible arguments for "conf models" are: + +lung_nodule_ct_detection\ +pancreas_ct_dints_segmentation\ +prostate_mri_anatomy\ +renalStructures_UNEST_segmentation\ +spleen_ct_segmentation\ +spleen_deepedit_annotation\ +swin_unetr_btcv_segmentation\ +wholeBody_ct_segmentation\ +wholeBrainSeg_Large_UNEST_segmentation\ + +After this command, the correct and requested model is automatically loaded from the [Monai Model
Zoo](https://monai.io/model-zoo.html), which is a highly commendable feature. + +Then proceeded to test the setup with the 3D Slicer and the MONAILabel extension using the CT Chest dataset. + +It is great to see that the AI successfully detects some nodules! + +Sidenote: The process was implemented on an AWS EC2 Windows server instance with an A10G GPU (24 GB dedicated video RAM). + + +## Approach and Plan + +During the workshop, create the wholeBody_ct_segmentation MONAILabel model on an AWS EC2 Windows instance. + +![image](https://github.com/NA-MIC/ProjectWeek/assets/18140094/06d8146a-4d0e-4a6d-a7d3-59158f773647) + +We'll attempt using the server simultaneously from various places while documenting the installation procedure. + +The IP address of the MONAILabel server is http://52.209.177.211:8000/. During the Project week (Monday through Wednesday), the server will be accessible daily from 2 p.m. to 4 p.m. local time. + +## Progress and Next Steps + +We held three workshops during the conference with good success. In each of the workshops we were able to + +- reliably connect to the AWS server instance +- start MONAILabel +- use the monaibundle app +- load one of the two demonstrated models +- start 3D Slicer on the server +- perform inference with the preconfigured model +- demonstrate label adjustments +- submit labels +- work remotely on the AWS server instance +- answer first-timer questions + + +Here is a list of the bundles from the [MONAI Model Zoo page](https://monai.io/model-zoo.html) as of 5/23: + +**Clara train COVID19 3D classification**: A pre-trained model for 3D COVID-19 classification using CT images【1†source】 + +**Clara train COVID19 3D segmentation**: A pre-trained model for 3D COVID-19 lung and infection segmentation from CT images【3†source】. + +**COVID19 3D segmentation**: A pre-trained model for 3D COVID-19 lung and infection segmentation from CT images【5†source】. 
+ +**Decathlon BrainTumourSegmentation**: A 3D segmentation model pre-trained on the Brain Tumour Segmentation (BraTS) subset of the Medical Segmentation Decathlon dataset【7†source】. + +**Decathlon HippocampusSegmentation**: A 3D segmentation model pre-trained on the Hippocampus subset of the Medical Segmentation Decathlon dataset【9†source】. + +**Decathlon LiverTumourSegmentation**: A 3D segmentation model pre-trained on the Liver Tumour subset of the Medical Segmentation Decathlon dataset【11†source】. + +**Endoscopic tool segmentation**: A pre-trained binary segmentation model for endoscopic tool segmentation【13†source】. + +**Lung nodule ct detection**: A pre-trained model for volumetric (3D) detection of the lung lesion from CT image on LUNA16 dataset【17†source】. + +**Mednist gan**: An example of a GAN generator that produces hand x-ray images like those in the MedNIST dataset【23†source】. + +**Mednist reg**: An example of a ResNet and spatial transformer for hand x-ray image registration【25†source】. + +**Pancreas ct dints segmentation**: Searched architectures for volumetric (3D) segmentation of the pancreas from CT image【29†source】. + +**Pathology nuclei classification**: A pre-trained model for Nuclei Classification within Haematoxylin & Eosin stained histology images【33†source】. + +**Pathology nuclei segmentation classification**: A simultaneous segmentation and classification of nuclei within multitissue histology images based on CoNSeP data【37†source】. + +**Endoscopic Tool Segmentation:** A pre-trained binary segmentation model for endoscopic tool segmentation【13†source】. + +**Lung Nodule CT Detection:** A pre-trained model for volumetric (3D) detection of lung lesions from CT images using the LUNA16 dataset【17†source】. + +**MedNIST GAN:** An example of a GAN generator that produces hand X-ray images like those in the MedNIST dataset【23†source】. + +**MedNIST REG:** An example of a ResNet and spatial transformer for hand X-ray image registration【25†source】. 
+ +**Pancreas CT Dints Segmentation:** A model for volumetric (3D) segmentation of the pancreas from CT images【29†source】. + +**Pathology Nuclei Classification:** A pre-trained model for nuclei classification within haematoxylin & eosin-stained histology images【33†source】. + +**Pathology Nuclei Segmentation Classification:** A model for simultaneous segmentation and classification of nuclei within multitissue histology images based on CoNSeP data【37†source】. + +**Pathology Nuclick Annotation:** A pre-trained model for segmenting nuclei cells with user clicks/interactions【43†source】. + +**Pathology Tumor Detection:** A pre-trained model for metastasis detection on the Camelyon16 dataset【47†source】. + +**Prostate MRI Anatomy:** A pre-trained model for volumetric (3D) segmentation of the prostate from MRI images【51†source】. + +**Renalstructures Unest Segmentation:** A transformer-based model for renal segmentation from CT images【53†source】. + +**Spleen CT Segmentation:** A pre-trained model for volumetric (3D) segmentation of the spleen from CT images【57†source】. + +**Spleen Deepedit Annotation:** A pre-trained model for 3D segmentation of the spleen organ from CT images using DeepEdit【61†source】. + +**Swin Unetr BTCV Segmentation:** A pre-trained model for volumetric (3D) multi-organ segmentation from CT images【65†source】. + +**Valve Landmarks:** A network used to find where valves attach to the heart to help construct 3D FEM models for computation. The output is an array of 10 2D coordinates【69†source】. + +**Ventricular Short Axis 3label:** A network that segments full cycle short axis images of the ventricles, labelling LV pool separate from myocardium and RV pool【71†source】. + +**Wholebody CT Segmentation:** A pre-trained SegResNet model for volumetric (3D) segmentation of the 104 whole body segments【75†source】. 
+ + +# Illustrations + +![image](https://github.com/NA-MIC/ProjectWeek/assets/18140094/cdc7d159-2670-433a-945e-4c7000c21f80) + + +# Background and References + + https://docs.monai.io/en/stable/bundle_intro.html + https://monai.io/model-zoo.html diff --git a/PW39_2023_Montreal/Projects/MhubSlicerIntegration/README.md b/PW39_2023_Montreal/Projects/MhubSlicerIntegration/README.md new file mode 100644 index 000000000..dea58cd74 --- /dev/null +++ b/PW39_2023_Montreal/Projects/MhubSlicerIntegration/README.md @@ -0,0 +1,74 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/MhubSlicerIntegration/README.html + +project_title: MHub-Slicer Integration +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Leonard Nürnberg + affiliation: Department of Radiology + country: Brigham and Women’s Hospital, Boston, MA + +- name: Dennis Bontempi + affiliation: Department of Radiology + country: Brigham and Women’s Hospital, Boston, MA + +- name: Justin Johnson + affiliation: Department of Radiology + country: Brigham and Women’s Hospital, Boston, MA + +- name: Andrey Fedorov + affiliation: Department of Radiology + country: Brigham and Women’s Hospital, Boston, MA + +- name: Hugo Aerts + affiliation: Department of Radiology + country: Brigham and Women’s Hospital, Boston, MA + +--- + +# Project Description + + + +MHub is a repository of self-contained deep-learning models trained for a wide variety of applications in the medical and medical imaging domain. MHub provides the community with reproducible and transparent AI pipelines that work out of the box as intended by the developers. + +As part of our efforts, we developed a first version of a Slicer MHub extension that allows users to run different AI models directly in Slicer without the need to install potentially conflicting dependencies as part of their Slicer Python installation. 
+ +## Objective + + + +The goal of this project is to polish the extension, publish it, and further explore its potential applications and user feedback to expand the extension's capabilities, address its limitations, and ensure its seamless integration with Slicer. + +## Approach and Plan + + + +Work on identified issues/enhancements, and collect feedback from the Slicer community. + +## Progress and Next Steps + + + +*No response* + +# Illustrations + + + +![image](https://github.com/NA-MIC/ProjectWeek/assets/31729248/a437e55a-4772-4cc8-862f-455241d03014) + +# Background and References + + + +*No response* diff --git a/PW39_2023_Montreal/Projects/MobileAugmentedRealityInteractiveNeuronavigator/README.md b/PW39_2023_Montreal/Projects/MobileAugmentedRealityInteractiveNeuronavigator/README.md new file mode 100644 index 000000000..673cef8d9 --- /dev/null +++ b/PW39_2023_Montreal/Projects/MobileAugmentedRealityInteractiveNeuronavigator/README.md @@ -0,0 +1,72 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/MobileAugmentedRealityInteractiveNeuronavigator/README.html + +project_title: 'MARIN: Mobile Augmented Reality Interactive Neuronavigator (in Slicer)' +category: IGT and Training +presenter_location: In-person + +key_investigators: +- name: Mehrdad Asadi + affiliation: Concordia University + country: Canada + +- name: Étienne Léger + affiliation: Montreal Neurological Institute + country: Canada + +- name: Bahar Jahani + affiliation: Concordia University + country: Canada + +- name: Zahra Asadi + affiliation: Concordia University + country: Canada +--- + +# Project Description + + +[MARIN](https://github.com/AppliedPerceptionLab/MARIN) is an application that can be used in conjunction with a neuronavigation platform to enable in situ AR guidance on a mobile device. 
It currently supports iOS and works in conjunction with [Ibis](https://github.com/IbisNeuronav/Ibis) (with the additional [MARIN plugins](https://github.com/AppliedPerceptionLab/IbisPluginsExtraMARIN)). The goal of this project is to implement the same support in Slicer. + +## Objective + + + +1. Add Slicer support for MARIN (provide the same functionalities that Ibis currently does) + +## Approach and Plan + + + +MARIN is a mobile application that can overlay virtual structures over the live camera feed from a device, enabling *in situ* augmented reality navigation for surgical applications (see image below). The MARIN application itself can interface with any platform, provided that the platform supports real-time communication, can handle tracking and generate 3D renderings. Slicer has all of these capabilities. Communication between Slicer and MARIN can be set-up through the OpenIGTLinkIF module. The main components that will have to be implemented are Slicer modules to handle device configuration and rendering of tracked virtual objects. + +## Progress and Next Steps + + + +Because MARIN and the OpenIGTLinkIF don't currently support the same video codecs (H264 only for MARIN vs VP9 only for OpenIGTLinkIF), most of this week's effort was focused on extending MARIN to support more codecs as well as support sending unencoded images. Further work could then be done on the Slicer side to enable more codecs as well. This would allow more flexibility and support for more devices. Unencoded frames will be limited in terms of resolution by the available bandwidth. + +# Illustrations + + + +MARIN demo, with Ibis: + +![teaser_looped_small](https://github.com/NA-MIC/ProjectWeek/assets/17100565/07faf583-2238-4760-8a54-896b75c2f300) + +# Background and References + + +Article: Léger, É., Reyes, J., Drouin, S., Popa, T., Hall, J. A., Collins, D. 
L., Kersten-Oertel, M., "MARIN: an Open Source Mobile Augmented Reality Interactive Neuronavigation System", International Journal of Computer Assisted Radiology and Surgery (2020). +https://doi.org/10.1007/s11548-020-02155-6 + +Source code repository: [MARIN](https://github.com/AppliedPerceptionLab/MARIN/tree/master) diff --git a/PW39_2023_Montreal/Projects/MobileAugmentedRealityInteractiveNeuronavigator/teaser_looped_small.gif b/PW39_2023_Montreal/Projects/MobileAugmentedRealityInteractiveNeuronavigator/teaser_looped_small.gif new file mode 100644 index 000000000..a2628d51f Binary files /dev/null and b/PW39_2023_Montreal/Projects/MobileAugmentedRealityInteractiveNeuronavigator/teaser_looped_small.gif differ diff --git a/PW39_2023_Montreal/Projects/MpreviewDevelopmentOfAStreamlinedSlicerModuleForManualImageAnnotation/README.md b/PW39_2023_Montreal/Projects/MpreviewDevelopmentOfAStreamlinedSlicerModuleForManualImageAnnotation/README.md new file mode 100644 index 000000000..cadef941f --- /dev/null +++ b/PW39_2023_Montreal/Projects/MpreviewDevelopmentOfAStreamlinedSlicerModuleForManualImageAnnotation/README.md @@ -0,0 +1,101 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/MpreviewDevelopmentOfAStreamlinedSlicerModuleForManualImageAnnotation/README.html + +project_title: mpReview Development of a streamlined Slicer module for manual image annotation +category: Cloud / Web +presenter_location: In-person + +key_investigators: + +- name: Deepa Krishnaswamy + affiliation: BWH + +- name: Nadya Shusharina + affiliation: MGH + +- name: Andrey Fedorov + affiliation: BWH + +- name: Andras Lasso + affiliation: Queen's University + +--- + +# Project Description + + + +Currently, there does not exist a simple, streamlined way to annotate many DICOM series and save Segmentation objects. 
Users have to manually load data into Slicer, set up the layouts themselves (in the case of multi-parametric data), name the files, etc. -- this is difficult to do for a large dataset! + +There are extensions that already exist, for instance [CaseIterator](https://github.com/JoostJM/SlicerCaseIterator), [FlywheelCaseIterator](https://github.com/Slicer/ExtensionsIndex/pull/1942) and [SegmentationReview](https://github.com/zapaishchykova/SegmentationReview). Unfortunately, some do not take DICOM files as input, and so far cannot handle cases where a clinician needs to view multiple series at the same time. + +The 3DSlicer module mpReview (part of the SlicerProstate extension) was previously developed to handle these cases, specifically for assisting with manual annotation of the prostate and other related anatomical regions. In previous project weeks, we have streamlined the extension [here](https://projectweek.na-mic.org/PW35_2021_Virtual/Projects/mpReview/) and updated the module to use the latest SegmentEditor, and incorporated the use of Google Cloud Platform [here](https://projectweek.na-mic.org/PW37_2022_Virtual/Projects/mpReview/). + +However, there are improvements that can be made in terms of functionality. For instance, we would like to allow the user to access multiple types of servers, and perform annotation of body parts other than the prostate. + +In this project week we'd like to focus on getting user feedback on the current state of the extension, and ideas for improvements and features to add. We will try out extensions mentioned above to get ideas of how ours could be more streamlined. + +## Objective + + + +1. Get feedback from the current [multiple_server_feature](https://github.com/deepakri201/mpReview/tree/multiple_server_feature) branch of the module. +2. 
Try out [CaseIterator](https://github.com/JoostJM/SlicerCaseIterator), [FlywheelCaseIterator](https://github.com/Slicer/ExtensionsIndex/pull/1942), and [SegmentationReview](https://github.com/zapaishchykova/SegmentationReview) +3. Brainstorm ideas for improvements and features to include. +4. Define the steps that are needed to accomplish the changes. +5. Release the module as a new Slicer extension + +## Approach and Plan + + + +1. We will provide instructions below how to setup the module, and download sample data, etc. --> see below "Installation Instructions for Users" +2. We will ask users to experiment with the module and provide us feedback. +3. We will make some changes to the module based on user feedback and from our experimentation with the other extensions +4. At the same time we'll test out other approaches + +## Progress and Next Steps + + + +1. I made some UI changes to the module from feedback from Nadya and Andrey +2. I tested out two Slicer extensions that are also used for annotations -- [CaseIterator](https://github.com/JoostJM/SlicerCaseIterator) and [SegmentationReview](https://github.com/zapaishchykova/SegmentationReview). Still waiting for Flywheel instance to test FlywheelCaseIterator +3. I was able to get multiple users to try the module, and fixed some of the errors they found +4. I also demo'd the module and received very useful feedback [here](https://docs.google.com/document/d/1_Ou1Uns0LrzQ_w-As-1u1PSnLxyqXgUuNgVtkm2Eebc/edit?usp=sharing) +5. Next week I will make a PR so the latest version of mpReview can be used. 
After that I will submit it as a separate extension with some additional features + +# Illustrations + + + +Demo of the module + + + + + +# Background and References + + + +We have worked on this during multiple project weeks, [PW35](https://projectweek.na-mic.org/PW35_2021_Virtual/Projects/mpReview/) and [PW37](https://projectweek.na-mic.org/PW37_2022_Virtual/Projects/mpReview/). The code from PW37 is available [here](https://github.com/deepakri201/mpReview/tree/multiple_server). + +# Installation Instructions for Users + +1. Clone the mpReview [multiple_server_feature branch](https://github.com/deepakri201/mpReview/tree/multiple_server_feature) +2. Add the following extensions: SlicerDICOMWeb, DCMQI, QuantitativeReporting, SlicerDevelopmentToolbox +3. Enable the developer mode in the Application settings and add the path from #1 to the module list +4. If you want to use the local Slicer DICOM database to experiment with the module, download some sample data from this dropbox link [here](https://www.dropbox.com/scl/fo/br63z27pfjm421vbn9m96/h?dl=0&rlkey=unxlhva3ifkden5usxfn6b78d). We've provided data from a typical prostate MR imaging scan (collection QIN-Prostate-Repeatability) and a typical CT scan (from NLST). +5. Or, if you want to be adventurous and try out using the Google Cloud Platform server, you will first need to have a google account and a project. You will have a 90 day free trial with credits, or go [here](https://learn.canceridc.dev/getting-started-with-idc) to request $300 credits from Imaging Data Commons. Then follow [this Google Colab notebook](https://colab.research.google.com/drive/1nDsnERKpWWr32xK_M7_pA1GjHPbghwjK#scrollTo=FaapolCoufCX) to set up your very own DICOM datastore which will hold the same data from above. You will also need to install the [Google Cloud SDK](https://cloud.google.com/sdk/docs/install) on your machine (for Mac/Linux make sure you install it in your home directory and add to PATH). 
In windows it should be added to the path automatically. +6. Install the mpReview module by going to Applications and adding the path. +7. If you have time, it would be great to get your feedback on this [google form](https://docs.google.com/forms/d/e/1FAIpQLSe2fGjdiWVfPSh3gDOoZ5fm0IaUHdB4lultvjwRqVskodN2sw/viewform?usp=sf_link) :) diff --git a/PW39_2023_Montreal/Projects/NodeFocus/README.md b/PW39_2023_Montreal/Projects/NodeFocus/README.md new file mode 100644 index 000000000..4f135fcad --- /dev/null +++ b/PW39_2023_Montreal/Projects/NodeFocus/README.md @@ -0,0 +1,69 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: Node focus in views +category: Infrastructure +presenter_location: In-person + +key_investigators: +- name: Kyle Sunderland + affiliation: Queen's University + country: Canada +--- + +# Project Description + +3D software applications often provide feedback mechanisms for selecting objects and showing users which objects they have selected, or are interacting with. This allows some visualizations to be hidden when the object is not in focus. + +## Objective + + + +This project aims to implement a similar node selection system in 3D Slicer, allowing users to: +- See the nodes that they are hovering over or interacting with in the various subject hierarchy trees or node selectors. +- Select nodes by clicking on them in one of the views. + +If you would like to offer suggestions or feedback on the current prototype, then come see me in-person. + +## Approach and Plan + + + +1. Continue development of the node focus infrastructure. +2. Get feedback from hands-on use to better improve the implementation. +3. Improve visualization and performance. + +## Progress and Next Steps + + + +1. Prototype implementation can be found here: https://github.com/Sunderlandkyl/Slicer/tree/focus_node_prototype +2. Improve rendering of volumes + hard selection. +3. Implement picking of node types other than markups. 
+ +# Illustrations + + + +Example showing segmentations: + +![Atlas node focus](https://github.com/NA-MIC/ProjectWeek/assets/9222709/cd0fd740-2aee-4010-b73d-dc8a53f8e58e) + +Example showing markups: + +![Markups node focus](https://github.com/NA-MIC/ProjectWeek/assets/9222709/2ecbef2b-e7a2-4317-9e9d-1191f5a75d4f) + +Example showing models using a combobox: + +![Combobox model node focus](https://github.com/NA-MIC/ProjectWeek/assets/9222709/7450c678-f8eb-482b-97c2-e0b95d4e05bc) + +# Background and References + +- [Development branch](https://github.com/Sunderlandkyl/Slicer/tree/focus_node_prototype) diff --git a/PW39_2023_Montreal/Projects/NousNavRelease/README.md b/PW39_2023_Montreal/Projects/NousNavRelease/README.md new file mode 100644 index 000000000..857ee49b2 --- /dev/null +++ b/PW39_2023_Montreal/Projects/NousNavRelease/README.md @@ -0,0 +1,99 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: Towards NousNav Major Version Release +category: IGT and Training +presenter_location: In-person + +key_investigators: +- name: Sam Horvath + affiliation: Kitware, Inc. + country: USA + +- name: Colton Barr + affiliation: Queen's University / Brigham and Women's Hospital + country: Canada + +- name: Sarah Frisken + affiliation: Brigham and Women's Hospital + country: USA + +- name: Sonia Pujol + affiliation: Brigham and Women's Hospital + country: USA + +- name: Steve Pieper + affiliation: Isomics, Inc. + country: USA + +- name: Tina Kapur + affiliation: Brigham and Women's Hospital + country: USA + +- name: Alex Golby + affiliation: Brigham and Women's Hospital + country: USA + +--- + +# Project Description + + +NousNav is an ongoing project led by Dr. Alex Golby at Brigham and Women's Hospital to build and disseminate a low-cost neuronavigation system. 
Built as a 3D Slicer Custom App, NousNav uses low cost optical tracking (Optitrack Duo) in combination with custom optically-tracked tools and reference arrays to facilitate patient registration, procedure planning, and navigation. + +For this project we are going to finalize remaining issues on the tracker to move towards a 1.0 release + +## Objective + + + +1. Triage existing issues into a major/minor release tasks +1. Clear issues for the 1.0 Release + +## Approach and Plan + + + +Major tasks to work on: + +1. DICOM patient management + 1. Storage of MRBs as secondary captures + 1. Store XML only? +1. Landmarks management rework + 1. Granular color management + 1. Single landmarks class +1. LPS orientation in Nav phase + +## Progress and Next Steps + + + +1. Reworked the registration landmark support. + 1. Support any sufficient subset of the complete landmark list + 2. Registration picking provides better control of order of landmark collection + 3. Improved color consistency across landmark usage +2. Made patient interface styling consistent with rest of application +3. Demo'd recent changes to the rest of the NousNav team +4. Discussed with other IGT devs the best way to reuse NousNav effort in other projects + 1. Slicer NavigationAppTemplate: core functionality of the NousNav workflow made available as a reusable template similar to SlicerCAT +5. Will tag the 1.0 release at the end of this week. 
+ + +# Illustrations + + +![Screenshot 2023-06-13 15 29 24](https://github.com/NA-MIC/ProjectWeek/assets/25040869/cea0c2c0-0f83-4af3-afc6-a2b858ba886a) +![Screenshot 2023-06-13 17 11 09](https://github.com/NA-MIC/ProjectWeek/assets/25040869/7da490fe-f790-49b7-b2b9-1b475f3ec0ea) + + +# Background and References + + +[NousNav website](https://www.nousnav.org/) diff --git a/PW39_2023_Montreal/Projects/OpenMeshedAnatomy/README.md b/PW39_2023_Montreal/Projects/OpenMeshedAnatomy/README.md new file mode 100644 index 000000000..207abdd3c --- /dev/null +++ b/PW39_2023_Montreal/Projects/OpenMeshedAnatomy/README.md @@ -0,0 +1,92 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: Open Meshed Anatomy +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: +- name: Andy Huynh + affiliation: The University of Western Australia + country: Australia + +- name: Michael Halle + affiliation: Surgical Planning Lab (SPL) + country: United States + +- name: Benjamin Zwick + affiliation: The University of Western Australia + country: Australia + +--- + +# Project Description + +Open Meshed Anatomy (OMA) is a PhD project, in collaboration with the [Open Anatomy Project](https://www.openanatomy.org/) (OAP), aiming to facilitate and improve computational simulations of human body parts using free and open atlases available from OAP. By leveraging these existing segmented and labeled atlases, OMA streamlines the creation of computational grids/meshes for researchers and integrates valuable spatial anatomy information for a better understanding and visualization of simulation results. Similar to OAP's goals, the project will offer these resources and visualization tools via an open web platform or 3D Slicer, fostering global collaboration and research among the scientific community. + +## Objective + + + +1. Objective A. 
Determine the optimal way to assign labels to mesh elements/nodes describing structural names from the atlas. + +2. Objective B. Improve SlicerAtlasEditor functionalities and merge to SlicerOpenAnatomy extension. Refer to Figure 1 and 2. + +3. Objective C. Implement contextual visualisation of results from computational simulations. Refer to Figure 3. + + +## Approach and Plan + + + +Objective A: +1. What file extension to use (e.g. VTK?) or to use external file. +2. Labelling via tags for better querying functionalities (Mike Halle's idea) + +Objective B: +1. Add option to download via URL the different atlas label maps that Open Anatomy Project offers, including its hierarchy structure json files within 3D Slicer or extension. +2. Fix non-manifold/corrupt labeled voxels in the label map. This will be useful for generating a clean surface mesh for visualization or volumetric meshing. Refer to Figure 2. + +Objective C: +1. Have options to query different computational results based on different anatomy (from atlas). This may be done via tags or using the structure json files. Refer to Figure 3. +2. Use [SlicerCBM](https://github.com/SlicerCBM/SlicerCBM) as a sample application of this. 
+ +## Progress and Next Steps + + +![Img1](https://github.com/andy9t7/SlicerAtlasEditor/blob/main/img/download-import.png?raw=true) +![Img2](https://github.com/andy9t7/SlicerAtlasEditor/blob/main/img/material-dipole.png?raw=true) +![Img3](https://github.com/andy9t7/SlicerAtlasEditor/blob/main/img/corpus-callosum-dipole.png?raw=true) + + + + +# Illustrations + + + +![Simplifying Open Anatomy Project's Brain Atlas to ROI](https://github.com/andy9t7/SlicerAtlasEditor/blob/main/img/merge-roi.png?raw=true) +**Figure 1: Simplifying Open Anatomy Project's Brain Atlas to ROI using SlicerAtlasEditor** + +![Fixing non-manifold voxels in atlas label map](https://github.com/andy9t7/SlicerAtlasEditor/blob/main/img/fix-non-manifolds.png?raw=true) +**Figure 2: Fixing non-manifold voxels in atlas label map. [1]** + +![Query and visualize simulation results](https://github.com/andy9t7/SlicerAtlasEditor/blob/main/img/query-and-visualise.png?raw=true) +**Figure 3: Query and visualize simulation results on ROI** + +# Background and References + + + +The Open Anatomy Project website: https://www.openanatomy.org/. +Open Anatomy Project's Brain Atlas: https://github.com/mhalle/spl-brain-atlas. +SlicerOpenAnatomy: https://github.com/PerkLab/SlicerOpenAnatomy. +SlicerAtlasEditor: https://github.com/andy9t7/SlicerAtlasEditor. +SlicerCBM: https://github.com/SlicerCBM/SlicerCBM. + +[1] S. J. Owen, M. L. Staten, and M. C. Sorensen, “Parallel hexahedral meshing from volume fractions,” _Engineering with Computers_, vol. 30, no. 3, pp. 301–313, Jul. 2014, doi: [10.1007/s00366-012-0292-8](https://doi.org/10.1007/s00366-012-0292-8). 
diff --git a/PW39_2023_Montreal/Projects/OptimizingBundleSizeOfPolysegWasmForWebApplications/README.md b/PW39_2023_Montreal/Projects/OptimizingBundleSizeOfPolysegWasmForWebApplications/README.md new file mode 100644 index 000000000..8359f4f29 --- /dev/null +++ b/PW39_2023_Montreal/Projects/OptimizingBundleSizeOfPolysegWasmForWebApplications/README.md @@ -0,0 +1,77 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: Optimizing Bundle Size of PolySeg-WASM for Web Applications +category: Early Presenter +topic-category: Cloud / Web +presenter_location: Online + +key_investigators: + +- name: Alireza Sedghi + affiliation: OHIF + country: Canada + +- name: Kyle Sunderland + affiliation: Queen's University + country: Canada + +- name: Jaswant Panchumarti + affiliation: Kitware, Inc. + country: USA + + +--- + +# Project Description + +The Institute of Cancer Research (ICR) has created PolySeg-WASM, an extended WASM wrapper for the [PerkLab/PolySeg](https://github.com/PerkLab/PolySeg) library, including C++ code repurposed from [Slicer](https://github.com/Slicer/Slicer) and [SlicerRT](https://github.com/SlicerRt/SlicerRT). + +In the [previous year project](https://github.com/NA-MIC/ProjectWeek/blob/master/PW38_2023_GranCanaria/Projects/OHIF_PolySeg/README.md) we created the contour segmentation representation for [Cornerstone3D library](https://www.cornerstonejs.org/live-examples/contourrendering), now this year we want to use the polySEG to convert the contours to closed surfaces. + +The repo by ICR does the job. However, the bundle is huge (3.6MB), which is not optimal for web applications. This project aims to find out how to reduce the bundle size by choosing the VTK dependencies. + +## Objective + + +1.
**Analyze VTK dependencies**: Identify the specific VTK components used in the PolySeg-WASM library that contribute the most to the bundle size in order to determine areas for potential optimization. +2. **Optimize VTK bundle size**: Reduce the bundle size of PolySeg-WASM by selectively choosing essential VTK dependencies, excluding or replacing components with lightweight alternatives, while maintaining the required functionality. +3. **Evaluate performance and functionality**: Assess the performance and functionality of the optimized PolySeg-WASM library to ensure that the reduction in bundle size does not compromise accuracy or efficiency in converting contours to closed surfaces for web applications. + +## Approach and Plan + +1. Perform a detailed code analysis to identify the specific VTK components used in PolySeg-WASM. +2. Measure the size contribution of each VTK component to the overall bundle size of PolySeg-WASM. +3. Document the findings, including a breakdown of the size contribution of each component. + +## Progress and Next Steps + +0. Updated the vtk that polyseg-wasm is using to the one that slicer is using which is specified [here](https://github.com/Slicer/Slicer/blob/main/SuperBuild/External_VTK.cmake#L136-L146), it was previously depending on the latest vtk +1. We initially tried to reduce the final wasm size by disabling various modules in the vtk cmake. However, this approach didn't yield the desired results. +2. Next, we examined the polySEG-wasm's cmake to determine potential modifications for size reduction. We discovered that all vtk dependencies are included in ${VTK_LIBS}, set by cmake. By incrementally adding these dependencies, we pinpointed the minimum number of libraries needed, resulting in a 550 KB reduction in the final wasm size.
The final set of included libraries was VTK::CommonDataModel VTK::CommonCore VTK::CommonExecutionModel VTK::FiltersCore VTK::FiltersExtraction VTK::ImagingStencil VTK::ImagingStatistics VTK::ImagingMorphological. +3. Furthermore, we observed that the browser gzips the loaded wasm, bringing the final resource size down to 800 KB - a significant improvement from before. +4. PR created [here ](https://bitbucket.org/icrimaginginformatics/polyseg-wasm/pull-requests/1) + +### Next steps + +1. We should look into if we can even narrow down even more the VTK::CommonDataModel and VTK::CommonCore to only include those sub libs that is necessary for the build + + +# Illustrations + +![image](https://github.com/NA-MIC/ProjectWeek/assets/7490180/fe1091cc-32f5-4710-a605-8345ce399849) + + +![image](https://github.com/NA-MIC/ProjectWeek/assets/7490180/8ab133e7-aa73-4b02-8c1d-3dc807861b64) + + +# Background and References + + + +[PolySEG repo ](https://github.com/PerkLab/PolySeg) +[ICR Wrapper](https://bitbucket.org/icrimaginginformatics/polyseg-wasm/src/master/) diff --git a/PW39_2023_Montreal/Projects/PrismVolumeRendererRefactoringAndBugFixing/README.md b/PW39_2023_Montreal/Projects/PrismVolumeRendererRefactoringAndBugFixing/README.md new file mode 100644 index 000000000..1f22bb47c --- /dev/null +++ b/PW39_2023_Montreal/Projects/PrismVolumeRendererRefactoringAndBugFixing/README.md @@ -0,0 +1,71 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: PRISM Volume Renderer – Refactoring and bug fixing +category: VR/AR and Rendering +presenter_location: In-person + +key_investigators: + +- name: Andrey Titov + affiliation: ÉTS + country: Canada + +- name: Camille Hascoët + affiliation: ÉTS + country: Canada + +- name: Simon Drouin + affiliation: ÉTS + country: Canada + +--- + +# Project Description + + + +The goal of this project is to enable the development of advanced 3D rendering techniques in Slicer. 
The goal is to facilitate access to GPU shaders and enable GPU-based filtering in Slicer by improving shader access multipass rendering in VTK and Slicer. The [PRISM Module](https://github.com/ETS-vis-interactive/SlicerPRISMRendering) in Slicer will serve as a test environment for the new capabilities. + +PRISM has a significant amount of unused and/or legacy code that was made for version 4.11, which isn't used anymore. The goal of the project is to simplify PRISM volume renderer to make it easier to work with and to remove as many bugs as possible. + +## Objective + + + +1. Fixing the Outline shader +2. Closing Slicer after opening PRISM shouldn't generate errors + +## Approach and Plan + + + +1. Debug the code to see what is happening +2. Try to simplify the shader until something appears on the screen, and then add the code back + +## Progress and Next Steps + + + +1. The scale of the gradients calculated on the volume has been properly adjusted. +2. This gradient computation was necessary to make Edge Enhancement work. +3. Minor refactoring of the code. + +# Illustrations + + + +![image](https://github.com/NA-MIC/ProjectWeek/assets/22062174/eef32864-83d5-4da8-a447-1d5b4f4b29f1) +![image](https://github.com/NA-MIC/ProjectWeek/assets/22062174/613897ec-e2a3-4345-b03e-423d64a0fe39) +![image](https://github.com/NA-MIC/ProjectWeek/assets/22062174/28a4c483-b9cd-467c-a182-33b87b1086d9) + + +# Background and References + + + + diff --git a/PW39_2023_Montreal/Projects/README.md b/PW39_2023_Montreal/Projects/README.md new file mode 100644 index 000000000..c927214a5 --- /dev/null +++ b/PW39_2023_Montreal/Projects/README.md @@ -0,0 +1,18 @@ +# How to create a new project + +- Post questions about the project idea and team on the [Project Week forum][forum], our communication mechanism as of PW28. 
+- When you are ready, add a new entry in the list of **Projects** by creating a new `README.md` file in subfolder in `Projects` folder, and copying contents of [project description template][project-description-template] file into it. Step-by-step instructions for this are: + +1. Open [project description template][project-description-template] and copy its full content to the clipboard + * If the link does not work (https issues) please try [here](https://github.com/NA-MIC/ProjectWeek/blob/master/PW39_2023_Montreal/Projects/Template/README.md) +3. Go back to [Projects](https://github.com/NA-MIC/ProjectWeek/tree/master/PW39_2023_Montreal/Projects) folder on GitHub +4. Click on "Create new file" button +5. Type `YourProjectName/README.md` + - When naming the file, **please ensure there are no spaces/special characters in the folder or file name** +6. Paste the previously copied content of project template page into your new `README.md` +7. Update at least your project's __title, category, key investigators, location, and project description sections__ +8. 
Create a [pull request](https://help.github.com/articles/creating-a-pull-request/) with the new page + + +[forum]: https://discourse.slicer.org/c/community/project-week +[project-description-template]: https://raw.githubusercontent.com/NA-MIC/ProjectWeek/master/PW39_2023_Montreal/Projects/Template/README.md diff --git a/PW39_2023_Montreal/Projects/RenderingSupportForMultipleViews/README.md b/PW39_2023_Montreal/Projects/RenderingSupportForMultipleViews/README.md new file mode 100644 index 000000000..64ab1e18a --- /dev/null +++ b/PW39_2023_Montreal/Projects/RenderingSupportForMultipleViews/README.md @@ -0,0 +1,73 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/RenderingSupportForMultipleViews/README.html + +project_title: Rendering support for multiple views +category: VR/AR and Rendering +presenter_location: In-person + +key_investigators: + +- name: Sara Rolfe + affiliation: Seattle Children's Research Institute + country: USA + +- name: Murat Maga + affiliation: University of Washington + country: USA + +- name: Chi Zhang + affiliation: Seattle Children's Research Institute + country: USA + +--- + +# Project Description + + + +The goal of this project is to extend the Volume Rendering interface to improve the convenience of multiple volume comparisons. We aim to create and test prototypes of features that will be added to the SlicerMorph extension in the short term and discuss appropriateness of integration into Slicer core. + +## Objective + + + +Features to support multiple volume comparisons: + +## Approach and Plan + + + +1. Objective A: Create module to manage two relative views, manage nodes displayed/transformed in each +2. Objective B: Create prototype that links individual rendering properties of each volume in a folder. + +## Progress and Next Steps + + + +1. Created a Python function to link/unlink relative views. +2. 
Developed prototype for module to manage temporarily linked views: QuickAlign +3. Module name contributed by Andras Lasso +4. Testing use of QuickAlign +5. Added beta version as a [test module in the SlicerMorph extension](https://github.com/SlicerMorph/SlicerMorph/tree/master/QuickAlign) + +# Illustrations +Initial rendering of two fetal mouse scans: + +ViewSyncBefore + +After alignment and temporary view linking using ViewSync: + +ViewSyncAfter + + +# Background and References + + + +*No response* diff --git a/PW39_2023_Montreal/Projects/RenderingSupportForMultipleViews/ViewSyncAfter.gif b/PW39_2023_Montreal/Projects/RenderingSupportForMultipleViews/ViewSyncAfter.gif new file mode 100644 index 000000000..68c418e33 Binary files /dev/null and b/PW39_2023_Montreal/Projects/RenderingSupportForMultipleViews/ViewSyncAfter.gif differ diff --git a/PW39_2023_Montreal/Projects/RenderingSupportForMultipleViews/viewSync.png b/PW39_2023_Montreal/Projects/RenderingSupportForMultipleViews/viewSync.png new file mode 100644 index 000000000..d04e63a40 Binary files /dev/null and b/PW39_2023_Montreal/Projects/RenderingSupportForMultipleViews/viewSync.png differ diff --git a/PW39_2023_Montreal/Projects/RenderingSupportForMultipleViews/viewSyncBefore.gif b/PW39_2023_Montreal/Projects/RenderingSupportForMultipleViews/viewSyncBefore.gif new file mode 100644 index 000000000..2c0866e63 Binary files /dev/null and b/PW39_2023_Montreal/Projects/RenderingSupportForMultipleViews/viewSyncBefore.gif differ diff --git a/PW39_2023_Montreal/Projects/ShapeAXI/README.md b/PW39_2023_Montreal/Projects/ShapeAXI/README.md new file mode 100644 index 000000000..96d6d10ef --- /dev/null +++ b/PW39_2023_Montreal/Projects/ShapeAXI/README.md @@ -0,0 +1,159 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: ShapeAXI Shape Analysis Exploration and Interpretability +category: Quantification and Computation +presenter_location: In-person + +key_investigators: +- name: Juan Prieto + 
affiliation: University of North Carolina + country: USA + +- name: Nathan Hutin + affiliation: University of Michigan + +- name: Luc Anchling + affiliation: University of Michigan + country: France + +- name: Lucia Cevidanes + affiliation: University of Michigan + country: USA + +- name: Selene Barone + affiliation: University of Catanzaro + country: Italy + +- name: Jonas Bianchi + affiliation: University of the Pacific + country: USA + +- name: Marcela Gurgel + affiliation: University of Michigan + country: USA + +- name: Najla Al Turkestani + affiliation: University of Michigan + country: USA + +- name: Felicia Miranda + affiliation: University of Sao Paulo + country: Brazil + +- name: Denise Curado + affiliation: University of Michigan + country: USA + +- name: Kinjal Mavani + affiliation: University of Michigan + country: USA + +- name: Margaret Eason + affiliation: University of Michigan + country: USA + +- name: Aron Aliage del Castilo + affiliation: University of Michigan + country: USA +--- + +# Project Description + + + +ShapeAXI is an innovative project that focuses on advancing the field of shape analysis, exploration, and interpretability through the application of artificial intelligence (AI) techniques. The project aims to develop novel algorithms and tools that can effectively analyze and interpret complex shapes, enabling deeper insights and understanding in various domains such as computer vision, computer graphics, and biomedical imaging. By harnessing the power of AI, ShapeAXI aims to revolutionize shape analysis by automating the process of shape exploration, identifying patterns, and extracting meaningful information. The project strives to enhance interpretability, enabling users to comprehend and interpret the underlying structure and characteristics of shapes with greater clarity.
ShapeAXI holds the promise of unlocking new possibilities in shape-related research and applications, ultimately leading to advancements in fields such as object recognition, shape synthesis, and shape-based decision-making systems. + + +## Objective + + + +1. Develop advanced shape analysis algorithms: Design and develop cutting-edge algorithms that can efficiently analyze and process complex shapes, encompassing both 2D and 3D domains. These algorithms will leverage AI techniques such as deep learning, computer vision, and geometric modeling to provide accurate and robust shape analysis capabilities. + +2. Enable shape exploration and discovery: Create tools and techniques that allow users to explore and navigate through shape spaces effectively. By leveraging AI-driven approaches, the project will enable users to discover shape patterns, similarities, and differences, facilitating insights into shape characteristics and structures. + +3. Enhance shape interpretability: Develop methods to enhance the interpretability of shape analysis results, enabling users to understand and interpret the underlying meaning and significance of shape features. This includes visual explanations, feature attribution techniques, and intuitive representations to facilitate human comprehension of shape analysis outcomes. + +4. Foster cross-domain applicability: Ensure the developed shape analysis techniques and tools are applicable across various domains, such as computer vision, computer graphics, biomedical imaging, and manufacturing. The project will focus on creating adaptable and versatile solutions that can be effectively utilized in different application areas. + +5. Promote open-source collaboration: Foster a collaborative and open-source approach to encourage knowledge sharing and community involvement. 
The project will aim to release relevant software libraries, datasets, and benchmarks, allowing researchers and practitioners to build upon the developed tools and algorithms and advance the field collectively. + +6. Validate and benchmark performance: Conduct extensive validation experiments and comparative studies to assess the performance and efficacy of the developed algorithms and tools. This includes benchmarking against existing methods and datasets, ensuring the reliability and generalizability of the proposed shape analysis techniques. + + + +## Approach and Plan + + + +1. Develop algorithms for shape analysis, incorporating deep learning techniques, computer vision, and geometric modeling. +3. Create a user-friendly 3D Slicer extension with a software interfaces for creating the explainability maps on shapes after classification. +4. Enhance interpretability through techniques like visual explanations and feature attribution methods. +5. Validate the developed algorithms and tools through extensive experiments and benchmarking against existing methods. +6. Foster open-source collaboration by releasing software libraries, datasets, and benchmarks. +7. Document methodologies and findings for knowledge sharing and prepare technical papers and presentations. + + + +## Progress and Next Steps + + + +1. Develop the algorithms for shape analysis and classification. +2. 
Create heat maps using GradCAM and propagate them to the shapes + +# Illustrations + + + +# Results + +## Inputs + +|surf|class| +|---|---| +|/path/to/model.vtk|0| +|/path/to/model2.vtk|1| +|/path/to/model3.vtk|2| + +## Automated training - testing +![01 Final_Classificationfold0_test_prediction_norm_confusion](https://github.com/juanprietob/ProjectWeek/assets/7086191/0cf38ad9-0e34-4cb2-a7ed-fb4a7b146b1e) +![01 Final_Classificationfold0_test_prediction_roc](https://github.com/juanprietob/ProjectWeek/assets/7086191/7f9c650c-d561-4cb4-9dcd-2011cba8b95c) + +## Explainability + + + + + + +# Background and References + + + +1. https://github.com/DCBIA-OrthoLab/Fly-by-CNN.git +2. Selvaraju, Ramprasaath R., Abhishek Das, Ramakrishna Vedantam, Michael Cogswell, Devi Parikh, and Dhruv Batra. "Grad-CAM: Why did you say that?." arXiv preprint arXiv:1611.07450 (2016). diff --git a/PW39_2023_Montreal/Projects/SlicerCBM/README.md b/PW39_2023_Montreal/Projects/SlicerCBM/README.md new file mode 100644 index 000000000..794bf1303 --- /dev/null +++ b/PW39_2023_Montreal/Projects/SlicerCBM/README.md @@ -0,0 +1,135 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: SlicerCBM "Computational Biophysics for Medicine in 3D Slicer" +category: Quantification and Computation +presenter_location: In-person + +key_investigators: +- name: Ben Zwick + affiliation: The University of Western Australia + country: Australia + +- name: Saima Safdar + affiliation: The University of Western Australia + country: Australia + +- name: Andy Huynh + affiliation: The University of Western Australia + country: Australia +--- + +# Project Description + + + +SlicerCBM is an extension for 3D Slicer that provides tools for creating and solving computational models of biophysical systems and processes with a focus on clinical and biomedical applications. 
Features include grid generation, assignment of material properties and boundary conditions, and solvers for biomechanical modeling and biomechanics-based non-rigid image registration. + +## Objective + + + +1. Package SlicerCBM modules as an installable 3D Slicer extension. + +## Approach and Plan + + + +1. Complete the requirements for a new 3D Slicer extension () + +2. Add the SlicerCBM extension to the Slicer Extensions Catalog. + +## Progress and Next Steps + + + +1. Completed the [SlicerFreeSurferCommands](https://projectweek.na-mic.org/PW39_2023_Montreal/Projects/SlicerFreeSurferCommands/) project, which provides modules that are used in the SlicerCBM workflow. + +2. Discussed opportunities for integrating [SlicerCBM](https://slicercbm.org) with [NousNav](https://www.nousnav.org) for image-guided surgery simulations (both as a research tool and for potential clinical applications). + +3. Solved the EEG forward problem on the SPL brain atlas mesh created using the [SlicerAtlasEditor](https://github.com/andy9t7/SlicerAtlasEditor) from the [Open Meshed Anatomy](https://projectweek.na-mic.org/PW39_2023_Montreal/Projects/OpenMeshedAnatomy/) project. + +4. We will continue working on SlicerCBM next week at PerkLab (Queen's University). + +# Illustrations + + + +Flowchart of the patient-specific solution of the iEEG forward problem in deforming brain. Brain shift caused by implantation of electrodes is computed using the biomechanical model. The computed displacement field is used to transform the DTI to the postoperative configuration. This warped DTI is then used as the basis for creating the iEEG forward model. +![fig_flowchart-eeg](https://github.com/NA-MIC/ProjectWeek/assets/33216696/ef320477-0540-460f-8412-122977ef2641) + +Original (actual preoperative) and deformed (predicted postoperative) MR images compared with original CT image and electrode positions. 
Postoperative CT image and electrode positions (white spheres in CT and red points in the slice planes) are overlaid on the (a,b,c) MRI acquired preoperatively and (d,e,f) MRI registered to postoperative configuration of the brain obtained using biomechanics-based image warping. +![fig_mri_ct_elec_unwarped_and_warped](https://github.com/NA-MIC/ProjectWeek/assets/33216696/b860a491-94a2-4c1b-8a8a-c1e3d1bfb42d) + +Tissue label maps based on (a,b,c) original preoperative and (d,e,f) deformed by insertion of electrodes postoperative image data. Tissue classes are colored as follows: scalp (pink); skull (yellow); GM (gray); WM (white); and CSF (blue). The location of the electrode grid array can be identified by the line of black voxels in the vicinity of the right temporal and parietal lobes. +![fig_labelmaps](https://github.com/NA-MIC/ProjectWeek/assets/33216696/f41bc134-12e6-42e2-b776-901060c56915) + +Mean conductivity (1/3 tr(C)) for models constructed using (a,b,c) original preoperative and (d,e,f) deformed by insertion of electrodes postoperative image data. The ECoG electrode grid substrate is denoted by the purple outline. +![fig_cond_MC](https://github.com/NA-MIC/ProjectWeek/assets/33216696/bd2cff16-6ed8-4e5a-af2e-1155f11a8369) + +Streamlines of the electric field generated by a current dipole source located in the temporal lobe of an epilepsy patient. Finite element solution using a regular hexahedral grid implemented in MFEM. +![brain-electric-field](https://github.com/NA-MIC/ProjectWeek/assets/33216696/29a3fe30-4353-49bb-ae91-ec6225eba7f6) + +# Background and References + + + +Code repository and documentation: + +- +- + +Sample data: + +- Zwick BF, Safdar S, Bourantas GC, Joldes GR, Hyde DE, Warfield SK, + Wittek A, Miller K. Data for patient-specific solution of the + electrocorticography forward problem in deforming brain [Data + set]. Zenodo; 2022. 
+ +Publications: + +- Zwick BF, Safdar S, Bourantas GC, Joldes GR, Hyde DE, Warfield SK, + Wittek A, Miller K. Image data and computational grids for + computing brain shift and solving the electrocorticography + forward problem. Data in Brief. 2023;48:109122. + + +- Safdar S, Zwick BF, Yu Y, Bourantas GC, Joldes GR, Warfield SK, + Hyde DE, Frisken S, Kapur T, Kikinis R, Golby A, Nabavi A, + Wittek A, Miller K. SlicerCBM: automatic framework for + biomechanical analysis of the brain. Int J CARS. 2023. + + +- Safdar S, Zwick BF, Bourantas G, Joldes GR, Warfield SK, Hyde DE, + Wittek A, Miller K. Automatic Framework for Patient-Specific + Biomechanical Computations of Organ Deformation: An Epilepsy (EEG) + Case Study. In: Nielsen PMF, Nash MP, Li X, Miller K, Wittek A, + editors. Computational Biomechanics for Medicine. Cham: Springer + International Publishing; 2022. p. 75–89. + + +- Zwick BF, Bourantas GC, Safdar S, Joldes GR, Hyde DE, Warfield SK, + Wittek A, Miller K. Patient-specific solution of the + electrocorticography forward problem in deforming + brain. NeuroImage. 2022;263:119649. + + +- Yu Y, Safdar S, Bourantas GC, Zwick BF, Joldes GR, Kapur T, Frisken + S, Kikinis R, Nabavi A, Golby A, Wittek A, Miller K. Automatic + framework for patient-specific modelling of tumour resection-induced + brain shift. Comput Biol Med. 2022;143:105271. + + +- Safdar S, Joldes GR, Zwick BF, Bourantas GC, Kikinis R, Wittek A, + Miller K. Automatic Framework for Patient-Specific Biomechanical + Computations of Organ Deformation. In: Miller K, Wittek A, Nash M, + Nielsen PMF, editors. Computational Biomechanics for Medicine. Cham: + Springer; 2021. p. 3–16. 
+ diff --git a/PW39_2023_Montreal/Projects/SlicerFlatpak/README.md b/PW39_2023_Montreal/Projects/SlicerFlatpak/README.md new file mode 100644 index 000000000..f06d6605a --- /dev/null +++ b/PW39_2023_Montreal/Projects/SlicerFlatpak/README.md @@ -0,0 +1,76 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/SlicerFlatpak/README.html + +project_title: 'Slicer Flatpak' +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Rafael Palomar + affiliation: Oslo University Hospital + country: Norway + +- name: Steve Pieper + affiliation: Isomics, Inc. + country: USA + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware, Inc. + country: USA + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +- name: Sam Horvath + affiliation: Kitware, Inc. + country: USA + +--- + +# Project Description + +📄 Slicer Flatpak is a project focused on packaging the 3D Slicer software as a Flatpak. This initiative aims to offer an easy and universal way to install and run 3D Slicer on any Linux distribution that supports Flatpak. By doing this, it seeks to reduce installation complexities and improve compatibility across different systems. The distribution of 3D Slicer as a Flatpak has potential benefits. + +The convenience of having a 3D Slicer Flatpak has been long discussed in the 3D Slicer Discourse platform ([source](https://discourse.slicer.org/t/interest-to-create-flatpak-for-3d-slicer-have-issue-with-guisupportqtopengl-not-found/16532)). Soon after PW38, we started a renewed discussion on the topic and initiated efforts to make 3D Slicer Flatpak a reality. We have completed the first distribution of 3D Slicer as a Flatpak, making progress towards our objectives. + +## Objective + +🎯 The objective of this project is to: + +1. Consolidate the 3D Slicer Flatpak build infrastructure. +2. Resolve the issue with SimpleITK and enable its deployment along with 3D Slicer Flatpak. 
+3. Test and verify 3D Slicer extensions for compatibility with the Flatpak version. +4. Discuss and plan the integration and release strategy, including the possibility of submission to flathub. + +## Approach and Plan + +📝 Our approach to achieving the objectives is as follows: + +1. Continuously develop and refine the 3D Slicer Flatpak, addressing limitations and improving its functionality. +2. Work towards resolving the SimpleITK problem and ensure seamless deployment of SimpleITK along with 3D Slicer Flatpak. +3. Enable the use of the Slicer Extension Manager and explore options for deploying extensions, considering sandboxed environments and local deployments. +4. Plan a strategy for integration and release, including the submission of patches to the Slicer repository, maintaining the flatpak generator and repository under [RafaelPalomar/Slicer-Flatpak](https://github.com/RafaelPalomar/Slicer-Flatpak), and evaluating the possibility of integration with flathub. + +## Progress and Next Steps + +🚀 Here is an overview of our progress so far: + +1. ✅ Completed the first distribution of the 3D Slicer Flatpak, providing users with an initial version to test and provide feedback. A repository with documentation can be found at [https://github.com/RafaelPalomar/org.slicer.Slicer-flatpak-repository](https://github.com/RafaelPalomar/org.slicer.Slicer-flatpak-repository) +2. ❌ Currently, we are still working on resolving the SimpleITK problem and ensuring proper integration with 3D Slicer Flatpak. +3. ❌ Testing and verification of 3D Slicer extensions are ongoing. We are actively exploring ways to improve compatibility. +4. 📅 We recently met to plan a strategy for integration and release: + - Some patches will be submitted to the Slicer repository to enhance the flatpak generator and improve the Slicer CMake infrastructure. 
Reference: [RafaelPalomar/Slicer-Flatpak](https://github.com/RafaelPalomar/Slicer-Flatpak) + - For the time being, the flatpak generator, manifest, and flatpak repository will continue under [https://github.com/RafaelPalomar](https://github.com/RafaelPalomar). We need to evaluate the cost of maintenance and the impact of the package before considering a move. + - We will assess the maintenance effort and impact on the Slicer community before deciding on potential integration with flathub. + +# Background and References + +- Slicer Flatpak Repository: [RafaelPalomar/Slicer-Flatpak](https://github.com/RafaelPalomar/Slicer-Flatpak) +- Slicer Flatpak Manifest and Repository: [RafaelPalomar/org.slicer.Slicer-flatpak-repository](https://github.com/RafaelPalomar/org.slicer.Slicer-flatpak-repository) +- Flathub: [flathub.org](https://flathub.org) diff --git a/PW39_2023_Montreal/Projects/SlicerFreeSurferCommands/README.md b/PW39_2023_Montreal/Projects/SlicerFreeSurferCommands/README.md new file mode 100644 index 000000000..6f09e1a1e --- /dev/null +++ b/PW39_2023_Montreal/Projects/SlicerFreeSurferCommands/README.md @@ -0,0 +1,106 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: Slicer FreeSurfer Commands +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: +- name: Ben Zwick + affiliation: The University of Western Australia + country: Australia + +- name: Andy Huynh + affiliation: The University of Western Australia + country: Australia + +- name: Steve Pieper + affiliation: Isomics, Inc. + country: USA +--- + +# Project Description + + + +[SlicerFreeSurferCommands](https://github.com/SlicerCBM/SlicerFreeSurferCommands) aims to provide a graphical user interface for running [FreeSurfer](https://freesurfer.net) commands within [3D Slicer](https://www.slicer.org). 
+ +For example: +- [mri_watershed - strip skull and other outer non-brain tissue](https://surfer.nmr.mgh.harvard.edu/fswiki/mri_watershed): + ``` + mri_watershed -brainsurf surface.vtk mri-t1.mgz stripped.mgz + ``` + Note that FreeSurfer uses mgz file format. + +- [SynthStrip: Skull-Stripping for Any Brain Image](https://surfer.nmr.mgh.harvard.edu/docs/synthstrip/) implemented as [FreeSurferSynthStripSkullStripScripted](https://github.com/SlicerCBM/SlicerFreeSurferCommands/tree/main/FreeSurferSynthStripSkullStripScripted) module: + ``` + mri_synthstrip -i input -o stripped -m mask --no-csf + ``` + +- [SynthSeg: Segmentation of brain MRI scans](https://surfer.nmr.mgh.harvard.edu/fswiki/SynthSeg) implemented as [FreeSurferSynthSeg](https://github.com/SlicerCBM/SlicerFreeSurferCommands/tree/main/FreeSurferSynthSeg) module: + ``` + mri_synthseg --i --o [--parc --robust --fast --vol --qc --post --resample --crop --threads --cpu --v1 --ct] + ``` + Note that SynthSeg is a [Python package](https://github.com/BBillot/SynthSeg) that can be installed without FreeSurfer. + +## Objective + + + +1. Complete the development of existing modules based on Slicer user and developer feedback. +2. Develop additional modules for other commands (e.g. SynthSeg). +3. Package modules as an installable 3D Slicer extension. + +## Approach and Plan + + + +1. Demonstrate and get feedback on the use and implementation of the existing modules from Slicer users and developers. +2. Discuss the implementation of the modules with Slicer developers (in particular the use of CLI vs scripted Python modules for this application). +3. Modify modules based on feedback from Slicer developers. +4. Complete the [new extension checklist](https://github.com/SlicerCBM/SlicerFreeSurferCommands/issues/1). + +## Progress and Next Steps + + + +### Progress + +1. 
Developed Python scripted module (and CLI module which is now deprecated) for FreeSurfer's SynthStrip command for skull stripping. +2. Developed Python scripted module for FreeSurfer's SynthSeg Brain MRI Segmentation command. +3. Completed the [new extension checklist](https://github.com/SlicerCBM/SlicerFreeSurferCommands/issues/1). + +### Next steps + +1. Discuss merging [SlicerFreeSurfer](https://github.com/PerkLab/SlicerFreeSurfer) and [SlicerFreeSurferCommands](https://github.com/SlicerCBM/SlicerFreeSurferCommands), and moving to Slicer GitHub organization. See: + - + - + +# Illustrations + + + +FreeSurfer SynthStrip Skull Strip +![FreeSurfer SynthStrip Skull Strip](https://raw.githubusercontent.com/SlicerCBM/SlicerFreeSurferCommands/d26913aa2ea18af71400e41aab09982b4daa7c77/Screenshot01.png) + +FreeSurfer SynthSeg Brain MRI Segmentation +![FreeSurfer SynthSeg Brain MRI Segmentation](https://raw.githubusercontent.com/SlicerCBM/SlicerFreeSurferCommands/2a855e9784e7a707448e9ea2a550b4f23007eab1/Screenshot02.png) + +# Background and References + + + +Software repository: + +FreeSurfer website: + +Similar extensions for 3D Slicer: +- [SlicerFreeSurfer](https://github.com/PerkLab/SlicerFreeSurfer) +- [SlicerNeuroSegmentation](https://github.com/HOA-2/SlicerNeuroSegmentation) diff --git a/PW39_2023_Montreal/Projects/SlicerIDCBrowser/README.md b/PW39_2023_Montreal/Projects/SlicerIDCBrowser/README.md new file mode 100644 index 000000000..de686278c --- /dev/null +++ b/PW39_2023_Montreal/Projects/SlicerIDCBrowser/README.md @@ -0,0 +1,71 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/SlicerIDCBrowser/README.html + +project_title: Slicer-IDCBrowser +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Andrey Fedorov + affiliation: Brigham and Women's Hospital + country: USA + +- name: Andras Lasso + affiliation: 
Queen's University + country: Canada + +- name: Bill Clifford + affiliation: Institute for Systems Biology + country: USA + +--- + +# Project Description + + + +[NCI Imaging Data Commons](https://portal.imaging.datacommons.cancer.gov/) is a cloud-based repository of publicly available cancer imaging data co-located with analysis and exploration tools and resources. Currently, to download images from IDC users need to use command-line `s5cmd` tool. Our objective is to develop an extension providing +user interface within the Slicer platform to allow browsing and download of images from IDC. + +## Objective + + +1. Start development of the extension using SlicerTCIABrowser as a template. +2. Release extension. + +## Approach and Plan + + + +1. Starting from the existing TCIABrowser extension, re-implement extension API to utilize IDC API instead of TCIA API to browse IDC content and retrieve collection/patient/study/series lists. +2. Update the UI of the extension to coordinate with the output of IDC API. +3. Compare performance of IDC API vs TCIA API. +4. Deploy `s5cmd` within the extension. + + +## Progress and Next Steps + +1. Developed initial version of the module: [https://github.com/fedorov/SlicerIDCBrowser](https://github.com/fedorov/SlicerIDCBrowser). Tested on mac with pre-installed s5cmd. Confirmed working functionality to browse collection/patient/study/series and download individual series. +2. Identified limitations of the [IDC API](https://learn.canceridc.dev/api/getting-started): insufficient documentation, missing features to retrieve necessary attributes at various levels of hierarchy (resulting in blank values for the content of the navigation table). Work on the refined API is underway. +3. Since IDC API is using BigQuery, there is noticeable latency during interaction when compared with TCIA API. Download of the images is perhaps faster. +4. 
Next steps: refine API and update UI once done, automate deployment of s5cmd, revisit the need for cache, refine UI, publish extension. + +# Illustrations + +![idcbrowser_pw39_small](https://github.com/NA-MIC/ProjectWeek/assets/313942/642dc4dc-c51d-40dd-8f44-60e89dde0ad3) + +# Background and References + + + +* [NCI Imaging Data Commons](https://portal.imaging.datacommons.cancer.gov/) +* [IDC data download instructions](https://learn.canceridc.dev/data/downloading-data) +* [SlicerTCIABrowser](https://github.com/QIICR/TCIABrowser) +* installation of system-specific libraries from Slicer module - need to do something like this for s5cmd: [https://github.com/Slicer/Slicer/blob/main/Modules/Scripted/ScreenCapture/ScreenCapture.py#L873](https://github.com/Slicer/Slicer/blob/main/Modules/Scripted/ScreenCapture/ScreenCapture.py#L873) +* detection of system configuration to select s5cmd binary: [https://doc.qt.io/qt-5/qsysinfo.html#productType](https://doc.qt.io/qt-5/qsysinfo.html#productType) diff --git a/PW39_2023_Montreal/Projects/SlicerLiver/README.md b/PW39_2023_Montreal/Projects/SlicerLiver/README.md new file mode 100644 index 000000000..85fa724ec --- /dev/null +++ b/PW39_2023_Montreal/Projects/SlicerLiver/README.md @@ -0,0 +1,97 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/SlicerLiver/README.html + +project_title: Slicer-Liver +category: IGT and Training +presenter_location: In-person + +key_investigators: + +- name: Gabriella D'Albenzio + affiliation: Oslo University Hospital + country: Norway + +- name: Ruoyan Meng + affiliation: NTNU + country: Norway + +- name: Ole V. 
Solberg + affiliation: SINTEF + country: Norway + +- name: Rafael Palomar + affiliation: Oslo University Hospital + country: Norway + +--- + +# Project Description + + + +[Slicer-Liver](https://github.com/ALive-research/Slicer-Liver) is an advanced 3D Slicer extension developed for liver therapy planning. The extension currently offers essential features for liver resection planning and accurate computation of vascular territories. As part of an ongoing project, our aim is to further enhance the existing functionalities and introduce new tools for volumetry computation. Our objective is to provide a comprehensive and user-friendly solution for liver therapy planning within the Slicer platform. + +## Objective + + + +1. Advanced manipulation of deformable surfaces for resection planning. Our current solution for resection planning involves the deformation of Bezier surfaces in a 4x4 grid implemented by means of Slicer Markups (). We are planning to include advanced features such as coloring and grouping of markups for a more effective manipulation. +2. Volumetry computation. Planning of liver therapies largely relies on a volumetry analysis derived from the therapy plan. We are planning to include versatile tools for volume computations. +3. Release of Slicer-Liver 1.0. As Slicer-Liver is becoming an feature rich extension, we aim to release the latest developments achieved during this and the last Project Week in the extension manager (currently, the version released in the Extension Manager does not contain the latest advances). + +## Approach and Plan + + + +1. Discussion and find a strategy to improve our Markups-based resections interaction (Custom C++ markups vs. Python logic) +2. Implementation of the new features (new markups interaction and volumetric computation tools). +3. Testing of the new features and release of the new extension. + +## Progress and Next Steps + + + +1. 
Thanks to changes made by Sara Rolfe (Seattle Children's Research Institute, USA) in the Markups module, we are now able to modify the interaction of Markups-based resections.

+ + + +

+ +

+ + + +

+ +resection + + +resectogram + + + +*No response* + +# Background and References + + + +* [Slicer-Liver on Project Week](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/SlicerLiver/) +* diff --git a/PW39_2023_Montreal/Projects/SlicerPipelinesV2/README.md b/PW39_2023_Montreal/Projects/SlicerPipelinesV2/README.md new file mode 100644 index 000000000..fbc6332a4 --- /dev/null +++ b/PW39_2023_Montreal/Projects/SlicerPipelinesV2/README.md @@ -0,0 +1,63 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: Slicer Pipelines v2 +category: Infrastructure +presenter_location: Online + +key_investigators: + +- name: Harald Scheirich + affiliation: Kitware, Inc. + country: United States + +--- + +# Project Description + + + +Slicer Pipelines is a framework to support the creation of workflows (Pipelines) inside of slicer. It allows users to attach a variety of slicer operations with pipeline support to each other and create a module that can then be executed on its own. Pipelines v2 is based on the work that Connor and others did with the ParameterWrapper. + +## Objective + + + +1. Adapt the PipelineCaseIterator to the new pipeline architecture + +## Approach and Plan + + + +1. Basic refactoring so that PipelineCaseIterator runs with a simple test case +2. Move from allowing single input directory to driving input through csv file +3. Adapt the output side of the case iterator to support multiple values +4. Write output data into csv file +5. Test with different pipelines + +## Progress and Next Steps + + + +1. Refactoring has been done, basic CaseIterator runs with test pipeline +2. CSV files can be read to drive input parameters +3. Name and store nodes for pipelines that produce multiple nodes +4. 
Write `results.csv` with scalar output values and result node paths
Submitted [PR](https://github.com/Slicer/ExtensionsIndex/pull/1947) for adding to extension index + + +# Illustrations + + + +![Screenshot1](https://github.com/NA-MIC/ProjectWeek/assets/25040869/58bb575b-aad1-4cb3-a385-2f3e051ce4ef) + +# Background and References + +[SlicerThemes Repository](https://github.com/sjh26/SlicerThemes) + + diff --git a/PW39_2023_Montreal/Projects/SlicerVRInteraction/GUIWidgetInteraction_CustomWidget.mp4 b/PW39_2023_Montreal/Projects/SlicerVRInteraction/GUIWidgetInteraction_CustomWidget.mp4 new file mode 100644 index 000000000..981b93930 Binary files /dev/null and b/PW39_2023_Montreal/Projects/SlicerVRInteraction/GUIWidgetInteraction_CustomWidget.mp4 differ diff --git a/PW39_2023_Montreal/Projects/SlicerVRInteraction/GUIWidgetInteraction_DataModule.mp4 b/PW39_2023_Montreal/Projects/SlicerVRInteraction/GUIWidgetInteraction_DataModule.mp4 new file mode 100644 index 000000000..65dc1a1c1 Binary files /dev/null and b/PW39_2023_Montreal/Projects/SlicerVRInteraction/GUIWidgetInteraction_DataModule.mp4 differ diff --git a/PW39_2023_Montreal/Projects/SlicerVRInteraction/README.md b/PW39_2023_Montreal/Projects/SlicerVRInteraction/README.md new file mode 100644 index 000000000..784980e68 --- /dev/null +++ b/PW39_2023_Montreal/Projects/SlicerVRInteraction/README.md @@ -0,0 +1,119 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/SlicerVRInteraction/README.html +- /PW39_2023_Montreal/Projects/SlicerVRInteraction/Readme.html + +project_title: SlicerVR - Restore Interactions +category: VR/AR and Rendering +presenter_location: Remote + +key_investigators: +- name: Csaba Pintér + affiliation: EBATINCA + country: Spain + +- name: Simon Drouin + affiliation: ÉTS Montréal + country: Canada + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware, Inc. 
+ country: USA + +- name: Andrey Titov + affiliation: ÉTS Montréal + country: Canada + +- name: Tina Nantenaina + affiliation: ÉTS Montreal + country: Canada + +- name: Lea Vong + affiliation: ÉTS Montréal + country: Canada + +- name: Lucas Gandel + affiliation: Kitware, Inc. + country: France +--- + +# Project Description + + + +The main controller interactions in SlicerVR have been broken for about a year, some interaction types even longer. It would be crucial for keeping SlicerVR usable to make the interactions work again. + +Kitware and Robarts (Jean-Christophe Fillion Robin, Lucas Gandel, Sankhesh Jhaveri, Adam Rankin) have been investing resources and effort in rehauling the AR/VR backend in VTK for a while, thus now we have a new OpenXR backend and restructured libraries SlicerVR is built on. The goal is to give a small push to their efforts in terms of SlicerVR interactions during the project week, towards restoring at least the previous feature set. + +## Objective + + + +In PW 37, basic interaction has been fixed. + +1. Fix the two-controller world move/zoom (i.e. 3D pinch) +2. Customization of controller buttons. Either via the + * Method in-place (functions integrated [here](https://github.com/KitwareMedical/SlicerVirtualReality/pull/87), see also [here](https://github.com/KitwareMedical/SlicerVirtualReality/pull/83)) + * Json manifest files (see [here](https://github.com/Kitware/VTK/tree/master/Rendering/OpenVR)) + +## Approach and Plan + + + +1. Set up a VR workstations at ETS to be able to test and develop +2. Fix two hands interactions +3. Implement custom interaction to test both customization methods + +## Progress and Next Steps + + + +1. Fixed 3D pinch interaction in commit [SlicerVirtualReality@49f1896d6](https://github.com/KitwareMedical/SlicerVirtualReality/commit/49f1896d652c6b27051cd41e8244b52cd28c2dab) +2. 
Rebased the GUI widgets branch into a [new branch](https://github.com/cpinter/SlicerVirtualReality/tree/gui-widget-20230612) +3. Fixed lookup of `vtk_openvr_actions.json` and `vtk_openvr_binding_*.json` files for both build and install tree. See commit [SlicerVirtualReality@a4d465b73](https://github.com/KitwareMedical/SlicerVirtualReality/commit/a4d465b7321a6cdd2e0c3aa85eb04899be471b17) integrated through [PR-117](https://github.com/KitwareMedical/SlicerVirtualReality/pull/117) +4. Make in-VR GUI widget work (with many workarounds and limitations) + +# Illustrations + + + + +Laser pointer interactions with widget existing in Slicer: + + + +Laser pointer interactions with custom widget: + + + +More comprehensive demonstration: + +[![More comprehensive demonstration](https://i9.ytimg.com/vi_webp/Ny5gmIFbhK4/mq1.webp?sqp=CJTDrKQG-oaymwEmCMACELQB8quKqQMa8AEB-AH-CYAC0AWKAgwIABABGEsgSyhlMA8=&rs=AOn4CLDerRLwDJQoa2buCxVCCoKyIv-glA)](https://youtu.be/Ny5gmIFbhK4) + +# Background and References + + + +![Class diagram SlicerVR vs VTK](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/SlicerVRInteractions/slicer-vr-class-diagram-2.png) + +Past project week pages +* [Project week #38 page](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/SlicerVRInteractions/) +* [Project week #37 page](https://projectweek.na-mic.org/PW37_2022_Virtual/Projects/SlicerVRInfrastructure) +* [Project week #35 page](https://projectweek.na-mic.org/PW35_2021_Virtual/Projects/SlicerVR/) +* [Project week #34 page](https://projectweek.na-mic.org/PW34_2020_Virtual/Projects/SlicerVR/) + +Pinter, C., Lasso, A., Choueib, S., Asselin, M., Fillion-Robin, J. C., Vimort, J. B., Martin, K., Jolley, M. A. & Fichtinger, G. (2020). SlicerVR for Medical Intervention Training and Planning in Immersive Virtual Reality. IEEE Transactions on Medical Robotics and Bionics, vol. 2, no. 2, pp. 108-117, May 2020, doi: 10.1109/TMRB.2020.2983199. 
diff --git a/PW39_2023_Montreal/Projects/Slicerros2/README.md b/PW39_2023_Montreal/Projects/Slicerros2/README.md new file mode 100644 index 000000000..03dc8222f --- /dev/null +++ b/PW39_2023_Montreal/Projects/Slicerros2/README.md @@ -0,0 +1,84 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: SlicerROS2 +category: IGT and Training +presenter_location: In-person + +key_investigators: + +- name: Junichi Tokuda + affiliation: Brigham and Women's Hospital + country: USA + +- name: Laura Connolly + affiliation: Queen's University + country: Canada + +- name: Anton Deguet + affiliation: Johns Hopkins University + country: USA + +- name: Arvind S. Kumar, + affiliation: Johns Hopkins University + country: USA + +--- + +# Project Description + + + +The goal of SlicerROS2 is to provide an open-source software platform for medical robotics research. Specifically, the project focuses on architectures to seamlessly integrate a robot system with medical image computing software using two popular open-source software packages: Robot Operating System (ROS) and 3D Slicer. + +## Objective + + + +1. Demo - Set up a live demo using SlicerROS2 and [myCobot](https://www.elephantrobotics.com/en/mycobot-en/). +2. Dissemination - Review and improve online documentation for rosmed.github.io +3. Plan - Discuss future directions and maintenance (other potential projects, integration into nightly build, etc). + +## Approach and Plan + + + +1. Set up a build environment on a laptop with Ubuntu 22.04 +2. Set up myCobot + +## Progress and Next Steps + + + +1. Set up ROS2 Humble Hawksbill on Ubuntu 22.04 [ROS2 Humble Installation on Ubuntu (Debian)](https://docs.ros.org/en/humble/Installation/Ubuntu-Install-Debians.html) +2. Built the SlicerROS2 estension [Slicer ROS2 Getting Started](https://slicer-ros2.readthedocs.io/en/latest/pages/getting-started.html) + - The Slicer ROS2 could not be compiled due to an issue in the CMakeLists.txt. 
It has been fixed and incorporated into the main repository ([pull request on GitHub](https://github.com/rosmed/slicer_ros2_module/pull/66) ) +4. Set up ROS interface for myCobot [Github repository]( https://github.com/elephantrobo,cs/mycobot_ros2 ) + - Change the name of the device file in line 14 in listen_real.py ('/dev/5yUSB0') to '/dev/5yACM0'. +5. Replace the robot model in the ros interface. +6. Launch the interface by: +~~~~ +ros2 launch mycobot_280 slider_control.launch.py +~~~~ + +# Illustrations + + + + + +[download video](https://github.com/NA-MIC/ProjectWeek/releases/download/project-week-resources/PW39_SlicerROS2_myCobot.mp4) + +# Background and References + + + +The National Institute of Biomedical Imaging and Bio-engineering of the U.S. National Institutes of Health (NIH) under award number R01EB020667, and 3R01EB020667-05S1 (MPI: Tokuda, Krieger, Leonard, and Fuge). The content is solely the responsibility of the authors and does not necessarily represent the official views of the NIH. + +The National Sciences and Engineering Research Council of Canada and the Canadian Institutes of Health Research, the Walter C. Sumner Memorial Award, the Mitacs Globalink Award and the Michael Smith Foreign Study Supplement. diff --git a/PW39_2023_Montreal/Projects/Systoleos/README.md b/PW39_2023_Montreal/Projects/Systoleos/README.md new file mode 100644 index 000000000..2669ed22e --- /dev/null +++ b/PW39_2023_Montreal/Projects/Systoleos/README.md @@ -0,0 +1,86 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: SystoleOS +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Rafael Palomar + affiliation: Oslo University Hospital + country: Norway + +- name: Steve Pieper + affiliation: Isomics, Inc. + country: USA + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware, Inc. 
+ country: USA + +--- + +# Project Description + + + +Over a span of more than ten years, 3D Slicer has paved the way for cutting-edge biomedical research. Its unprecedented success is pushing the frontiers of research, leading numerous research groups and corporations to recognize 3D Slicer as a credible software for designing medical devices. These devices not only have the potential to support routine clinical workflows but may also evolve into marketable products. Although 3D Slicer's development has been largely research-focused, its modular architecture fosters the creation of industrial prototypes. + +Systole OS envisions a harmonious integration of 3D Slicer and its associated software, such as the Plus Toolkit, MONAI Label, and others, within a freely accessible, open-source operating system based on GNU/Linux. This aims to facilitate the development and deployment of medical devices. + +![](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/SystoleOS/systole.png) + +The following are key features we aim to leverage with Systole OS: + +## Objective + +1. **Updating Packages:** We are planning to ensure the timely update and maintenance of existing packages, targeting specifically the release Slicer-5.3.0. + +2. **Integration and Testing Infrastructure:** Develop a robust infrastructure that supports seamless integration and rigorous testing to maintain the highest quality standards. + +3. **Generation of Containers and VMs:** Establish a systematic approach for generating containers and Virtual Machines (VMs) that can effectively support both development and testing processes. + +## Approach and Plan + + + +1. **Package Assessment:** Review the status of existing packages and identify necessary updates for the release Slicer-5.3.0. + +2. **Update Planning:** Develop a plan and timeline for implementing the necessary updates. + +3. **Update Implementation:** Carry out the plan to update packages in line with the established timeline. + +4. 
**Kubernetes Infrastructure Setup:** Begin the process of setting up a Kubernetes-based infrastructure to support our integration and testing needs. + +5. **Testing Protocol Development:** With the Kubernetes infrastructure ready, establish systematic protocols for integration and testing to ensure high quality standards. + +6. **Container and VM Generation:** Implement a systematic approach for creating containers and Virtual Machines (VMs) for development and testing, ensuring this approach is scalable as needed. + +## Progress and Next Steps + +🚀 Here is an overview of our progress so far: + +1. 🔜 We have partially completed a Kubernetes infrastructure to build containers and virtual machines with pre-installed Systole. This infrastructure is based on [ArgoCD](https://argoproj.github.io/argo-cd/), [ArgoWorkflow](https://argoproj.github.io/argo-workflows/), and [Bitnami Sealed Secrets](https://github.com/bitnami-labs/sealed-secrets). +2. We are currently in the planning phase for updating the packages to target the release Slicer-5.3.0. +3. We are continuing the setup process for the Kubernetes infrastructure to support integration and testing needs. + +## Next Steps + +Moving forward, our next steps include: + +1. Implementing the necessary updates to the packages in line with our plan and timeline. +2. Finalizing the setup of the Kubernetes infrastructure for building containers and virtual machines. +3. Initiating testing and production of SystoleOS containers and virtual machines for use by other researchers. + +## Illustrations + +*No response* + +# Background and References + +- [SystoleOS Gentoo Overlay Repository](https://github.com/SystoleOS/gentoo-overlay): Repository for the SystoleOS Gentoo overlay. +- [SystoleOS Infrastructure Repository](https://github.com/SystoleOS/infrastructure): Repository for the SystoleOS infrastructure project. 
+- [SystoleOS Workflows Repository](https://github.com/SystoleOS/workflows): Repository for the SystoleOS workflows project. diff --git a/PW39_2023_Montreal/Projects/Template/README.md b/PW39_2023_Montreal/Projects/Template/README.md new file mode 100644 index 000000000..a9f4de6bc --- /dev/null +++ b/PW39_2023_Montreal/Projects/Template/README.md @@ -0,0 +1,58 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: Write full project title here +category: Uncategorized +presenter_location: Online + +key_investigators: +- name: Person Doe + affiliation: University + +- name: Person2 Doe2 + affiliation: University2 + country: Spain +--- + +# Project Description + + + +## Objective + + + +1. Objective A. Describe **what you plan to achieve** in 1-2 sentences. +1. Objective B. ... +1. Objective C. ... + +## Approach and Plan + + + +1. Describe specific steps of **what you plan to do** to achieve the above described objectives. +1. ... +1. ... + +## Progress and Next Steps + + + +1. Describe specific steps you **have actually done**. +1. ... +1. ... 
+ +# Illustrations + + + +# Background and References + + diff --git a/PW39_2023_Montreal/Projects/Template/README.md.j2 b/PW39_2023_Montreal/Projects/Template/README.md.j2 new file mode 100644 index 000000000..f499b8f4a --- /dev/null +++ b/PW39_2023_Montreal/Projects/Template/README.md.j2 @@ -0,0 +1,56 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: {{ title | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +category: {{ category | regex_replace("\n$|\n\.\.\.\n$", "") }} +presenter_location: {{ presenter_location | regex_replace("\n$|\n\.\.\.\n$", "") }} + +key_investigators: +{% for investigator in investigators %} +- name: {{ investigator.name | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} + affiliation: {{ investigator.affiliation | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +{%- if investigator.country %} + country: {{ investigator.country | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +{%- endif %} +{% endfor %} +--- + +# Project Description + + + +{{ description }} + +## Objective + + + +{{ objective }} + +## Approach and Plan + + + +{{ approach }} + +## Progress and Next Steps + + + +{{ progress }} + +# Illustrations + + + +{{ illustrations }} + +# Background and References + + + +{{ background }} diff --git a/PW39_2023_Montreal/Projects/TrackedUltrasoundIntegrationIntoNousnavALowCostNeuronavigationSystem/README.md b/PW39_2023_Montreal/Projects/TrackedUltrasoundIntegrationIntoNousnavALowCostNeuronavigationSystem/README.md new file mode 100644 index 000000000..47821a90d --- /dev/null +++ b/PW39_2023_Montreal/Projects/TrackedUltrasoundIntegrationIntoNousnavALowCostNeuronavigationSystem/README.md @@ -0,0 +1,92 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/TrackedUltrasoundIntegrationIntoNousnavALowCostNeuronavigationSystem/README.html + +project_title: Tracked ultrasound integration into NousNav, a low-cost neuronavigation system +category: IGT and Training 
+presenter_location: In-person + +key_investigators: + +- name: Colton Barr + affiliation: Queen's University / Brigham and Women's Hospital + country: Canada + +- name: Sarah Frisken + affiliation: Brigham and Women's Hospital + country: USA + +- name: Sonia Pujol + affiliation: Brigham and Women's Hospital + country: USA + +- name: Steve Pieper + affiliation: Isomics, Inc. + country: USA + +- name: Tina Kapur + affiliation: Brigham and Women's Hospital + country: USA + +- name: Tamas Ungi + affiliation: Queen's University + country: Canada + +- name: Sam Horvath + affiliation: Kitware, Inc. + country: USA + +--- + +# Project Description + + + +NousNav is an ongoing project led by Dr. Alex Golby at Brigham and Women's Hospital to build and disseminate a low-cost neuronavigation system. Built as a 3D Slicer Custom App, NousNav uses low cost optical tracking (Optitrack Duo) in combination with custom optically-tracked tools and reference arrays to facilitate patient registration, procedure planning, and navigation. + +The system is being continually updated based on user feedback. An important next step in development is the integration of tracked ultrasound data. + +## Objective + + + +1. Gather user feedback on the current iteration of the system and establish potential next steps for development. +2. Discuss approaches for integrating tracked ultrasound data into the navigation workflow. +3. Create a NousNav prototype that includes tracked ultrasound. + +## Approach and Plan + + + +1. Setup demo of NousNav system for participants to try and systematically collect user feedback. +2. Collaborate with colleagues working on tracked neurosurgical ultrasound to establish best practices for integrating ultrasound into the system +3. Create custom build of NousNav with basic tracked ultrasound workflow elements integrated. + +## Progress and Next Steps + +1. Streamed imaging data from Telemed on Windows 11 via PLUS. +2. 
Performed ultrasound calibration using 3D printed tracking cluster designed and printed by Tamas. +3. (Almost) visualized tracked ultrasound images of registered skull phantom within NousNav. + + +*No response* + +# Illustrations + +![NousNavScreenshot_3](https://github.com/NA-MIC/ProjectWeek/assets/25553662/c1c07bc7-db48-4b28-9cbf-ecf2de631ce9) + +![NousNavScreenshot_4](https://github.com/NA-MIC/ProjectWeek/assets/25553662/09124ad8-94ec-4ec6-be50-86dae4d29b7b) + + + +*No response* + +# Background and References + + + +*No response* diff --git a/PW39_2023_Montreal/Projects/TrainingAiAlgorithmsOnIdcData/README.md b/PW39_2023_Montreal/Projects/TrainingAiAlgorithmsOnIdcData/README.md new file mode 100644 index 000000000..95023c719 --- /dev/null +++ b/PW39_2023_Montreal/Projects/TrainingAiAlgorithmsOnIdcData/README.md @@ -0,0 +1,69 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/TrainingAiAlgorithmsOnIdcData/README.html + +project_title: Training AI algorithms on IDC data +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Cosmin Ciausu + affiliation: Brigham and Women's Hospital + country: USA + +- name: Andrey Fedorov + affiliation: Brigham and Women's Hospital + country: USA + +--- + +# Project Description + + + +[Imaging Data Commons](https://portal.imaging.datacommons.cancer.gov/) provides publicly available cancer imaging data. + +Previous works([IDC Prostate segmentation](https://github.com/ImagingDataCommons/idc-prostate-mri-analysis)) ([NLST-Body Part Regression](https://github.com/ImagingDataCommons/IDC-Tutorials/blob/master/notebooks/body_part_regression_with_structured_reports.ipynb)) demonstrated through several use cases inference and analysis of AI algorithms on IDC data. 
+Downloading IDC data, conversion between file imaging standards, cloud environment setup and imaging pre-processing steps were studied through these inference and analysis use cases. + +During this project week, our goal is to develop use cases of training AI algorithms on IDC data. We welcome any Project Week participants that are interested in leveraging IDC data for training AI algorithms(or evaluation) to collaborate with us! + +## Objective + + + +1. Leverage IDC data for SOTA segmentation algorithm(nnUNet, MONAI) +2. Collaborate with other members to study the feasibility of using IDC data for training AI algorithms. + +## Approach and Plan + + + +1. Using nnUNet segmentation framework for prostate segmentation on IDC data([Prostatex/QIN collection](https://portal.imaging.datacommons.cancer.gov/explore/filters/?collection_id=Community\&collection_id=QIN\&collection_id=prostate_mri_us_biopsy\&collection_id=prostatex\&collection_id=qin_prostate_repeatability)) for training purposes. +2. Expand AI training use cases beyond SOTA algorithms. + +## Progress and Next Steps + + + +1. Leverage information gained by applying inference using nnUNet prostate segmentation on several prostate imaging collections, for training pipelines. +2. Creation of whole prostate IDC training cohort: 45 T2W MRI scans and corresponding expert whole prostate annotations were used. +3. Creation of Google Colab use case showing how to build this cohort and begin a nnUNet training experiment. + +# Illustrations + + +![WORKFLOW](https://github.com/ccosmin97/ProjectWeek/assets/72577931/d9627a49-b6ca-4216-a564-4118c3b61e14) + +# Background and References + + +* [Google Colab use case for IDC training cohort and nnUNet training](https://colab.research.google.com/drive/1TmmhouNGeQ-DpGz2z83yiZh3KQ20a-1M?usp=sharing). 
+* [PW37_2022_Virtual -- nnUnet - Prostate segmentation on Imaging Data Commons(IDC) data](https://github.com/NA-MIC/ProjectWeek/tree/master/PW37_2022_Virtual/Projects/IDCProstateSegmentation) +* [AI Imaging analysis on IDC data](https://github.com/ImagingDataCommons/IDC-Tutorials/tree/master/notebooks#imaging-analysis-ai) diff --git a/PW39_2023_Montreal/Projects/TranslationRotationOfSelectPointsInAList/README.md b/PW39_2023_Montreal/Projects/TranslationRotationOfSelectPointsInAList/README.md new file mode 100644 index 000000000..c9ccc43f8 --- /dev/null +++ b/PW39_2023_Montreal/Projects/TranslationRotationOfSelectPointsInAList/README.md @@ -0,0 +1,79 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/Projects/TranslationRotationOfSelectPointsInAList/README.html + +project_title: Translation/rotation of select points in a list +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Sara Rolfe + affiliation: Seattle Children's Research Institute + country: USA + +- name: Murat Maga + affiliation: University of Washington + country: USA + +- name: Gabriella D'Albenzio + affiliation: Oslo University Hospital + country: Norway + +- name: Rafael Palomar + affiliation: Oslo University Hospital + country: Norway + +--- + +# Project Description + + + +The goal of this project is to facilitate selection and independent manipulation of points in a list. + +This can currently be done in the Markups module by copying the points to a new list, translating/rotating the points, and copying the point positions back to the original node. However this process is tedious and error-prone. + +The initial motivation for this project was to simplify creation of synthetic data from landmark transforms by transforming an original set of landmarks into the target landmark set. + +## Objective + + + +1. 
Discuss overlapping goals between related projects (SlicerMorph, Slicer-Liver) +2. Develop strategy for implementation + +## Approach and Plan + + + +Two possible solutions have been discussed for the implementation: +1. Add functions to Markups Editor module in the SlicerMorph extension +2. Add to Slicer core in the Markups module. Currently the interaction handles are disabled if any point is locked. This can be modified so that locked points remain fixed and unlocked points move when using a markup's interaction handles. This solution will not allow scaling since this is not currently supported for most Markup types. + +## Progress and Next Steps + + + +1. Met to discuss use cases and overlap in needs between SlicerMorph and SlicerLiver Groups +2. Built working prototype of changes to the Markups module in 3D Slicer that meets the needs of both groups +3. Testing by remote team members identified bug in the movement of the center of rotation/translation +4. Identified and addressed issues with unplaced points and position of interaction handles with Kyle Sunderland and Andras Lasso +5. Submitted a [pull request](https://github.com/Slicer/Slicer/pull/7025) adding this new function to the Markups module. + +# Illustrations +pointManipulation1 + +pointManipulation2 + + +# Background and References + + + +1. Forum post [here](https://discourse.slicer.org/t/moving-a-subset-of-points-in-a-list/29198) discussing the issue and possible solutions. 
diff --git a/PW39_2023_Montreal/Projects/TranslationRotationOfSelectPointsInAList/TranslatePoints1.png b/PW39_2023_Montreal/Projects/TranslationRotationOfSelectPointsInAList/TranslatePoints1.png new file mode 100644 index 000000000..364c7793e Binary files /dev/null and b/PW39_2023_Montreal/Projects/TranslationRotationOfSelectPointsInAList/TranslatePoints1.png differ diff --git a/PW39_2023_Montreal/Projects/TranslationRotationOfSelectPointsInAList/TranslatePoints2.png b/PW39_2023_Montreal/Projects/TranslationRotationOfSelectPointsInAList/TranslatePoints2.png new file mode 100644 index 000000000..c47d64172 Binary files /dev/null and b/PW39_2023_Montreal/Projects/TranslationRotationOfSelectPointsInAList/TranslatePoints2.png differ diff --git a/PW39_2023_Montreal/Projects/TranslationRotationOfSelectPointsInAList/pointManipulation1.gif b/PW39_2023_Montreal/Projects/TranslationRotationOfSelectPointsInAList/pointManipulation1.gif new file mode 100644 index 000000000..c4e38f62d Binary files /dev/null and b/PW39_2023_Montreal/Projects/TranslationRotationOfSelectPointsInAList/pointManipulation1.gif differ diff --git a/PW39_2023_Montreal/Projects/TranslationRotationOfSelectPointsInAList/pointManipulation2.gif b/PW39_2023_Montreal/Projects/TranslationRotationOfSelectPointsInAList/pointManipulation2.gif new file mode 100644 index 000000000..6437b3c92 Binary files /dev/null and b/PW39_2023_Montreal/Projects/TranslationRotationOfSelectPointsInAList/pointManipulation2.gif differ diff --git a/PW39_2023_Montreal/Projects/TutorialsOnWorkingWithDicomAnnotationsInPathologyWholeSlideImages/README.md b/PW39_2023_Montreal/Projects/TutorialsOnWorkingWithDicomAnnotationsInPathologyWholeSlideImages/README.md new file mode 100644 index 000000000..f48de45dd --- /dev/null +++ b/PW39_2023_Montreal/Projects/TutorialsOnWorkingWithDicomAnnotationsInPathologyWholeSlideImages/README.md @@ -0,0 +1,98 @@ +--- +layout: pw39-project + +permalink: /:path/ +redirect_from: +- 
/PW39_2023_Montreal/Projects/TutorialsOnWorkingWithDicomAnnotationsInPathologyWholeSlideImages/README.html + +project_title: Tutorials on working with DICOM annotations in pathology whole-slide images +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Daniela Schacherer + affiliation: Fraunhofer MEVIS + country: Germany + +- name: Chris Bridge + affiliation: MGH + country: USA + +- name: David Clunie + affiliation: PixelMed + country: USA + +- name: Curtis Lisle + affiliation: KnowledgeVis + country: USA + +- name: Maximillian Fischer + affiliation: DKFZ + country: Germany + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +--- + +# Project Description + + + +This project aims to create tutorials on how to work with DICOM annotations in pathology whole-slide images (WSIs). +We will focus on nuclei annotations stored as DICOM Microscopy Bulk Simple Annotations and compute nuclei density (cellularity) on tile-level from them. The computed cellularity values are then stored as DICOM parametric maps. + +## Objective + + + +1. Objective A: Have a Colaboratory notebook ready that at least reads DICOM Microscopy Bulk Simple Annotation files (currently from a Google Storage bucket, ideally later from the IDC directly) and computes cellularity values. +2. Objective B: Encode computed cellularity values as DICOM parametric map that can be stored back to the IDC. + +## Approach and Plan + + + +1. Testing and documentation of the current capabilities + * Establish a repository of test samples that will contain standard-compliant examples of bulk annotations (different number of points, 2D vs 3DSCOORD), segmentations (binary and fractional) and parametric maps (floating point and integer). 
+ * Share code samples that were used to generate the examples above + * Test different combinations of Slim viewer, DICOMweb backend (Google, dcm4chee, orthanc) and test samples to understand what is supported by various components (i.e., perhaps only 3DSCOORD is supported by Slim, there may be limits on maximum size of SQ in Google Healthcare) +2. Development of the tutorial + * Investigate nuclei annotations for plausibility + * Read nuclei annotations + * Efficiently compute cellularity values + * Encode cellularity values as DICOM parametric maps + +## Progress and Next Steps + +1. We created different DICOM stores in Google (re-used the Google Cloud platform (GCP) project idc-external-031): + * Single 2DSCOORD bulk annotation file and corresponding WSI: [DICOMweb endpoint](https://healthcare.googleapis.com/v1/projects/idc-external-031/locations/us-central1/datasets/single-dicom-annotation-test/dicomStores/single-dicom-annotation-test-store/dicomWeb) + * 2D vs. 3D and point vs. polygon bulk annotation files and corresponding WSI: [DICOMweb endpoint](https://healthcare.googleapis.com/v1/projects/idc-external-031/locations/us-central1/datasets/2d_3d_point_polygon_annotation_test/dicomStores/2d_3d_point_polygon_annotation_test_store/dicomWeb) + * Different sizes of DICOM bulk annotation files: [DICOMweb endpoint](https://healthcare.googleapis.com/v1/projects/idc-external-031/locations/us-central1/datasets/diff-sizes-dicom-annotations-test/dicomStores/diff-sizes-dicom-annotations-test-store/dicomWeb) + * Single binary segmentation plus simple bulk annotation file: [DICOMweb endpoint](https://healthcare.googleapis.com/v1/projects/idc-external-031/locations/us-central1/datasets/segmentations-binary-test/dicomStores/segmentations-binary-test-store/dicomWeb) + * Single binary segmentation file produced by Curt Lisle in the context of his project: [DICOMweb 
endpoint](https://healthcare.googleapis.com/v1/projects/idc-external-031/locations/us-central1/datasets/segmentation-binary-curt-test/dicomStores/segmentation-binary-curt-test-store/dicomWeb) + * Single fractional segmentation plus simple bulk annotation file: [DICOMweb endpoint](https://healthcare.googleapis.com/v1/projects/idc-external-031/locations/us-central1/datasets/segmentations-fractional-test/dicomStores/segmentations-fractional-test-store/dicomWeb) + * Parametric maps: [DICOMweb endpoint](https://healthcare.googleapis.com/v1/projects/idc-external-031/locations/northamerica-northeast1/datasets/pw39-samples/dicomStores/parametric-maps/dicomWeb) + * Working example from Chris: binary segmentation plus simple bulk annotation file: [DICOMweb endpoint](https://healthcare.googleapis.com/v1/projects/idc-external-031/locations/us-central1/datasets/working-binary-segmentation-test/dicomStores/working-binary-segmentation-test-store/dicomWeb) +2. We tested and documented current capabilities of reading and visualizing annotations + * Results are summarized [here](https://docs.google.com/document/d/1FWSHL5GA47GC-bbrYOhqySGmKoQ0yLu7EM7-UbhYcSY/edit?usp=sharing) +3. We implemented code for cellularity computation and prepared a Colab notebook on which further work will be done following the project week. + +# Illustrations + + + +![Overview DICOM annotations: Segmentation IOD, Microscopy Bulk Simple Annotations IOD. 
Taken from https://doi.org/10.1038/s41467-023-37224-2.](./overview_annotations.png) + +# Background and References + + + +* Github page [Slim viewer](https://github.com/ImagingDataCommons/slim) +* Slim viewer instance provided by Andrey Fedorov: [slim-viewer-andrey](andrey-slim-test.web.app) +* [Slim deployment tutorial](https://docs.google.com/document/d/1857jb_wKHqyGOd49UirujDDrFE8fUPfimZPXJ19zSF4/edit?usp=sharing) - seek feedback from Max and Curt +* WIP Code under development by Chris Bridge to convert annotations from to DICOM ANN/SEG: [Github repository](https://github.com/ImagingDataCommons/idc-pan-cancer-annotations-conversion/) +* Some example [Parametric DICOM Map](https://console.cloud.google.com/storage/browser/pw39-parametric-map;tab=objects?project=idc-external-031&prefix=&forceOnObjectsSortingFiltering=false) and the [conversion code as Docker container](https://github.com/maxfscher/DICOMwsiWorkflow/tree/main) from Max Fischer. diff --git a/PW39_2023_Montreal/Projects/TutorialsOnWorkingWithDicomAnnotationsInPathologyWholeSlideImages/overview_annotations.png b/PW39_2023_Montreal/Projects/TutorialsOnWorkingWithDicomAnnotationsInPathologyWholeSlideImages/overview_annotations.png new file mode 100644 index 000000000..9fd0806a8 Binary files /dev/null and b/PW39_2023_Montreal/Projects/TutorialsOnWorkingWithDicomAnnotationsInPathologyWholeSlideImages/overview_annotations.png differ diff --git a/PW39_2023_Montreal/Projects/UndoRedo/README.md b/PW39_2023_Montreal/Projects/UndoRedo/README.md new file mode 100644 index 000000000..2b2a4e5bb --- /dev/null +++ b/PW39_2023_Montreal/Projects/UndoRedo/README.md @@ -0,0 +1,96 @@ +--- +layout: pw39-project + +permalink: /:path/ + +project_title: 3D Slicer Undo/Redo +category: Infrastructure +presenter_location: In-person + +key_investigators: +- name: Kyle Sunderland + affiliation: Queen's University + country: Canada +--- + +# Project Description + +Global undo/redo is 
currently [the highest voted feature](https://discourse.slicer.org/t/is-it-possible-to-add-a-global-undo-button/16859) on the [3D Slicer Discourse feature requests board](https://discourse.slicer.org/c/support/feature-requests/9). + +## Objective + + + +1. Gather feedback on the current state of Undo/Redo in Slicer. + +## Approach and Plan + + + +To test the undo/redo functionality currently available in Slicer, add the following code to '.slicerrc' to test undo/redo with Markups: + +```python +slicer.mrmlScene.SetUndoOn() + +undoEnabledNodeClassNames = [ + "vtkMRMLMarkupsFiducialNode", + "vtkMRMLMarkupsLineNode", + "vtkMRMLMarkupsAngleNode", + "vtkMRMLMarkupsCurveNode", + "vtkMRMLMarkupsClosedCurveNode", + "vtkMRMLMarkupsPlaneNode", + "vtkMRMLMarkupsROINode", + ] +for className in undoEnabledNodeClassNames: + node = slicer.mrmlScene.CreateNodeByClass(className) + node.SetUndoEnabled(True) + slicer.mrmlScene.AddDefaultNode(node) + +def onRedo(): + slicer.mrmlScene.Redo() + +def onUndo(): + slicer.mrmlScene.Undo() + +redoShortcuts = [] +redoKeyBindings = qt.QKeySequence.keyBindings(qt.QKeySequence.Redo) +for redoBinding in redoKeyBindings: + redoShortcut = qt.QShortcut(slicer.util.mainWindow()) + redoShortcut.setKey(redoBinding) + redoShortcut.connect("activated()", onRedo) + redoShortcuts.append(redoShortcut) + +undoShortcuts = [] +undoKeyBindings = qt.QKeySequence.keyBindings(qt.QKeySequence.Undo) +for undoBinding in undoKeyBindings: + undoShortcut = qt.QShortcut(slicer.util.mainWindow()) + undoShortcut.setKey(undoBinding) + undoShortcut.connect("activated()", onUndo) + undoShortcuts.append(undoShortcut) + +toolBar = qt.QToolBar("Undo/Redo") +toolBar.addAction(qt.QIcon(":/Icons/Medium/SlicerUndo.png"), "Undo", onUndo) +toolBar.addAction(qt.QIcon(":/Icons/Medium/SlicerRedo.png"), "Redo", onRedo) +slicer.util.mainWindow().addToolBar(toolBar) +``` + +## Progress and Next Steps + + + +1. 
Fixed issue with camera movements creating unnecessary undo states: [Slicer/5e460ad](https://github.com/Slicer/Slicer/commit/5e460add5b9163fb2f80e33037624c97f5b4d7f4).
+2. Continue to receive feedback and bug reports on the current implementation.
+3. Add option to enable/disable undo from application settings in Slicer.
+
+# Illustrations
+
+
+![undo_redo](https://github.com/NA-MIC/ProjectWeek/assets/9222709/13bc6fc2-c93d-41b0-b25c-c24b996c867d)
+
+# Background and References
+
+- [Slicer global undo feature request](https://discourse.slicer.org/t/is-it-possible-to-add-a-global-undo-button/16859)
diff --git a/PW39_2023_Montreal/Projects/UsingLargeLanguageAiModelsToInvokeSlicerModulesAndWorkflows/README.md b/PW39_2023_Montreal/Projects/UsingLargeLanguageAiModelsToInvokeSlicerModulesAndWorkflows/README.md
new file mode 100644
index 000000000..d0e488376
--- /dev/null
+++ b/PW39_2023_Montreal/Projects/UsingLargeLanguageAiModelsToInvokeSlicerModulesAndWorkflows/README.md
@@ -0,0 +1,86 @@
+---
+layout: pw39-project
+
+permalink: /:path/
+
+project_title: Using large language AI models to invoke Slicer modules and workflows
+category: Infrastructure
+presenter_location: In-person
+
+key_investigators:
+
+- name: Curtis Lisle
+  affiliation: KnowledgeVis
+  country: USA
+
+- name: Steve Pieper
+  affiliation: Isomics, Inc.
+  country: USA
+
+- name: Andrey Fedorov
+  affiliation: Brigham and Women's Hospital
+  country: USA
+
+- name: Justin Johnson
+  affiliation: Brigham and Women's Hospital
+  country: USA
+
+- name: Theodore Aptekarev
+  affiliation: Slicer Community
+  country: Montenegro
+
+- name: Rudolf Bumm
+  affiliation: Kantonsspital Graubünden
+  country: Switzerland
+
+---
+
+# Project Description
+
+3D Slicer is built with a powerful core to load, transform, store, and manage medical images and derived datasets. Slicer has a catalog of loadable extensions that assist with or automate task-specific workflows.
Slicer's web API provides remote access to invoke many of the processing steps which are available through its very complete user interface. + +The recent explosion of generative LLMs (large language models) from the AI community has demonstrated that these language models can, in some cases, translate task or problem descriptions into sequences of operations. Wouldn't it be powerful if 3D Slicer could be verbally instructed to perform operations or process datasets as requested? Theoretically, an embedded LLM could be trained on Slicer's modules, including under what circumstances the modules could be applied to transform a Slicer scene as needed to solve a problem presented by the user. + +As one example, in Operating Theaters during surgical procedures, the Slicer user interface is hard or impossible to access due to sterility restrictions and other factors. It would be helpful if clinicians could control Slicer's functions through an alternative method than the interactive user interface. For example, "Let me see the lung lesions more clearly" could be translated into increased transparency of the lung segmentation and an orientation repositioning to make a lesion segmentation visible in-situ. + +## Objective + +The goal of this project proposal is to schedule a meeting during Project Week to discuss this idea, assess the level of interest in the Slicer community, discuss early technical approaches, and decide who might be interested in working together to seek funding or pursue this together. Both clinicians with a problem to solve and AI technicians are invited to participate. 
+
+## Approach and Plan
+
+* Schedule a meeting of interested parties during PW39
+* Discuss applicable existing open-source tools
+* Assess the value to the community and define a plan to continue if this idea has merit
+* Possibly experiment with a proof of concept to connect to Slicer's API
+
+## Progress and Next Steps
+
+* There is already work underway in our community with large language models. Some of this work was demonstrated during our meeting this week.
+* We researched several open-source LLM repositories that allow connection to external APIs (Application Programming Interfaces), such as what Slicer has with the web interface.
+* A productive meeting was held this week. Rudolf showed how Slicer documentation could be used to fine-tune an LLM as a question-answering system (see attached notebook shared on Slicer discourse). Justin discussed the work on LLMs for IDC Query. We discussed that training LLMs on documentation will be easier in the short term, since changes in the Slicer state (change in the MRML tree after running a model) is hard to represent semantically. Theodore demonstrated how the browser extension of GPT4 gives better answers than the base model.
+* We achieved our goal of an initial meeting. During the meeting we discussed the "nearer term" goals of training an LLM to answer questions about 3D Slicer. General invocation of Slicer modules in a workflow is farther off yet, since it is hard to define the semantics of Slicer modules. Therefore, the LLM wouldn't be able to reliably learn how to compose a workflow of Slicer API calls.
+* Thanks for everyone's contributions to this brainstorming session. The interactive demonstrations were appreciated.
+
+
+# Illustrations
+The technology to train and inference large language models is changing. Formerly, all data was in the permanent training set and used to directly train internal model weights. 
Now documents are vectorized and used for searching during the inference process, as illustrated below. LLMs are developing to have APIs that can be used to incorporate these run-time searchable documents.
+![LLMTechStack](https://data.kitware.com/api/v1/file/648af6ae488633cbb1275d6a/download)
+
+In addition to this trend, some LLMs (Gorilla, for example) are designed to be able to search pre-processed API descriptions and then invoke external APIs during the inferencing step. The photo below shows how external APIs to convert speech to text, generate images, etc. are available for the LLM to use during inferencing:
+
+![GorillaUsesAPIs](https://data.kitware.com/api/v1/file/648af695488633cbb1275d67/download)
+
+
+# Background and References
+
+Hugging Face has a new API called "Agents" that is designed to use tools according to their descriptions of the I/O they handle. The Agent API puts together a workflow of tools to accomplish the user's request. This is not exactly what I was thinking, as there are issues related to how to identify and return a changed MRML scene, but it inspired my thinking somewhat: .
+
+**Gorilla:** Work that seems more directly towards a way to invoke Slicer modules via API is Gorilla, a LLAMA model fine-tuned to invoke external APIs to accomplish a requested task: . I just started reading the paper referenced on the repository site.
+
+Thoughts on how the advent of LLMs can change our interface to complex software:
+
+Early work in the community to automatically generate Imaging Data Commons BigQueries from free text prompts: . This solution exhibits some hallucination, which has been addressed by the work Justin is engaged with.
+
+**Using Slicer documentation as searchable content for GPT:** A link to Rudolf's ipython notebook that he presented during our meeting. 
Slicer docs were vectorized and used by the Langchain tools to assist GPT to provide better answers: +https://discourse.slicer.org/t/langchain-query-the-complete-3d-slicer-documentation-script-repository-and-faq-pdf-and-html-with-openai-llm/28746/6 diff --git a/PW39_2023_Montreal/README.md b/PW39_2023_Montreal/README.md new file mode 100644 index 000000000..d2a62f4bd --- /dev/null +++ b/PW39_2023_Montreal/README.md @@ -0,0 +1,316 @@ +--- +permalink: /:path/ +redirect_from: +- /PW39_2023_Montreal/README.html +- /PW39_2023_Montreal/Readme.html + +project_categories: +- Early Presenter +- VR/AR and Rendering +- IGT and Training +- Segmentation / Classification / Landmarking +- Quantification and Computation +- Cloud / Web +- Infrastructure +--- + +# Welcome to the web page for the 39th Project Week! + + +[This event](https://projectweek.na-mic.org/PW39_2023_Montreal/README.html) will take place June 12-16, 2023 in Montreal, Canada. + +* Project Week 39 will be a hybrid event with a strong in-person component +* The venue for in-person events is [École de Technologie Supérieure](https://www.etsmtl.ca/), Montreal, Canada. + +If you have any questions, you can contact the [organizers](#organizers). + +## Registration +* All participants (both remote and in-person) have to register using the [this form](https://docs.google.com/forms/d/e/1FAIpQLSe9dyTCW8Y-RWHEvBUTUXTpbP8Nd9KvUUFpoItuy9_fYKYRfw/viewform). +* Registration for **remote** participants is free. +* Registration for **in person** participants is 350 $CAN (approx. 250 $US). You can register your fee [here](https://www.eventbrite.ca/e/na-mic-project-week-39-tickets-593226786287). + +## Zoom +During Project Week, a zoom session will be running continuously and will be used for both project presentations and breakout sessions. Please join at [this link](https://etsmtl.zoom.us/j/89962816358?pwd=RW9jc3ZUM0lXOFJsL2NsaVBaME1Ddz09). 
Note that the **Slicer Internationalization** breakout session will use a different zoom session available using [this link](https://etsmtl.zoom.us/j/86060017076?pwd=NmVkb2ovckh6Y3ZjQzZxSUtXU09tZz09). + +## Discord +The **Discord** application is used to communicate between team members and organize activities before and during Project Week. Please join the Project Week [Discord server](https://discord.gg/yQsNVdVpS3) as soon as possible and explore its functionality before the workshop. For more information on the use of Discord before and during Project Week, please visit [this page](../common/Discord.md). + +## Venue +The images below show how to get to the PW39 conference room. If you Google "ETS", it will take you to the main building of the university (1). You need to walk about 200 meters to get to the building called "Maison des étudiants" (2). Enter the building and either climb the stairs or take the elevator on the right to reach the second floor(3). From there, you should easily find the registration desk. The conference room for PW39 is right beside. + +Venue entrance on Google Maps: [https://goo.gl/maps/xNedgMBt4C6jwiCu5](https://goo.gl/maps/xNedgMBt4C6jwiCu5) + + + +## Agenda + +{% include calendar.md from="2023-06-12" to="2023-06-16" %} + +## Breakout sessions +1. [Future of rendering in VTK and Slicer](BreakoutSessions/RenderingBreakout/README.md) +2. [AMP SCZ Program](BreakoutSessions/AMPSCZ/README.md) +3. [Slicer Platform Update Slides](https://docs.google.com/presentation/d/19-N58wTRfXMUpoltqmnPsbTL9GYXm5m0LA5vQ6gkKVE/edit?usp=sharing) +4. [IGT](BreakoutSessions/IGT/README.md) + +## Contributing Project Pages + +### Videos in project pages + +Here are some steps to make sure all of your awesome videos render correctly: + +1. Videos added by drag and drop will render correctly when viewed through GitHub, but need some extra tweaks to work in the final generated website. 
In your `README.md`, if you have a video link that looks like this: + ```` + https://github.com/NA-MIC/ProjectWeek/assets/66890913/8f257f29-fa9c-4319-8c49-4138003eba27 + ```` + + + Update it to: + ```` + + + ```` +2. Links to externally hosted videos (such as YouTube) will need an iframe. Replace: + ```` + https://youtu.be/ZWxE5QcGvE8 + ```` + + with + + ```` + + ```` + + + +### Updating existing project pages + +Here are the steps using the GitHub web interface: + +1. Navigate to your project's `README.md` on the GitHub website. For instance, if you want to update a project called **YourProjectName**, visit the URL https://github.com/NA-MIC/ProjectWeek/blob/master/PW39_2023_Montreal/Projects/YourProjectName/README.md . + +2. Click the edit button, as shown in this screenshot: ![Screenshot 2023-06-12 10 43 35](https://github.com/NA-MIC/ProjectWeek/assets/25040869/ab01a7bf-c1e4-4c23-9aca-e2c6421ca530) + +3. You can now edit the page, add images by dragging and dropping, and more. + +4. Once done, click "Commit Changes", and follow the instructions to create a fork and a pull request to add your changes to the webpage. See this screenshot for reference: ![Screenshot 2023-06-12 10 50 50](https://github.com/NA-MIC/ProjectWeek/assets/25040869/180e81bb-d4f9-4f65-8569-a93192b2828e) + +### Creating new project pages + +With the [Project Week GitHub Issue page](https://github.com/NA-MIC/ProjectWeek/issues/new/choose), you have three options to create your Project Page: + +1. [Create a Proposal](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=drouin-simon%2Cpiiq%2Crafaelpalomar%2Csjh26%2Ctkapur&labels=proposal%2Cevent%3APW39_2023_Montreal&projects=&template=proposal.yml&title=Proposal%3A+) issue: If you have an idea for a project page but are not quite ready to create it yet, you can create a “Proposal” issue. + +2. 
[Create a Project](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=drouin-simon%2Cpiiq%2Crafaelpalomar%2Csjh26%2Ctkapur&labels=project%2Cevent%3APW39_2023_Montreal&projects=&template=project.yml&title=Project%3A+) issue: If you are ready to create your page, you can simply create a “Project” issue. This issue will allow you to fill out a convenient form to provide the necessary details. + +3. [Create the project page yourself using the template](Projects/README.md): If you prefer to create the Project Page yourself, you can still do so by using the provided template and submitting a pull request. + +### Project Creation Tips + +- Get your project pages created early! The day before is best to make sure everything you need for your presentation is available. The ProjectWeek site will be closed to edits for the ***10 minutes before*** both the opening and closing presentation session to ensure site generation. After this 10 minute period edits will be re-enabled. + +- If you are [creating the project page yourself using the template](Projects/README.md), **don't reuse a project page template from a previous year.** We have made significant updates to the template to support auto-generation of project pages, so previous years' templates will not function properly. 
+ - When naming the file, **please ensure there are no spaces/special characters in the folder or file name** + - Make sure to fill out / update all of the information at the top of the README file (title, category, location, etc) + +- Remember to fill out the title for your project when using the [project creation issue](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=drouin-simon%2Cpiiq%2Crafaelpalomar%2Csjh26%2Ctkapur&labels=project%2Cevent%3APW39_2023_Montreal&projects=&template=project.yml&title=Project%3A+) + +- Check the formatting on the Key Investigators list when creating a [project issue](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=drouin-simon%2Cpiiq%2Crafaelpalomar%2Csjh26%2Ctkapur&labels=project%2Cevent%3APW39_2023_Montreal&projects=&template=project.yml&title=Project%3A+) (this is critical for page generation): + + `- Firstname Lastname (Affiliation, Country)` + +## Projects + +To learn how to create or update project pages, please refer to the [contributing project pages](#contributing-project-pages) section. + +{% include projects.md %} + +## Registrants + +Do not add your name to this list below. It is maintained by the organizers based on your registration. + +List of registered participants so far (names will be added here after processing registrations): + + + + +1. Rafael Palomar, Oslo University Hospital, Norway, (In-person, Confirmed) +1. Sam Horvath, Kitware, USA, (In-person, Confirmed) +1. Simon Drouin, École de Technologie Supérieure, Canada, (In-person, Confirmed) +1. Steve Pieper, Isomics, Inc., USA, (In-person, Confirmed) +1. David Clunie, PixelMed, USA, (In-person, Confirmed) +1. Ron Kikinis, M.D., Harvard Medical School, USA, (In-person, Confirmed) +1. Étienne Léger, Mcgill University, Canada, (In-person, Confirmed) +1. Curtis Lisle, Curtis Lisle, USA, (In-person, Confirmed) +1. Kyle Sunderland, Queen’s University, Canada, (In-person, Confirmed) +1. 
Andrey Titov, École de technologie supérieure, Canada, (In-person, Confirmed) +1. Stephen Aylward, Kitware, Inc., USA, (Online) +1. Sara Rolfe, Seattle Children's Research Institute, USA, (In-person, Confirmed) +1. Andrey Fedorov, Brigham and Women's Hospital, USA, (In-person, Confirmed) +1. Juan Pablo GRAFFIGNA, San Juan National University-Biomedical Engeering Institute, Argentina, (Online) +1. Deepa Krishnaswamy, Brigham and Women's Hospital, USA, (In-person, Confirmed) +1. Rebecca Hisey, Queen’s University , Canada, (In-person, Confirmed) +1. Rudolf Bumm, Kantonsspital Graubünden, Switzerland, (Online) +1. Beier Yao, McLean Hospital; Harvard Medical School, USA, (Online) +1. Sylvain Bouix, ÉTS, Canada, (In-person, Confirmed) +1. Theodore Aptekarev, Slicer Community, Montenegro, (Online) +1. Lucia Magdalena Bravo Cumpian, Universidad Nacional de San Juan - facultad de Ingeniería- Instituto de Bioingenieria (INBIO) - , Argentina, (Online) +1. Mohamed Alalli BILAL, Ecole Supérieure Polytechnique, University Cheikh Anta diop of Dakar , Senegal, (Online) +1. Chris Bridge, Massachusetts General Hospital, USA, (In-person, Confirmed) +1. Ofer Pasternak, Harvard Medical School, USA, (In-person, Confirmed) +1. Shreyas Fadnavis, MGH, Harvard Medical School, USA, (Online) +1. Kevin Cho, Brigham Women's Hospital, Harvard Medical School, USA, (In-person, Confirmed) +1. Jess Tate, University of Utah, USA, (Online) +1. Guillermo Cecchi, IBM Research, USA, (In-person, Confirmed) +1. Pablo Polosecki, IBM Research, USA, (In-person, Confirmed) +1. Eduardo Castro, IBM Reseach, USA, (In-person, Confirmed) +1. Amene Asgari, Brigham and Women's Hospital, Harvard Medical School , USA, (Online) +1. Nora Penzel, Massachusetts General Hospital, Harvard Medical School, USA, (In-person, Confirmed) +1. Rodolfo Eduardo RODRÍGUEZ SCHMÄDKE, INBIO - UNSJ, Argentina, (Online) +1. Luc Anchling, University of Michigan, France, (In-person) +1. 
Colton Barr, Queen's University, Canada, (In-person, Confirmed) +1. Pablo Sergio Castellano Rodríguez, Universidad de Las Palmas de Gran Canaria, Spain, (In-person, Confirmed) +1. Jose Carlos Mateo Perez, Universidad de Las Palmas de Gran Canaria, Spain, (In-person, Confirmed) +1. Gabriella d'Albenzio, The Intervention Centre (OUS), Norway, (In-person, Confirmed) +1. Konstantinos Ntatsis, Leiden University Medical Center, Netherlands, (In-person, Confirmed) +1. Justin Kirby, Frederick National Laboratory for Cancer Research, USA, (Online) +1. Zhuopin Sun, Walter and Eliza Hall Institute of Medical Research, Australia, (Online) +1. Gabor Fichtinger , Queen's University, Canada, (Online) +1. Pape Mady THIAO , École militaire de santé de Dakar , Senegal, (Online) +1. Sarah Frisken, Brigham and Women's Hospital, USA, (Online) +1. Mohamed Alalli BILAL, Ecole Supérieure Polytechnique de Dakar, Université Cheikh Anta diop de Dakar , Mauritania, (Online) +1. João Pedro Alves Januário, University of São Paulo, Brazil, (Online) +1. Nathan Hutin, University of Michigan, France, (In-person) +1. Adam Li, Georgetown University , USA, (Online) +1. Roya Khajavibajestani, Brigham and women's hospital, USA, (Online) +1. Jean-Christophe Fillion-Robin, Kitware, USA, (Online) +1. Hassan, Concordia University, Canada, (Online) +1. Nima Masoumi, Concordia University, Canada, (Online) +1. Douglas Samuel Gonçalves, Universidade de São Paulo, Brazil, (Online) +1. Lucas Sanchez Silva, USP, Brazil, (Online) +1. Luiz Otávio Murta Junior , Universidade de São Paulo , Brazil, (Online) +1. Tina Kapur, Brigham and Women's Hospital and Harvard Medical School, USA, (In-person, Confirmed) +1. Tamas Ungi, Queen's University, Canada, (In-person, Confirmed) +1. Junichi Tokuda, Brigham and Women's Hospital, USA, (In-person, Confirmed) +1. Mike Jin, Harvard Medical School, USA, (In-person, Confirmed) +1. Andras Lasso, Queen's University, Canada, (In-person, Confirmed) +1. 
Dennis Bontempi, BWH/MGB, USA, (In-person, Confirmed) +1. Cosmin Ciausu, Brigham and Women's Hospital, USA, (In-person, Confirmed) +1. Enrique Hernandez Laredo, Universidad Autonoma del Estado de Mexico, Mexico, (Online) +1. Diana Alejandra Mendoza Mora, Universidad Autónoma del Estado de México, Mexico, (Online) +1. Mariana Alvarez-Carvajal, Universidad Autonoma del Estado de Mexico, Mexico, (Online) +1. Daniela Patricia Schacherer, Fraunhofer MEVIS, Germany, (In-person, Confirmed) +1. Gael Garcia, Autonomous University of the State of Mexico, Mexico, (Online) +1. Andy Huynh, The University of Western Australia, Australia, (In-person, Confirmed) +1. Idrissa SECK, Université Cheikh Anta Diop de Dakar (UCAD), Senegal, (Online) +1. Papa ibra NDIAYE, Université Cheikh Anta Diop Ecole Supérieure Polytechnique (UCAD/ESP), Senegal, (Online) +1. Pape Malick GUEYE, Université Cheikh Anta Diop (UCAD) , Senegal, (Online) +1. Valeria Gómez Valdes , Universidad Autónoma del Estado de México , Mexico, (Online) +1. Abigail Mercado Ponciano, Universidad Autónoma del Estado de México, Mexico, (Online) +1. Nubia Sofía González Casanova, Universidad Autónoma del Estado de México, Mexico, (Online) +1. Victor Manuel Montaño Serrano, Universidad Autónoma del Estado de México, Mexico, (Online) +1. Vianney Muñoz Jiménez, Universidad Autónoma del Estado de México, Mexico, (Online) +1. Leonard Nuernberg, MGB, Netherlands, (In-person, Confirmed) +1. Adriana Herlinda Vilchis González, Universidad Autónoma del Estado de México, Mexico, (Online) +1. Juan Carlos Avila Vilchis, Universidad Autónoma del Estado de México, Mexico, (Online) +1. Sonia Pujol, Brigham and Women's Hospital, Harvard Medical School, USA, (Online) +1. David Slavíček, Brno University of Technology, Czechia, (Online) +1. Mehrdad Asadi, Concordia University, Canada, (In-person, Confirmed) +1. Sondos Ayyash, Princess Margaret Cancer Centre, Canada, (In-person) +1. Ozge Ikiz Yurtsever, Stanford University, USA, (Online) +1. 
Emel Alkim, Stanford University, USA, (Online) +1. Aída García Limas , UAEMéx , Mexico, (Online) +1. María Rosa Rodríguez Luque, Universidad de Las Palmas de Gran Canaria, Spain, (Online) +1. Fatou Bintou NDIAYE, Ecole Supérieure Polytechnique (ESP) of Dakar, Senegal, (Online) +1. Hui Liu, UIH, China, (Online) +1. Ahmedou Moulaye IDRISS, Faculty of Medicine / Nouakchott, Mauritania, (Online) +1. Daniel Enrique Fernández García, Universidad Autónoma del Estado de México, Mexico, (Online) +1. Chi Zhang, Seattle Children's Research Institute, USA, (Online) +1. Ben Zwick, The University of Western Australia, Australia, (In-person, Confirmed) +1. Mohammadreza Eskandari, McGill University, Canada, (In-person, Confirmed) +1. Kartik Narayan Sahoo, University of Alberta, Canada, (Online) +1. Alicia Pose Díez de la Lastra, Universidad Carlos III de Madrid , Spain, (In-person, Confirmed) +1. Abigail Mercado Ponciano, Universidad Autónoma del Estado de México , Mexico, (Online) +1. Justin Johnson, Brigham and Women’s Hospital, USA, (In-person, Confirmed) +1. Mark Pearson, CNI Molecular Imaging, Australia, (Online) +1. Ole Vegard Solberg, SINTEF, Norway, (Online) +1. Csaba Pinter, Ebatinca, Spain, (Online) +1. Fernandez Vidal Sara, ICM, France, (Online) +1. Attila Tanács, University of Szeged, Hungary, (Online) +1. Khaled Younis, SIIM, USA, (Online) +1. Adama Wade, UCAD, Queen's, Senegal, (In-person, Confirmed) +1. Gabriel Kwiecinski Antunes, Web Kriativa, Brazil, (Online) +1. Sheikh Muhammad Usman Shami, King Edward Medical University, Pakistan, (Online) +1. Mahmoud Gamal, Alexandria University, Egypt, (Online) +1. Attila Nagy, University of Szeged, Department of Medical Physics and Informatics, Hungary, (Online) +1. Soyoung Lim, Samsung Medical Center, South Korea, (Online) +1. Cathia Michelle Nava Vargas, Universidad Autónoma del Estado de México , Mexico, (Online) +1. Umang Pandey, Universidad Carlos III de Madrid (UC3M), Spain, (Online) +1. 
Mamadou Samba CAMARA, UCAD, Senegal, (Online) +1. Linmin Pei, Frederick National Laboratory for Cancer Research, USA, (Online) +1. Robert Zsolt Szabo, Queen's University / Óbuda University, Hungary, (In-person) +1. Jeff VanOss, BAMF Health, USA, (Online) +1. David Allemang, Kitware, USA, (Online) +1. Alireza Mostafanejad, University of Hawaii at Manoa, USA, (Online) +1. Harald Scheirich, Kitware, USA, (Online) +1. Javier Pascau, Universidad Carlos III de Madrid, Spain, (Online) +1. Simon Oxenford, Charite Berlin, Germany, (Online) +1. Hascoët Camille, Polytech Sorbonne, France, (In-person) +1. krithika, georgetown university, USA, (Online) +1. Nadya Shusharina, Massachusetts General Hospital, USA, (Online) +1. Mohamed Abdallahi Keita, Faculté de Médecine, de Pharmacie et d’Odonto-Stomatologie, Mauritania, (Online) +1. Parikshit Juvekar, Brigham and Women's Hospital, USA, (Online) +1. Tagwa Idris, Massachusetts General Hospital, USA, (Online) +1. Ruoyan Meng, NTNU, Norway, (Online) +1. aichetou mohamed vall, faculty of medicine, Mauritania, (Online) +1. Hendou Bouboutt, Faculty of medicine , Mauritania, (In-person) +1. Mohamed Aly Dedew, Faculte medecine de Nouakchott, Mauritania, (Online) +1. Mauro Ignacio Dominguez, Independent, Argentina, (Online) +1. Mario Mata, Autonomous University of Juarez, Mexico, (Online) +1. Thanuja Uruththirakodeeswaran, University of Alberta, Canada, (Online) +1. Srivathsan Shanmuganathan, University of Alberta, Canada, (Online) +1. Davi Romao , Hospital Sirio-Libanes , Brazil, (Online) +1. Andres Diaz-Pinto, NVIDIA, UK, (Online) +1. David García Mato, Ebatinca S.L., Spain, (Online) +1. Laura Connolly, Queen's University, Canada, (Online) +1. Shadi nouri, Private, Iran, (Online) +1. Alireza Sedghi, OHIF, Canada, (Online) +1. DANIELA SOFÍA PEDROZO ROCA, INBIO - UNSJ, Argentina, (Online) +1. Mario Mata, Autonomous University of Juarez , Mexico, (Online) +1. Maximilian Fischer, German Cancer Research Center, Germany, (Online) +1. 
Jaswant Panchumarti, Kitware, USA, (Online) +1. Lucas Gandel, Kitware, France, (Online) +1. Orphée, McGill , Canada, (Online) +1. Thomas K Noh, University of Hawaii, USA, (Online) +1. Boubacar FATY, Ecole Superieure polytechnique, Senegal, (Online) +1. Geir Arne Tangen, SINTEF, Norway, (Online) +1. Nayra Pumar Carreras, EBATINCA, Spain, (Online) +1. Shreeraj Jadhav, Kitware Inc, USA, (Online) +1. Joe Boccanfuso, Radical Imaging, Canada, (Online) +1. Leonardo Campos, PUC-MG, Brazil, (Online) + + + +## Statistics + +Participation statistics + +## Organizers + +### Local organization +[Simon Drouin](https://www.etsmtl.ca/en/research/professors/sidrouin/) - Associate professor in the department of software and information technology engineering at [École de Technologie Supérieure](https://www.etsmtl.ca/) + +### Global Project Week organizing committee +* [@tkapur](https://github.com/tkapur) ([Tina Kapur, PhD](http://www.spl.harvard.edu/pages/People/tkapur)), +* [@drouin-simon](https://github.com/drouin-simon) ([Simon Drouin, PhD](https://drouin-simon.github.io/ETS-web//)) +* [@rafaelpalomar](https://github.com/rafaelpalomar) ([Rafael Palomar, PhD](https://www.ntnu.edu/employees/rafaelp)) +* [@piiq](https://github.com/piiq) ([Theodore Aptekarev](https://discourse.slicer.org/u/pll_llq)) +* [@sjh26](https://github.com/sjh26) ([Sam Horvath, PhD](https://www.kitware.com/samantha-horvath/)) + +## History +Please read about our experience in running these events since 2005: [Increasing the Impact of Medical Image Computing Using +Community-Based Open-Access Hackathons: the NA-MIC and 3D Slicer Experience](http://perk.cs.queensu.ca/sites/perkd7.cs.queensu.ca/files/Kapur2016.pdf). 
diff --git a/PW39_2023_Montreal/images/PW39-venue.png b/PW39_2023_Montreal/images/PW39-venue.png new file mode 100644 index 000000000..16908a80e Binary files /dev/null and b/PW39_2023_Montreal/images/PW39-venue.png differ diff --git a/PW39_2023_Montreal/images/README.md b/PW39_2023_Montreal/images/README.md new file mode 100644 index 000000000..0982aaf36 --- /dev/null +++ b/PW39_2023_Montreal/images/README.md @@ -0,0 +1 @@ +This directory is meant to contain images displayed on the main PW39 page diff --git a/PW39_2023_Montreal/images/na-mic-logo.png b/PW39_2023_Montreal/images/na-mic-logo.png new file mode 100644 index 000000000..4673c73bb Binary files /dev/null and b/PW39_2023_Montreal/images/na-mic-logo.png differ diff --git a/PW39_2023_Montreal/images/pw39-event-photos.jpg b/PW39_2023_Montreal/images/pw39-event-photos.jpg new file mode 100644 index 000000000..59b87ac76 Binary files /dev/null and b/PW39_2023_Montreal/images/pw39-event-photos.jpg differ diff --git a/PW39_2023_Montreal/statistics.svg b/PW39_2023_Montreal/statistics.svg new file mode 100644 index 000000000..96e28c9bd --- /dev/null +++ b/PW39_2023_Montreal/statistics.svg @@ -0,0 +1,16255 @@ + + + + + + + + 2023-06-14T19:12:33.082915 + image/svg+xml + + + Matplotlib v3.5.2, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/PW40_2024_GranCanaria/BreakoutSessions/Rendering/README.md b/PW40_2024_GranCanaria/BreakoutSessions/Rendering/README.md new file mode 100644 index 000000000..58d7ecd21 --- /dev/null +++ b/PW40_2024_GranCanaria/BreakoutSessions/Rendering/README.md @@ -0,0 +1,144 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Future of Rendering in VTK, ITK and Slicer + +key_investigators: +- name: Rafael Palomar + affiliation: NTNU + country: Norway + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware, Inc. + country: USA + +- name: Sankhesh Jhaveri + affiliation: Kitware, Inc. + country: USA + +- name: Matt McCormick + affiliation: Kitware, Inc. + country: USA + +- name: Lucas Gandel + affiliation: Kitware SAS + country: USA + +- name: Forrest Li + affiliation: Kitware, Inc. + country: USA + +--- + +# Description + +The goal of this breakout session is to gather all parties interested in the future of rendering in VTK and Slicer, present ongoing development by Kitware and others and discuss potential future directions and clinical and biomedical needs. + +## During the Breakout Session + +Links and notes are organized at [https://hackmd.io/j0xxip3jR_O9220uJUjJKg](https://hackmd.io/j0xxip3jR_O9220uJUjJKg). It is a markdown based document we can collaboratively & interactively edit. + +Once the breakout session is over, we will contribute the information back to this page. See [Notes](#notes) below. 
+ +## Topics + +* VTK C++: + * WebGPU + * OpenXR +* itk-viewer and itkwidgets +* vtk.js +* SlicerVirtualReality + + +# Notes + +### VTK C++: WebGPU + +_Contact: Sankhesh Jhaveri @ Kitware_ + +* Direct replacement of OpenGL for native desktop and webassembly for browsers +* Better separation between windowing UI and rendering API +* Implementation agnostic support via the [webgpu C API](https://github.com/webgpu-native/webgpu-headers) that would allow switching between dawn, wgpu, and future webgpu implementations. +* What is the timeline to transition to WebGPU ? + - this is pending funding, we are currently waiting for review of NSF grant + +Notes: +* Custom shader code written in GLSL would have to be coverted to WGSL shader language +* Moving forward we will have one volume rendering implementation +* ANARI will allow to have multiple backend for ray tracing +* Sanhesh et al are active and involved with the WebGPU forum + +See [notes](https://projectweek.na-mic.org/PW39_2023_Montreal/BreakoutSessions/RenderingBreakout/#vtk-c-webgpu) from 39th project week. + +### VTK C++: WebAssembly + +_Contact: Jaswant Panchumarti @ Kitware_ + +See [notes](https://projectweek.na-mic.org/PW39_2023_Montreal/BreakoutSessions/RenderingBreakout/#webassembly) from 39th project week. + +### VTK C++: OpenXR + +_Contact: Lucas Gandel @ Kitware_ + +* Hand interaction improvements for the Hololens 2: + * Combine 'aim/select' and 'grip/squeeze' poses, mimic the Unity behavior + * Hand joints/finger tracking +* Support occlusion from the real world in the Hololens 2: + * Implement scene understanding extension +* Custom controller model: + * The controller model extension is not implemented by most of the runtimes. We should provide an API for users to load their own controller model files (GLTF, OBJ, with textures). +* Registration of multiple devices: + * Investigate spatial anchors persistency and sharing such anchors across sessions. 
+ * Enable QR code detection extension + * https://publik.tuwien.ac.at/files/publik_299074.pdf + +See [notes](https://projectweek.na-mic.org/PW39_2023_Montreal/BreakoutSessions/RenderingBreakout/#vtk-c-openxr) from 39th project week. + + +### SlicerVirtualReality + +_Contact: Jean-Christophe Fillion-Robin @ Kitware_ + + +### vtk.js: Interactive, in-browser cinematic volume rendering of medical images + +_Contact: Stephen Aylward & Forrest Li @ Kitware_ + +* vtk.js goals + * support vtk-wasm as a rendering backend + * unified WebGPU implementation (see VTK C++ WebGPU above) +* VolView goals + * improved streaming support + * demos with python-based processing server + + +### itk-viewer and itkwidgets + +_Contact: Matt McCormick @ Kitware_ + +* [3D Slicer on the Web via ITK-Wasm and friends](https://docs.google.com/presentation/d/1IHgkgNZuN9c_uYkWAnY984kLQySznB5S66S2aHt0blQ/edit#slide=id.g2a5c217f66c_0_1091) +* `itk-viewer`: Multi-dimensional web-based image, mesh, and point set viewer +* [`itkwidgets`](https://itkwidgets.readthedocs.io/en/latest/): Python interface for visualization on the web platform to interactively generate insights into multidimensional images, point sets, and geometry. + +Notes & Links: +* https://itkwidgets.readthedocs.io/en/latest/deployments.html +* ITKIOOMEZarrNGFF + * https://github.com/InsightSoftwareConsortium/ITKIOOMEZarrNGFF + * https://pypi.org/project/itk-ioomezarrngff/ + +# Illustrations + + + +# Background and References + + +* [39th Project Week - Rendering breakout session](https://projectweek.na-mic.org/PW40_2024_GranCanaria/) +* [WebGPU in VTK](https://www.kitware.com/vtk-webgpu-on-the-desktop/) +* [WebGPU in Slicer](https://github.com/pieper/SlicerWGPU) from [Project Week 37](https://projectweek.na-mic.org/PW37_2022_Virtual/Projects/SlicerWGPU/). 
diff --git a/PW40_2024_GranCanaria/BreakoutSessions/Slicer/README.md b/PW40_2024_GranCanaria/BreakoutSessions/Slicer/README.md new file mode 100644 index 000000000..03d34741c --- /dev/null +++ b/PW40_2024_GranCanaria/BreakoutSessions/Slicer/README.md @@ -0,0 +1,29 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Slicer Breakouts + +key_investigators: +- name: Sam Horvath + affiliation: Kitware + country: USA + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware, Inc. + country: USA + +- name: Steve Pieper + affiliation: Isomics, Inc. + country: USA + +- name: Andras Lasso + affiliation: Perk Labs + country: Canada +--- + +# Links!! + +- [3D Slicer Status Presentation](https://docs.google.com/presentation/d/19uLjtQB34Rblg4sIK-kCcQ5ourwTFkO7LAonxixOHOg/edit?usp=sharing) +- [3D Slicer Live Q&A Discourse](https://docs.google.com/document/d/1TtTgr3EvWi-P8AEG074CZGWFytE6ysLLxg6C-RD0Z44/edit?usp=sharing) diff --git a/PW40_2024_GranCanaria/ContributingProjectPages.md b/PW40_2024_GranCanaria/ContributingProjectPages.md new file mode 100644 index 000000000..c57511d90 --- /dev/null +++ b/PW40_2024_GranCanaria/ContributingProjectPages.md @@ -0,0 +1,85 @@ +--- +--- +{% comment %}Extract event name (e.g "PW39_2023_Montreal"). {% endcomment %} +{%- assign event_name = page.path | split: '/' | first -%} + +# Contributing Project Pages + +## Creating new project pages + +With the [Project Week GitHub Issue page](https://github.com/NA-MIC/ProjectWeek/issues/new/choose), you have three options to create your Project Page: + +1. [Create a Proposal](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=drouin-simon%2Cpiiq%2Crafaelpalomar%2Csjh26%2Ctkapur&labels=proposal%2Cevent%3A{{ event_name }}&projects=&template=proposal.yml&title=Proposal%3A+) issue: If you have an idea for a project page but are not quite ready to create it yet, you can create a “Proposal” issue. You will still need to create a project page later. 
+ +2. [Create a Project](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=drouin-simon%2Cpiiq%2Crafaelpalomar%2Csjh26%2Ctkapur&labels=project%2Cevent%3A{{ event_name }}&projects=&template=project.yml&title=Project%3A+) issue: If you are ready to create your page, you can simply create a “Project” issue. This issue will allow you to fill out a convenient form to provide the necessary details. The Project Week website team will then review the issue and trigger the page creation pull request. + +3. [Create the project page yourself using the template](Projects/README.md): If you prefer to create the Project Page yourself, you can still do so by using the provided template and submitting a pull request. + +## Project Creation Tips + +- Get your project pages created early! The day before is best to make sure everything you need for your presentation is available. The ProjectWeek site will be closed to edits for the ***10 minutes before*** both the opening and closing presentation session to ensure site generation. After this 10 minute period edits will be re-enabled. + +- If you are [creating the project page yourself using the template](Projects/README.md), **don't reuse a project page template from a previous year.** We have made significant updates to the template to support auto-generation of project pages, so previous years' templates will not function properly. 
+ + - When naming the file, **please ensure there are no spaces/special characters in the folder or file name** + - Make sure to fill out / update all of the information at the top of the README file (title, category, location, etc) + +- Remember to fill out the title for your project when using the [project creation issue](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=drouin-simon%2Cpiiq%2Crafaelpalomar%2Csjh26%2Ctkapur&labels=project%2Cevent%3A{{ event_name }}&projects=&template=project.yml&title=Project%3A+) + +- Check the formatting on the Key Investigators list when creating a [project issue](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=drouin-simon%2Cpiiq%2Crafaelpalomar%2Csjh26%2Ctkapur&labels=project%2Cevent%3A{{ event_name }}&projects=&template=project.yml&title=Project%3A+) (this is critical for page generation): + + `- Firstname Lastname (Affiliation, Country)` + +## Updating existing project pages + +Here are the steps using the GitHub web interface: + +1. Navigate to your project's `README.md` on the GitHub website. For instance, if you want to update a project called **YourProjectName**, visit the URL like the following: + + ``` + https://github.com/NA-MIC/ProjectWeek/blob/master/{{ event_name }}/Projects/YourProjectName/README.md + ``` + +2. Click the edit button, as shown in this screenshot: ![Screenshot 2023-06-12 10 43 35](https://github.com/NA-MIC/ProjectWeek/assets/25040869/ab01a7bf-c1e4-4c23-9aca-e2c6421ca530) + +3. You can now edit the page, add images by dragging and dropping, and more. + +4. Once done, click "Commit Changes", and follow the instructions to create a fork and a pull request to add your changes to the webpage. 
See this screenshot for reference: ![Screenshot 2023-06-12 10 50 50](https://github.com/NA-MIC/ProjectWeek/assets/25040869/180e81bb-d4f9-4f65-8569-a93192b2828e) + +## Videos in project pages + +Here are some steps to make sure all of your awesome videos render correctly: + +1. Videos added by drag and drop will render correctly when viewed through GitHub, but need some extra tweaks to work in the final generated website. + + + In your `README.md`, if you have a video link that looks like this: + + ``` + https://github.com/NA-MIC/ProjectWeek/assets/66890913/8f257f29-fa9c-4319-8c49-4138003eba27 + ``` + + Update it to: + + ```html + + ``` + +2. Links to externally hosted videos (such as YouTube) will need an iframe. + + Replace: + + ``` + https://youtu.be/ZWxE5QcGvE8 + ``` + + with + + ````html + + ```` diff --git a/PW40_2024_GranCanaria/NHIMperialPlaya.jpg b/PW40_2024_GranCanaria/NHIMperialPlaya.jpg new file mode 100644 index 000000000..e55692385 Binary files /dev/null and b/PW40_2024_GranCanaria/NHIMperialPlaya.jpg differ diff --git a/PW40_2024_GranCanaria/Projects/3DSlicerForLatinAmerica/README.md b/PW40_2024_GranCanaria/Projects/3DSlicerForLatinAmerica/README.md new file mode 100644 index 000000000..c1f13e736 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/3DSlicerForLatinAmerica/README.md @@ -0,0 +1,158 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: 3D Slicer for Latin America +category: Infrastructure +presenter_location: In-Person + +key_investigators: +- name: Luiz Murta + affiliation: Universidade de São Paulo + country: Brazil + +- name: Lucas Sanchez Silva + affiliation: Universidade de São Paulo + country: Brazil + +- name: Adriana Herlinda Vilchis Gonzalez + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Enrique Hernandez Laredo + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Sonia Pujol + affiliation: Brigham and Women's 
Hospital, Harvard Medical School + country: USA + +- name: Steve Pieper + affiliation: Isomics, Inc. + country: USA + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +- name: João Pedro Alves Januário + affiliation: Universidade de São Paulo + country: Brazil + +- name: Douglas Samuel Gonçalves + affiliation: Universidade de São Paulo + country: Brazil + +- name: Paulo Eduardo de Barros Veiga + affiliation: Universidade de São Paulo + country: Brazil + +- name: Diana Alejandra Mendoza Mora + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Mariana Alvarez-Carvajal + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Gael Garcia + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Valeria Gómez Valdes + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Abigail Mercado Ponciano + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Nubia Sofía González Casanova + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Victor Manuel Montaño Serrano + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Vianney Muñoz Jiménez + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Juan Carlos Avila Vilchis + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +- name: Aída García Limas + affiliation: Universidad Autónoma del Estado de México + country: Mexico + +--- + +# Project Description + + + +The goal of this project is to leverage 3D Slicer's internationalization infrastructure to localize the software into Spanish and Portuguese and to develop a novel software infrastructure for tutorial localization. 
+ + +## Objective + + + +The main purpose is to develop a functional prototype that can take screenshots from 3D Slicer, allow the user to put annotations into it, and draw these annotations in different languages. Based on the actual prototype. +1. Develop a tool in 3D Slicer that can take a screenshot as a .png file and a description of all visible widgets as a .json. +2. Develop an interactive tool that allows visual editing of screenshot annotations. +3. Develop a library that can draw annotations on a screenshot, relative to a widget. This would be used for drawing annotations automatically in different languages. +4. Translate and add strings for translations from the 3D Slicer interface and extensions, like MONAILabel. + +## Approach and Plan + + + +1. Show what we already have developed during the week, and the next steps, and discuss functionalities that we can use to achieve these steps. Also, exchange knowledge and listen to the feedback of the users, + +## Progress and Next Steps + + + + + +# Illustrations + + + +### Side-by-side + +English | Brazilian Portuguese | Spanish +:-------------------------:|:-------------------------:|:-------------------------: +![English-0](https://github.com/NA-MIC/ProjectWeek/assets/28208639/0dfc106e-5cea-4162-8eb5-94bbfb173605) | ![pt-0](https://github.com/NA-MIC/ProjectWeek/assets/28208639/285f1a5d-aa4a-4d93-9d4d-687369e58d78) | ![sp-0](https://github.com/NA-MIC/ProjectWeek/assets/28208639/ce2db67c-e84e-4d97-b55e-9c77b67a1f73) +![en-1](https://github.com/NA-MIC/ProjectWeek/assets/28208639/73c39bab-b826-4b8d-9799-8c988cd65034) | ![pt-1](https://github.com/NA-MIC/ProjectWeek/assets/28208639/3ee96287-30d9-4df0-99a0-2c69baef94ed) | ![sp-1](https://github.com/NA-MIC/ProjectWeek/assets/28208639/b250f607-077d-4875-ac46-76d5661ea01e) 
+![en-2](https://github.com/NA-MIC/ProjectWeek/assets/28208639/ef8caa09-2c73-427c-a7c6-dc8e3f2796fe) | ![pt-2](https://github.com/NA-MIC/ProjectWeek/assets/28208639/03e46bc3-6793-436e-a0c6-6901a69f7f8b) | ![sp-2](https://github.com/NA-MIC/ProjectWeek/assets/28208639/c8fdeab8-9e49-4d15-80fc-5f668a854ec9) +![en-3](https://github.com/NA-MIC/ProjectWeek/assets/28208639/6afafe3e-5c6e-43fa-8762-2eda583c5bfe) | ![pt-3](https://github.com/NA-MIC/ProjectWeek/assets/28208639/255ef11a-1519-4c00-aa0d-f9a955a63bc9) | ![sp-3](https://github.com/NA-MIC/ProjectWeek/assets/28208639/7d376bd9-4ede-4b37-ab85-e5835ffffa71) +![en-4](https://github.com/NA-MIC/ProjectWeek/assets/28208639/609f33bf-55b2-49c1-be43-9a83e022d9fc) | ![pt-4](https://github.com/NA-MIC/ProjectWeek/assets/28208639/c3a1b7d2-02b3-4b31-a5ac-de14549404ab) | ![sp-4](https://github.com/NA-MIC/ProjectWeek/assets/28208639/0a928595-1e7c-48a5-b7f0-eb98085c1dc4) + + +![IMG_2212-small](https://github.com/NA-MIC/ProjectWeek/assets/126077/d813b5bd-54bd-4131-8ba0-913c1feb949d) +![IMG_2211-small](https://github.com/NA-MIC/ProjectWeek/assets/126077/878f89d1-de9f-4cba-9138-69467ca7d5c0) + + + +# Background and References + + + +### GitHub Repository + +[TutorialMaker](https://github.com/SlicerLatinAmerica/TutorialMaker) +The repository has some branches for each issue raised, the main branch is basic but fixed to an English-to-Spanish translation, which involves a lot of processes and manual operations. 
diff --git a/PW40_2024_GranCanaria/Projects/5DImageIO/README.md b/PW40_2024_GranCanaria/Projects/5DImageIO/README.md new file mode 100644 index 000000000..f45b628a1 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/5DImageIO/README.md @@ -0,0 +1,63 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: 5D image IO support in Slicer +category: Infrastructure +presenter_location: In-person + +key_investigators: +- name: Csaba Pinter + affiliation: EBATINCA, S.L. + country: Spain + +- name: Andras Lasso + affiliation: Queen's University + country: Canada +--- + +# Project Description + + + +Slicer currently does not support reading/writing (IO) of 5D images. In the driving use case this means a sequence of vector volumes. This project is about continuing an existing effort to add such support into Sequences storage. + +## Objective + + + +1. Objective A. Keep developing the current implementation +1. Objective B. Collecting use cases (what exactly to support in the first round) +1. Objective C. Discussing implementation details + +## Approach and Plan + + + +1. Use ITK metadata dictionary to change last dimension from `domain` to `time` or other "list" type +2. Start working on 5D image reading + +## Progress and Next Steps + + + +1. Build branch in Debug mode on laptop +2. Metadata dictionary is ignored when writing, or at least the "kinds" information +3. Investigate `ITK` and `nrrd` code to find a way to set the last "kind" +4. 
Manually edit NRRD header and see what SimpleITK reads it as + +# Illustrations + + + +# Background and References + + + +* Working branch: https://github.com/cpinter/Slicer/tree/volume-sequence-io-5D diff --git a/PW40_2024_GranCanaria/Projects/Add4DSupportToOHIFViewer/README.md b/PW40_2024_GranCanaria/Projects/Add4DSupportToOHIFViewer/README.md new file mode 100644 index 000000000..6e4a8e29d --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/Add4DSupportToOHIFViewer/README.md @@ -0,0 +1,106 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Adding 4D data support in OHIF Viewer v3 +category: Cloud / Web +presenter_location: Online + +key_investigators: +- name: Joost van Griethuysen + affiliation: The Netherlands Cancer Institute + country: The Netherlands + +- name: Alireza Sedghi + affiliation: open health imaging foundation - OHIF + country: Canada + +- name: Mo Al S’ad + affiliation: Imperial College of London + country: UK + +--- + +# Project Description + + + +OHIF Viewer v3 provides a very flexible and extensible DICOM viewer with zero-footprint running in your browser. +It is based on Cornerstone including multiplanar reformatting support, segmentations, etc. However, OHIF viewer +does not support 4D DICOM data natively. This especially affects dynamic or function CT/MR data, such as diffusion- +weighted imaging (DWI) and dynamic contrast enhanced (DCE) CT/MR. + +## Objective + + + +1. Get a feel for how OHIF creates display sets for displaying data using cornerstone. +1. Create functionality to split a given display set into multiple subsets based on the value in a provide DICOM tag + (e.g. TemporalPositionIdentifier (0020, 0100) or DiffusionBValue (0018, 9087)). +1. *Optional* Create functionality to detect if a display set is "4D", and provide a list of valid tags that can be + used to split the display set. + +## Approach and Plan + + + +1. Create a local instance of OHIF viewer +1. 
Create a toy dataset containing 3D, valid 4D and "invalid" 4D data (e.g. DICOM volume consisting of differently + angled subvolumes) to test/view functionality. +1. Create OHIF viewer extension to test/develop functionality, create mode to allow interaction with the extension. +1. Create functionality for splitting dataset (first on single or few known tags, no checking) +1. Improve functionality from previous point (custom tags, checking validity prior to splitting, etc.) +1. If time remains, check as to the feasibility/difficulty of supporting rendering 4D data directly. + +## Progress and Next Steps + + + +1. OHIF viewer installed locally and ready for testing/customization. +1. Created local DCM4CHEE instance with toy data from Amsterdam and IDC +1. 4D viewport created for OHIF/cornerstone3D (in OHIF [PR #3664](https://github.com/OHIF/Viewers/pull/3664), + [cornerstone3D commit 42054522](https://github.com/cornerstonejs/cornerstone3D/commit/42054522680083aada25737d5e64fb22c24cb424)). +1. Expand cornerstone functionality for splitting datasets into different frames [Cornerstone3D PR #1055](https://github.com/cornerstonejs/cornerstone3D/pull/1055). +1. Fix bug in OHIF viewer breaking the scrollbar in `SidePanel` [da595489](https://github.com/JoostJM/Viewers/commit/da5954896a3efa0d42beb782087352758460fdad). +1. Created python scripts for comparing DICOM metadata and exhaustive search of potential 4D splitting tags. + +Next Steps/ToDo: + +1. During testing, a new use case emerged: Singe SeriesInstanceUID, but 2 valid 4D stacks, + identifiable by ImageType. To correctly handle this use case, data needs to be first split by + ImageType, then by frame identifier (in this case TemporalPositionIndex). +1. Additional ToDo's as specified in OHIF [PR #3664](https://github.com/OHIF/Viewers/pull/3664), concerning updates of + the 4D datapanel GUI, and only displaying it when a valid 4D dataset is active. 
+1. Check the functionality of retrieving 4D tag values using cornerstone3D metadata + providers. + +# Illustrations + + + +Support for 4D multistack - DWI split by GE private tag: + + + +Support for 4D multistack - DCE split by Temporal Position Identifier + + + +# Background and References + + + +- [OHIF viewer source code](https://github.com/OHIF/Viewers). +- [OHIF viewer online documentation](https://docs.ohif.org/). +- Related work: + - OHIF [PR #3664](https://github.com/OHIF/Viewers/pull/3664). + - Cornestone live-examples: [dynamicpetct](https://www.cornerstonejs.org/live-examples/dynamicpetct) + - OHIF viewer demo for [dynamic PETCT](https://deploy-preview-3664--ohif-dev.netlify.app/dynamic-volume?StudyInstanceUIDs=2.25.232704420736447710317909004159492840763) diff --git a/PW40_2024_GranCanaria/Projects/AddAMonaiAuto3DsegInferenceExtensionTo3Dslicer/README.md b/PW40_2024_GranCanaria/Projects/AddAMonaiAuto3DsegInferenceExtensionTo3Dslicer/README.md new file mode 100644 index 000000000..148cb9f7b --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/AddAMonaiAuto3DsegInferenceExtensionTo3Dslicer/README.md @@ -0,0 +1,103 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: 'Add a MONAI Auto3DSeg inference extension to 3DSlicer ' +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Rudolf Bumm + affiliation: Cantonal Hospital of Graubünden + country: Switzerland + +- name: Andras Lasso + affiliation: Queens University + country: Canada + +- name: Steve Pieper + affiliation: Isomics, Inc. + country: USA + +- name: Andres Diaz-Pinto + affiliation: Senior Deep Learning Engineer at NVIDIA + country: UK + +- name: Umang Pandey + affiliation: Universidad Carlos III de Madrid + country: Spain + +--- + +# Project Description + + + +This project aims to implement MONAI Auto3DSeg in a 3DSlicer extension. 
This will enable fast inference with NVIDIA GPUs and CUDA and slower inference with CPU only. +Auto3DSeg is a relatively new technique in the MONAI project and our first experiments have been successful. Inference is not as complicated as using the MONAILabel inference function.\ +A future aim is to integrate Auto3DSeg training into the MONAILabel extension. + +## Objective + + + +1. Objective A. Implement Auto3DSeg into a new 3D Slicer extension. + +## Approach and Plan + + + +We have great starting code as well as 2 ready-to-use models from Andres Diaz-Pinto. We will build on that. +In addition, we will train a lung lobe and airway model which should be available at the PW. + +## Progress and Next Steps + + + +1. Andras developed a new extension MONAI Auto3DSeg +2. It can be downloaded via the extension manager. +3. Andres created 3 Auto3DSeg models already to enable direct inference with CT datasets + ![image](https://github.com/NA-MIC/ProjectWeek/assets/18140094/10bcc813-c4e6-4e6a-ae8e-0d3cf51b0ab3) + +4. The best models get automatically downloaded for each process +5. They will be improved with further training +6. In future, we attempt to enable your own training of Auto3DSeg models in MONAILabel. + + ![image](https://github.com/NA-MIC/ProjectWeek/assets/18140094/ea045d96-ab84-4469-86e0-acbd7bec01ad) + +2/24/2024 + +Andres and Andras achieved relevant progress working on the extension during the last weeks: + +The extension +- is now much faster +- has a wider range of available models +- includes low res models which use less VRAM +- some models were split into smaller pieces to be able to run them on 8 GB VRAM or CPU +- The overall quality of the models was largely improved + +![image](https://github.com/NA-MIC/ProjectWeek/assets/18140094/8bfe7c55-045b-45f9-824f-513f4b9ea0fa) + +(using NVIDIA RTX Geforce 3070 Ti) + +We'll continue to add relevant models. 
+ +# Illustrations + + + +Algorithm Generation: +![image](https://github.com/NA-MIC/ProjectWeek/assets/18140094/286ae610-4ab7-4352-ac80-ab4d2c4773c1) + +Simulate a dataset and Auto3D datalist using MONAI functions: +![image](https://github.com/NA-MIC/ProjectWeek/assets/18140094/6208629d-5a2f-4c39-a98a-0b0a98367546) + +# Background and References + + + + diff --git a/PW40_2024_GranCanaria/Projects/AevaAnnotationAndExchangeOfVirtualAnatomy/README.md b/PW40_2024_GranCanaria/Projects/AevaAnnotationAndExchangeOfVirtualAnatomy/README.md new file mode 100644 index 000000000..4a4618c9b --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/AevaAnnotationAndExchangeOfVirtualAnatomy/README.md @@ -0,0 +1,79 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: 'aeva: Annotation and Exchange of Virtual Anatomy' +category: Quantification and Computation +presenter_location: In-person + +key_investigators: + +- name: Sam Horvath + affiliation: Kitware + country: USA + +- name: David Thompson + affiliation: Kitware + country: USA + +--- + +# Project Description + + + +Representation of anatomy in a virtual form is at the heart of clinical decision making, biomedical research, and medical training. Virtual anatomy is not limited to description of geometry but also requires appropriate and efficient labeling of regions - to define spatial relationships and interactions between anatomical objects; effective strategies for pointwise operations - to define local properties, biological or otherwise; and support for diverse data formats and standards - to facilitate exchange between clinicians, scientists, engineers, and the general public. Development of aeva, a free and open source software package (library, user interfaces, extensions) capable of automated and interactive operations for virtual anatomy annotation and exchange, is in response to these currently unmet requirements. 
+ +aeva (annotation and exchange of virtual anatomy) is a software suite designed to work with virtual anatomy in various forms. With aeva, one can navigate anatomical information that may be in the form of images (DICOM, NIfTI), surface meshes (stl, ply, vtk) and as volume meshes (vtk, med, exodus). aeva aims to provide import/export of anatomy in various formats and annotation by selecting regions and defining attributes. Templating of annotation can be achieved with simple schemas, e.g. one designed for the knee joint. + +aeva software suite currently consists of: + +**aevaSlicer** aevaSlicer will be familiar to users of Slicer. The interface is customized and new features have been added to accommodate a workflow amenable to generation of surface and volume meshes of anatomy from medical images. + +**aevaCMB** aevaCMB will be familiar to users of ParaView and Computational Model Builder. The interface is customized and new features have been added to support operations for import and export of anatomical representations and for annotation (template based and freeform, including a powerful set of region selection). + +## Objective + + + +1. Demo the current version of the aeva suite. +2. Collect feedback and ideas. + +## Approach and Plan + + + +1. Breakout session scheduled during the week. +2. Demo the Slicer <-> CMB interop and the new graph based linkages between surfaces + +## Progress and Next Steps + + + +1. Demo on Tuesday went well. +2. 
THe team made good progress creating new demo videos + +# Illustrations + + + +Selection demo: + + +Example tutorial: + + + +# Background and References + + + +* [aeva page](https://simtk.org/projects/aeva-apps) +* [aeva readthedocs](https://aeva.readthedocs.io/en/latest/) diff --git a/PW40_2024_GranCanaria/Projects/AliAutomatedLandmarkIdentificationUpdate/README.md b/PW40_2024_GranCanaria/Projects/AliAutomatedLandmarkIdentificationUpdate/README.md new file mode 100644 index 000000000..df2b4d0b3 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/AliAutomatedLandmarkIdentificationUpdate/README.md @@ -0,0 +1,82 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: 'ALI: Automated Landmark Identification update' +category: Segmentation / Classification / Landmarking +presenter_location: Online + +key_investigators: + +- name: Jeanne Claret + affiliation: University of Michigan + country: USA + +- name: Gaëlle Leroux + affiliation: University of Michigan + country: USA + +- name: Lucia Cevidanes + affiliation: UoM + country: USA + +- name: Juan Prieto + affiliation: ' University of North Carolina' + country: USA + +--- + +# Project Description + + + +This is an update to a Slicer module developed during project week #37. The approach reformulates anatomical landmark detection as a classification problem through a virtual agent placed inside a 3D Cone-Beam Computed Tomography (CBCT) scan. This agent is trained to navigate in a multi-scale volumetric space to reach the estimated landmark position. The agent movements decision relies on a combination of Densely Connected Convolutional Networks (DCCN) and fully connected layers. Automated Landmark Identification (ALI) is a tool available in the extension SlicerAutomatedDentalTool. This module aims to automatically identify landmarks on different type of scans (CBCT, IOS). 
The current CBCT models trained include the Cranial Base, Upper and Lower Bones of the face, and the teeth (Left, Right, Upper, and Lower). + +## Objective + + + +1. Retrain the different landmarks models with new and larger datasets annotated by clinicians, with particular focus on teeth on atypical or impacted position inside the bone. +2. Hyperparameters fine tuning for maintenance and improved accuracy on the previously available code. + +## Approach and Plan + + + +1. Collected and preprocessed data +2. Model Architecture: Review and improve the existing model architecture, which utilizes Densely Connected Convolutional Networks (DCCN) and fully connected layers. +3. Hyperparameter Fine-Tuning: Conduct hyperparameter optimization to fine-tune the model for improved accuracy. This includes adjusting learning rates, batch sizes, and regularization techniques. +4. Training and Validation + Dataset Split: Divide the dataset into training, validation, and test sets to ensure proper model evaluation. + Model Training: Train the models for automated landmark identification using the restructured dataset. Employ techniques such as data augmentation to improve generalization. + Validation Metrics: Use appropriate evaluation metrics such as maximum, mean and standard deviation fo errors compared with gold standard annotations. +5. Pull request of the updated models into the SlicerAutomatedDentalTool extension to enable improved automated landmark identification for CBCT scans. +6. User Interface: Enhance the user interface to make it user-friendly and intuitive for clinicians. +7. Testing and Quality Assurance: Thoroughly test the updated module to identify and resolve any bugs or issues. Ensure that the automated landmark identification module performs accurately and reliably on different types of scans. +8. Documentation and Training: Create comprehensive documentation for users and developers, including instructions on how to use the module effectively. 
+ +## Progress and Next Steps + + + +1. Collected and preprocessed data +2. Hyperparameter Fine-Tuning +3. Training Dataset Split: Divide the dataset into training, validation, and test sets to ensure proper model evaluation. +4. The evaluation metrics currently seems unreliable; so I am seeking guidance on how to improve. + +# Illustrations + + + +![SegTab](https://user-images.githubusercontent.com/46842010/180010603-37dce4c3-e7f8-4b3a-98a1-2874918320cb.png) + +![Slicer screen](https://user-images.githubusercontent.com/46842010/174138265-66ab080e-e885-4f76-a150-7e4da3869aa0.png) + +# Background and References + + + +Link to the AutomatedDentalTool Github: diff --git a/PW40_2024_GranCanaria/Projects/AmpSczCombiningBaselineAndLongitudinalInformationForPredictionOfPsychosisConversion/README.md b/PW40_2024_GranCanaria/Projects/AmpSczCombiningBaselineAndLongitudinalInformationForPredictionOfPsychosisConversion/README.md new file mode 100644 index 000000000..e8c6c2c21 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/AmpSczCombiningBaselineAndLongitudinalInformationForPredictionOfPsychosisConversion/README.md @@ -0,0 +1,80 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: AMP SCZ Combining baseline and longitudinal information for prediction of psychosis + conversion +category: Quantification and Computation +presenter_location: In-person + +key_investigators: + +- name: Pablo Polosecki + affiliation: IBM Research + country: USA + +- name: Nora Penzel + affiliation: MGH + country: USA + +- name: Ofer Pasternak + affiliation: MGH + country: USA + +--- + +# Project Description + + + +This project is part of the [AMP SCZ program](https://www.ampscz.org/), an initiative for early detection of risk for schizophrenia. + +A key goal in AMP SCZ is to predict which patients that present initially mild or sub-threshold symptoms will eventually develop psychosis. 
Most predictive models are based on data acquired on their first medical visit (the baseline visit). An important question is how much is gained by following patients over time (longitudinal data). Moreover, what is a principled way to combine baseline and longitudinal information? + +In this project we will implement predictive models that make use of both baseline and longitudinal information for psychosis prediction. This project builds on a previous [one](https://projectweek.na-mic.org/PW39_2023_Montreal/Projects/LongitudinalModelOfPsychosisConversion/), in which we implemented an approach called "joint modeling", which had important limitations. For this project, we will implement one based on a combination of two approaches: + +- Multiple kernel learning (MKL): a simple predictive model for the fusion of multiple modalities. MKL combines kernels (i.e. a similarity measure across samples) from different modalities. Some modalities could be baseline measures, while others could be longitudinal trajectories. +- Dynamic time warping (DTW): a way to estimate the dissimilarity or distance between trajectories, regardless of differences in the number of time points, sampling rate, or the existence of delays between them. It is simple to build kernels for MKL from DTW distances. + +## Objective + + + +1. Implement a Python-based version of MKL-DTW longitudinal models adapted for common best practices in machine learning (separate train/test, scikit-learn compatible methods). +2. Quantify the advantage of longitudinal models vs baseline predictors in a legacy dataset. + +## Approach and Plan + + + +1. Write an estimator of kernel distances based on DTW in python. +2. Write an extension of the MKL package MKLpy that can integrate DTW kernels for longitudinal modalities with traditional kernels for baseline modalities. +3. Benchmark performance on a legacy dataset. + +## Progress and Next Steps + + + +1. 
We implemented a number of similarity measures for multivariate longitudinal sequences. +2. We implemented the extension of multiple kernel learning to use these kernels in longitudinal datasets. +3. We curated a dataset from a semi-public source (NIH) with cross-sectional and longitudinal information. +4. We tried using the curared dataset to validate the new prediction method. We are currently finding some issues with the samples, which we are fixing. + +### Next steps: +5. Fix the issues with the proposed dataset. +6. Find a new dataset to make longitudinal predictions in a clinically usefull scenario (e.g. few visits) + +# Illustrations + + + +*No response* + +# Background and References + + + +*No response* diff --git a/PW40_2024_GranCanaria/Projects/ArInSlicer/README.md b/PW40_2024_GranCanaria/Projects/ArInSlicer/README.md new file mode 100644 index 000000000..e1bb9da09 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/ArInSlicer/README.md @@ -0,0 +1,127 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: AR in Slicer +category: VR/AR and Rendering +presenter_location: In-person + +key_investigators: + +- name: Alicia Pose Díez de la Lastra + affiliation: Universidad Carlos III de Madrid + country: Spain + +- name: Mónica García Sevilla + affiliation: Universidad Carlos III de Madrid + country: Spain + +- name: Felix von Haxthausen + affiliation: Universidad Carlos III de Madrid + country: Spain + +- name: Javier Pascau + affiliation: Universidad Carlos III de Madrid + country: Spain + +- name: Amaia Iribar Zabala + affiliation: Fundación Vicomtech + +- name: Rafael Benito Herce + affiliation: Fundación Vicomtech + country: Spain + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware + country: USA + +--- + +# Project Description + + + +In previous Project Week events we have already presented our project [AR in Slicer](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/ARinSlicer/). 
On it, we managed to establish a communication bridge between Microsoft HoloLens 2 and 3D Slicer using OpenIGTLink communication protocol. + +To achieve this, we created our own custom-made client-side socket in Unity using C# programming language. Nevertheless, our solution presented some computational limitations and only enable the exchange of one message of each kind (one transform and one image). + +This project aims to optimize this issue. One option is to optimize how the system currently works with our C# scripts. Alternatively, we can explore the incorporation of the native OpenIGTLink scripts defined in Python or C++ language. + +To give some context to the project, we focus this time on Birth Delivery Training. Birth delivery training equips healthcare professionals with the necessary skills and knowledge to handle various scenarios during childbirth, ensuring the safety and well-being of both the mother and the newborn. This specialized training covers a spectrum of techniques, from normal deliveries to emergency interventions, preparing healthcare providers to manage complications effectively. Proper training enhances the capacity to recognize and address potential risks, fostering a timely response in critical situations. + +In this project we aim at creating a new solution for birth delivery training based on an optimized version of the previous [AR in Slicer](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/ARinSlicer/) project. This new app will enable collaborative teacher-student interaction with the holograms for a better learning experience. + +## Objective + +This project has a double purpose: + +1. Optimize the message exchange by the platforms. One option for that is to explore the incorporation of OpenIGTLink protocol in Python language into the Unity project. + 1.1. Ultimately, we could to offer the possibility to work with the complete set of OpenIGTLink to enable the creation of more complete AR applications. +2. 
Create a collaborative AR application that connects multiple HoloLens 2 + 2.1. We focus on the field of birth delivery training. In this app, a teacher can show the birth delivery maneuvers to a student using this AR system. + +## Approach and Plan + + + +1. Read existing literature on the utilization of Python in Unity +2. Transfer Python scripts defining OpenIGTLink protocol to an empty Unity project +3. Test their functioning in an application similar to the one presented in [AR in Slicer](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/ARinSlicer/) - Also available in [this GitHub repository](https://github.com/BSEL-UC3M/HoloLens2and3DSlicer-PedicleScrewPlacementPlanning.git). +4. Create an application in Unity that can share multiple messages with 3D Slicer. The application should enable to decide if we want to be the master or the slave (to chose which user will manipulate the virtual information and which one will be limited to visualization). It should be possible to change this option dynamically. +5. Import all necessary models to the scene in Unity +6. Register virtual information with a mannequin in real life to provide haptic feedback +7. Combine this information with a tracker (EMTS) to record the movement of tools in real life. +8. Replicate the movement of actual tools in our virtual models using the same OpenIGTLink bridge + +## Progress and Next Steps +Upon arriving here, we found some different ways to address this problem, so finally, we did not follow the original plan, but explored these other approaches: + +![ProgressDiagram](https://github.com/NA-MIC/ProjectWeek/assets/66890913/803249b1-eac1-4d40-8b3e-957cd021bb79) + +These days we have tried multiple approaches for displaying 3D Slicer information in Microsoft Hololens 2 (HL2). +These are the results: +We have three options: Stream information to HoloLens 2 from a computer, build an application directly in HL2, or use 3D Slicer. +### 1. 
Computer +#### 1.1. UWPOpenIGTLink +JC provided us with a [Windows Runtime component](https://github.com/IGSIO/UWPOpenIGTLink) for OpenIGTLink. Upon building it (for x64 architecture, as we want to read it from a computer), it creates a Winmd that should provide direct access to the libraries. We tried both methods here: building the app for the computer and directly running it from the Unity editor. None of the options worked, as they are not Universal Windows Platforms. + +#### 1.2. OpenIGTLink +We decided to build the original [OpenIGTLink protocol in C++](https://github.com/openigtlink/OpenIGTLink/blob/master/Documents/Protocol/index.md) for an x64 architecture and then create a wrapper in C# to read these libraries in Unity. This has the potential to work, although it requires quite a lot of hard work and could not be finished this week. We will keep on exploring this possibility during the next months. + +#### 1.3. Python scripting +It is possible to implement [python scripts in Unity](https://docs.unity3d.com/Packages/com.unity.scripting.python@6.0/manual/index.html). Theoretically, since there is a [python version for the OpenIGTLink protocol](https://github.com/lassoan/pyigtl), it should be possible to feed Unity with the OpenIGTLink Python library to seamlessly exchange all types of messages. Nevertheless, due to time constraints, we could not test this approach during this week. Maybe on the next one... + +### 2. Microsoft HoloLens 2 +The next alternative was to actually build the application on ARM64 architecture for HoloLens 2 using the [UWPOpenIGTLink](https://github.com/IGSIO/UWPOpenIGTLink). In this case, the Winmd can be read because HL2 is a Universal Windows Platform, so no wrapper is needed. Therefore, this option should be suitable too, and we might also work on it during the next months. + +### 3. 
3D Slicer +Finally, now that [OpenXR is finally available in 3D Slicer](https://github.com/KitwareMedical/SlicerVirtualReality), we also explored this path to stream information without depending on Unity. +This worked and seamlessly displays the 3D view in 3D Slicer directly into HoloLens. Still, some factors should be improved: + - Registration between 3D Slicer and HoloLens 2 is not well managed, and models are usually rendered too far away from the user. Our current approach is to perform fiducial based registration to bring models closer. + - Models can be grabbed and dragged in the scene to see them from different perspectives. However, only near interaction is available (no far interaction, voice commands...) --yet! + - Models cannot be manipulated independently and they move as a block. To prevent this, so far, we simply "toggled selectable" those we didn't want to interact with. + - So far, no UI is visible from HL2 so the user cannot press buttons or interact with the 3D Slicer interface from the glasses. + - None of this is officially available in any 3D Slicer release, but everything is working on our own build. 3D Slicer should be upgraded soon with these features once some issues are fixed, though. 
+ +Please, check the results of our implementation in the following video: + +OpenXR in 3D Slicer: + + + +# Background and References + +All source code for the [AR in Slicer](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/ARinSlicer/) project is contained in [this GitHub repository](https://github.com/BSEL-UC3M/HoloLens2and3DSlicer-PedicleScrewPlacementPlanning.git) + +- [OpenIGTLink protocol](https://github.com/openigtlink/OpenIGTLink/blob/master/Documents/Protocol/index.md) +- [OpenIGTLink python](https://github.com/lassoan/pyigtl) +- [UWPOpenIGTLink](https://github.com/IGSIO/UWPOpenIGTLink) +- Since I had no prior knowledge of building applications, I humbly created a Word document with all the steps we followed for each build. I hope you find it useful! [CreateOpenIGTLinkWrapperForUnity.docx](https://github.com/NA-MIC/ProjectWeek/files/14142020/CreateOpenIGTLinkWrapperForUnity.docx) + + +# Acknowledgements +Special thanks to JC for his assistance during the whole event. + +Research supported by projects PI122/00601 and AC20/00102 (Ministerio de Ciencia, Innovación y Universidades, Instituto de Salud Carlos III, Asociación Española Contra el Cáncer and European Regional Development Fund “Una manera de hacer Europa”), project PerPlanRT (under the frame of ERA PerMed), TED2021-129392B-I00 and TED2021-132200B-I00 (MCIN/AEI/10.13039/501100011033 and European Union “NextGenerationEU”/PRTR). 
+![Acknowledgments](https://github.com/NA-MIC/ProjectWeek/assets/66890913/5fd4c05f-0028-4a00-a364-69926fda79eb) diff --git a/PW40_2024_GranCanaria/Projects/AugmentedRealityExperimentsForCardiologyImaging/README.md b/PW40_2024_GranCanaria/Projects/AugmentedRealityExperimentsForCardiologyImaging/README.md new file mode 100644 index 000000000..777afa7b7 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/AugmentedRealityExperimentsForCardiologyImaging/README.md @@ -0,0 +1,88 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Augmented reality experiments for cardiology imaging +category: VR/AR and Rendering +presenter_location: In-person + +key_investigators: + +- name: Steve Pieper + affiliation: Isomics + country: Inc., USA + +- name: Vitaliy Petrov + affiliation: Lviv Regional Clinical Hospital + country: Ukraine + +--- + +# Project Description + + + +Explore tools for clinician review and patient education using smartphone browser-based augmented reality. +There are two possible scenarios we could support: + +* Using existing SlicerHeart and SlicerVirtualReality extensions to support smartphone tracking +* Exporting Slicer scene data to a web page for rendering locally in smartphone browser + +## Objective + + + +1. Determine how much work would be required to make a system that could be tested in clinical scenarios +2. See what other people's experience has been in the feasibility and/or utility of such systems + +A few of the features we'd like to explore: +* Using the a smartphone as a controller in SlicerVirtualReality +* Communicating events from smartphone to Slicer to control rendering +* Sending rendered images to phone vs. rendering in phone locally +* Exporting data to a stand-alone scene that could be viewed on a smartphone (e.g. by emailing a link or generating a QR code) + +## Approach and Plan + + + +1. Review existing technologies for applicability for this use case +2. 
Talk with Project Week attendees about similar needs and experiences +3. If time, do some prototyping + +## Progress and Next Steps + + + +1. Resurrected WebServer tracker experiment +2. Updated https support with custom certificate that can be uploaded to android phone +3. Tried demo controlling volume rendering with phone +4. Showed demo to other project week attendees using another android phone +5. Created a [pull request](https://github.com/Slicer/Slicer/pull/7568) to add demo to Slicer +6. Discussed plans to make the demo into a more useful tool + - Allow user to set their position relative to the rendered scene + - Use touch screen to control rendering modes and other elements in the Slicer scene + - Transfer rendered images from Slicer to phone screen + - Set up QR code so phone can easily access Slicer WebServer + - Use websocket to minimize latency + - Perform further testing, possibly as part of AR/VR developer courses + - Define clinical scenarios and set up testing environment + +WIP in this YouTube shorts link: [https://youtube.com/shorts/JeNtDT1LF6k?feature=share](https://youtube.com/shorts/JeNtDT1LF6k?feature=share) + +# Illustrations + + + + +# Background and References + + + +* Phone based tracking communicating with Slicer from earlier Project Week: +* Use of Slicer's WebServer to host tracker data: +* vtk.js based volume rendering demo compatible with Android smartphone AR tracking: +* Background on WebXR and device compatibility: https://immersive-web.github.io/webxr/explainer.html diff --git a/PW40_2024_GranCanaria/Projects/AutomatedRegistrationOfConeBeanComputedTomographyScansMaintenance/README.md b/PW40_2024_GranCanaria/Projects/AutomatedRegistrationOfConeBeanComputedTomographyScansMaintenance/README.md new file mode 100644 index 000000000..a06296c60 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/AutomatedRegistrationOfConeBeanComputedTomographyScansMaintenance/README.md @@ -0,0 +1,86 @@ +--- 
+layout: pw40-project + +permalink: /:path/ + +project_title: Automated Registration of Cone-Bean Computed Tomography scans - maintenance +category: Other +presenter_location: Online + +key_investigators: + +- name: Jeanne Claret + affiliation: University of Michigan + country: USA + +- name: Gaëlle Leroux + affiliation: UoM + country: USA + +- name: Eduardo Duarte Caleme + affiliation: UoM + country: USA + +- name: Claudia Mattos + affiliation: UoM + country: USA + +- name: Lucia Cevidanes + affiliation: UoM + country: USA + +- name: Juan Prieto + affiliation: University of North Carolina + country: USA + +--- + +# Project Description + + + +The Automated Registration tool, AREG, was first presented at the NA-MIC project week #39. +It aims to reduce the sources of error in the 3D image processing workflow by automating the orientation and registration of 3D Cone-Beam Computed Tomography. These methods combine classical algorithmic approaches and AI-based models trained and tested on de-identified CBCT volumetric images. + +The registration method is based on an automatic tool, AMASS, available in the extension SlicerAutomatedDentalTool, to perform a segmentation of the different regions of reference used for the regional voxel-based registration + +The different methods for automatic orientation and registration of 3D CBCT scans rely on a combination of algorithmic and deep-learning techniques to perform both the orientation and the registration automatically. It also uses work that our group of researchers has already developed. Our Python-based algorithm and requires multiple libraries for the different image-processing tasks accomplished throughout the proposed method: SimpleITK, VTK, SimpleElastix. To implement these tools, we also used the Medical Open Network for Artificial Intelligence (MONAI) library, which is a PyTorch-based framework for medical image analysis. 
MONAI offers several advantages for our work, such as high performance, modularity, and interoperability with other libraries. + +## Objective + + + +1. Maintain the code to make it work properly on the new version of Slicer + +## Approach and Plan + + + +1. Find the issue by testing +2. Correct the problem + +## Progress and Next Steps + + + +1. The module AREG is working only with itk-elastix==0.17.1 +2. In the last release of SlicerAutomatedDentalTools, users are asked if they agree to change the library versions of their Slicer environment. + +# Illustrations + + +![Screenshot from 2024-02-02 08-49-19](https://github.com/NA-MIC/ProjectWeek/assets/91120559/cca61e1d-e380-4acf-b904-cd9a78be8080) + +![Workflow](https://github.com/lucanchling/ProjectWeek/assets/72148963/a6617e85-df6e-426f-ab4a-eef322453e7e) + +![MaskComparison](https://github.com/lucanchling/ProjectWeek/assets/72148963/7312a43f-8b00-4513-bf75-0cf1a363b310) + +![AREGCBCTExample](https://github.com/lucanchling/ProjectWeek/assets/72148963/66574b8d-a9b0-465a-a5ef-4206bb2d84dd) + +# Background and References + + + +* [AMASS and AREG](https://github.com/DCBIA-OrthoLab/SlicerAutomatedDentalTools) diff --git a/PW40_2024_GranCanaria/Projects/CbctToothSegmentation/README.md b/PW40_2024_GranCanaria/Projects/CbctToothSegmentation/README.md new file mode 100644 index 000000000..92c50fcc2 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/CbctToothSegmentation/README.md @@ -0,0 +1,61 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: CBCT Tooth Segmentation +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Sam Horvath + affiliation: Kitware + country: USA + +- name: Sadhana Ravikumar + affiliation: Kitware + country: USA + +--- + +# Project Description + + + +We are working on a Slicer extension for automated segmentation of individual teeth in cone-beam 
CT dental scans using a deep-learning based approach. We are focusing this week on getting it into the extension manager. + +## Objective + + + +1. Submit the extension to the extension index. +2. Get extension approved in the next few weeks. + +## Approach and Plan + + + +1. Clean up remaining bugs with the code. +2. Address issues on the extension submission checklist + +## Progress and Next Steps + + + +1. Readme, tutorials and screenshots added. +2. Opened [pull request](https://github.com/Slicer/ExtensionsIndex/pull/2000) + +# Illustrations + + + +![Screenshot01](https://github.com/NA-MIC/ProjectWeek/assets/25040869/2ad567b6-04e8-4f3e-8faa-6c0019cef5ac) + +# Background and References + + + +[GitHub Repository](https://github.com/KitwareMedical/SlicerCBCTToothSegmentation) diff --git a/PW40_2024_GranCanaria/Projects/CloudAppStreaming/README.md b/PW40_2024_GranCanaria/Projects/CloudAppStreaming/README.md new file mode 100644 index 000000000..585012e6e --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/CloudAppStreaming/README.md @@ -0,0 +1,73 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Cloud-based app streaming for Slicer +category: Cloud / Web +presenter_location: In-person + +key_investigators: +- name: Csaba Pinter + affiliation: EBATINCA S.L. + country: Spain + +- name: Steve Pieper + affiliation: Isomics, Inc. + country: USA + +- name: Juan Ruiz Alzola + affiliation: ULPGC + country: Spain + +- name: Rafael Nebot Medina + affiliation: Instituto Tecnológico de Canarias, SA + country: Spain + +- name: Davide Punzo + affiliation: Freelancer + country: France +--- + +# Project Description + + + +We plan to create a service that simplifies using AWS for cloud-based app streaming of Slicer (with or without extensions, or a Slicer custom app), effectively resulting in a single-click launch of the desired Slicer distribution in a new tab in the web browser. 
+In this project we are looking to gather requirements, expectations, limitations, or any kind of information we should take into account while planning and then developing the service. + +## Objective + + + +1. Objective A. Talk to as many interested people as possible about where and how they would use it, and what expectations they have about such a service + +## Approach and Plan + + + +1. Small breakout session about Slicer in web +2. Small breakout session about SlicerHub (Rafael Nebot) +3. Demo of ImagineHive (Davide) + +## Progress and Next Steps + + + +1. Explored several existing use cases in the breakout sessions +2. Learned a lot about possible approaches and problems + +# Illustrations + + + +# Background and References + + + +* Previous related project: https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/SlicerHub/ diff --git a/PW40_2024_GranCanaria/Projects/CommonSplineSurfacesInfrastructureForSurgicalPlanningInSlicerHeartAndSlicerLiver/README.md b/PW40_2024_GranCanaria/Projects/CommonSplineSurfacesInfrastructureForSurgicalPlanningInSlicerHeartAndSlicerLiver/README.md new file mode 100644 index 000000000..87671ed20 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/CommonSplineSurfacesInfrastructureForSurgicalPlanningInSlicerHeartAndSlicerLiver/README.md @@ -0,0 +1,87 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Common spline surfaces infrastructure for surgical planning in Slicer-Heart and Slicer-Liver +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Gabriella D'Albenzio + affiliation: Oslo University Hospital + country: Norway + +- name: Rafael Palomar + affiliation: Oslo University Hospital + country: Norway + +- name: Csaba Pinter + affiliation: Ebatinca + country: Spain + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +--- + +# Project Description + + + +The primary goal is to unify the spline surface infrastructures of Slicer-Liver and 
Slicer-Heart, focusing on the integration of Bezier tensor product surfaces and NURBS tensor product surfaces. This integration aims to create a versatile and robust tensor-product surface filter capable of generating a variety of surface families. + +## Objective + + + +1. **Development of a Generic Tensor-Product Surface Filter:**\ + Explore the feasibility of creating a filter that can handle both Bezier and NURBS surfaces, along with other potential surface types. + +2. **Establishment of a Class Hierarchy:**\ + Discuss the design of a class hierarchy that effectively supports different spline surface families. This includes determining the commonalities and differences between the current implementations in Slicer-Heart and Slicer-Liver. + +3. **Creation of a Common Extension Framework:**\ + Aim to develop a common extension framework that can be readily adapted for future extensions, ensuring scalability and flexibility. + +4. **Integration with vtkAddon/VTK:**\ + Consider how this new infrastructure could potentially be incorporated into the vtkAddon or VTK libraries, enhancing the broader community's access to these tools. + +## Approach and Plan + + + +* **Research and Analysis:**\ + Conduct an analysis of the existing spline surface implementations in Slicer-Heart and Slicer-Liver. Identify the key features, limitations, and potential areas of synergy. + +* **Design and Development:**\ + Develop a detailed plan for the implementation, including the architecture of the generic tensor-product surface filter and an eventual class hierarchy. + +* **Plan for further development:**\ + As a common spline infrastructure could be useful in contexts wider than IGT, plan for further integration of this infrastructure in vtkAddon / VTK. 
+ +## Progress and Next Steps + + + +TBD + +# Illustrations + + + +![heart-NURBS](https://github.com/NA-MIC/ProjectWeek/assets/1978682/756b9f13-d359-4f58-a556-3eca7dc813c3) + +![liver_bezier](https://github.com/NA-MIC/ProjectWeek/assets/1978682/ade0e3cc-b44c-465e-ad6b-6ac2d2c1f78b) + +# Background and References + + + +1. [Slicer-Liver](https://github.com/ALive-research/Slicer-Liver) +2. [Slicer-Heart](https://github.com/SlicerHeart/SlicerHeart) +3. diff --git a/PW40_2024_GranCanaria/Projects/ContourRepresentationFromLabelmapsPolysegInCornerstone3D/README.md b/PW40_2024_GranCanaria/Projects/ContourRepresentationFromLabelmapsPolysegInCornerstone3D/README.md new file mode 100644 index 000000000..4920c74a8 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/ContourRepresentationFromLabelmapsPolysegInCornerstone3D/README.md @@ -0,0 +1,68 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Contour Representation from Labelmaps - PolySEG in Cornerstone3D +category: Cloud / Web +presenter_location: Online + +key_investigators: + +- name: Alireza Sedghi + affiliation: OHIF + country: Accolade Imaging, Canada +- name: Mo Alasad + affiliation: XNAT, ICR + country: UK + +--- + +# Project Description + +We've been working hard on implementing polySEG in cornerstone3D, and currently we have implemented the following converters in [a PR under review in Cornerstone3D](https://github.com/cornerstonejs/cornerstone3D/pull/844): + +However, we still have two converters remaining that seem to be more complex: surface to contour and labelmap to contour. We're excited to tackle these challenges and continue moving forward. This project aims to work on this. + +## Objective + +1. Finish the surface cutting in each slice in the viewport and provide proper API around it for precaching inside a webworker +2. 
Try to implement two versions using our SVG rendering framework, and try to handle contour holes and islands + +## Approach and Plan + +1. Convert the representations to Surface and cut through them to render in viewports +2. For editing, get the intersections and traverse to find a closed loop polygon +3. Try to identify holes in order to render them as holes + +## Progress and Next Steps + + + +1. I successfully completed the first task by using vtkClipClosedSurface to cut through the surface and render it as polyData within the viewport. +2. Additionally, I implemented pre-caching of all slices within a web worker. This ensures that all cuts are calculated in advance, eliminating the need to wait for the user to scroll through each slice. + + +**Next steps** +- include re-caching upon the orientation change, since we need to cancel the previous job on the worker and start a new one +- We also need to add another representation to edit the contours via our contour SVG editing tools, I have got the code from Forrest Li (Kitware) for the vtkContourLoopExtraction and [created a PR to vtk.js here](https://github.com/Kitware/vtk-js/pull/3003) + +# Illustrations + + + + + + + + +*No response* + +# Background and References + +https://github.com/PerkLab/PolySeg +https://bitbucket.org/icrimaginginformatics/polyseg-wasm/src/master/ diff --git a/PW40_2024_GranCanaria/Projects/CurrentStateOfDicomwebForPathology/README.md b/PW40_2024_GranCanaria/Projects/CurrentStateOfDicomwebForPathology/README.md new file mode 100644 index 000000000..c983cb988 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/CurrentStateOfDicomwebForPathology/README.md @@ -0,0 +1,72 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Current state of DICOMweb for pathology +category: DICOM +presenter_location: In-person + +key_investigators: + +- name: Daniela Schacherer + affiliation: Fraunhofer MEVIS + country: Germany + +- name: Chris Bridge + 
affiliation: MGH + country: USA + +- name: David Clunie + affiliation: PixelMed + country: USA + +- name: André Homeyer + affiliation: Fraunhofer MEVIS + country: Germany + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +--- + +# Project Description + + + +DICOMweb™ defines a set of RESTful services for web-based medical imaging with DICOM. This project aims at getting a detailed understanding of the capabilities of available (Python) libraries and tools that implement DICOMweb and how they facilitate working with pathology whole-slide images (WSI) via DICOMweb services. The summarized knowledge will be the basis for discussions and help to decide what further work makes sense (e.g. new library, contributing specific functionality to an existing library etc.). + +## Objective + + + +1. Objective A: Have a document (document A) describing capabilities of available tools. +2. Objective B: Have a document (document B) summarizing discussions and plans for further work. + +## Approach and Plan + + + +1. Get practical experience with the libraries: wsidicom, dicomslide (both underlyingly using dicomweb_client). +2. Set-up document A +3. Set-up times to discuss during project week +4. Summarize discussion in document B + +## Progress and Next Steps + +1. Prepared small Jupyter notebook [here](https://colab.research.google.com/drive/1WxOVtLOGwt7xSOy7SghbOWxcDzG_XAD4?usp=sharing) on how to get started with wsidicom vs. dicomslide to access data via DICOMweb +2. Summarized capabilities of both libraries beyond DICOMweb in a Google Doc [here](https://docs.google.com/document/d/1qWjzwneL4em7fQYdCfaP6RG6AtmAz23o5ZARfCo1Evs/edit?usp=sharing). + +Both will be updated/extended after the project week as relevant work and discussions goes on. + +# Illustrations + +![Conceptual overview of DICOMweb](./dicomweb.png) \ +*Conceptual overview of DICOMweb. 
Taken from: https://www.dicomstandard.org/using/dicomweb/capabilities.* + +# Background and References + +- Repository [dicomslide](https://github.com/ImagingDataCommons/dicomslide) +- Repository [wsidicom](https://github.com/imi-bigpicture/wsidicom) +- Repository [dicomweb-client](https://github.com/ImagingDataCommons/dicomweb-client) diff --git a/PW40_2024_GranCanaria/Projects/CurrentStateOfDicomwebForPathology/dicomweb.png b/PW40_2024_GranCanaria/Projects/CurrentStateOfDicomwebForPathology/dicomweb.png new file mode 100644 index 000000000..3e96ac352 Binary files /dev/null and b/PW40_2024_GranCanaria/Projects/CurrentStateOfDicomwebForPathology/dicomweb.png differ diff --git a/PW40_2024_GranCanaria/Projects/CustomChatgptForMitk/MITK_local.png b/PW40_2024_GranCanaria/Projects/CustomChatgptForMitk/MITK_local.png new file mode 100644 index 000000000..cc8bf5477 Binary files /dev/null and b/PW40_2024_GranCanaria/Projects/CustomChatgptForMitk/MITK_local.png differ diff --git a/PW40_2024_GranCanaria/Projects/CustomChatgptForMitk/README.md b/PW40_2024_GranCanaria/Projects/CustomChatgptForMitk/README.md new file mode 100644 index 000000000..c482a8786 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/CustomChatgptForMitk/README.md @@ -0,0 +1,80 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: 'Custom ChatGPT for MITK ' +category: Other +presenter_location: Online + +key_investigators: + +- name: Ashis Ravindran + affiliation: DKFZ + country: Germany + +- name: Stephen Schaumann + affiliation: DKFZ + country: Germany + +--- + +# Project Description + + + +Development of a custom chat-based helper tailored specifically for the users & developers of the Medical Imaging Interaction Toolkit (MITK), utilizing recent LLM advancements. The objective is to create a conversational AI interface that assists MITK users in navigating the software, troubleshooting issues esp. 
for developers, and obtaining relevant information. Leveraging the advances in the field of AI, the custom assistant (e.g. MITK GPT) can provide personalized assistance adapting to individual preferences and workflows. +One of the easiest ways to materialize this proposal to use the OpenAI's custom GPT feature and expand on it. + + +Even though the proposal is currently pitched for MITK, it could benefit other tools eg. Slicer3D, as well. + +## Objective + + + +Custom versions of ChatGPT for the users & developers of the Medical Imaging Interaction Toolkit (MITK) + +## Approach and Plan + + + +* One of the easiest ways to materialize this proposal is to use the OpenAI's custom GPT feature and expand on it. + +* Build deployable solutions outside ChatGPT for retrieval-augmented generation (RAG). +* Explore feasibility of chat LLM solutions out there for MITK users & dev. + +## Progress and Next Steps + + +1. We created a CustomGPT inside ChatGPT for MITK users. Introducing: *MITK Buddy* ! + * URL: https://chat.openai.com/g/g-E36xLFyf5-mitk-buddy + * Only available for ChatGPT Plus users and, upper limit on data. + * -> Promising results (see image below), showing feasibility of the basic idea. However, this solution is not suitable for access within the application. +2. We explored building our own (semi-)local solutions for retrieval-augmented generation. + * Developed prototype web app based on Streamlit & Langchain (Python) libraries. + * Code available here for adoption: https://github.com/ASHISRAVINDRAN/custom-chatbot-app + * -> Does not reach the same level of quality as Custom GPT, but is completely under own control (regarding how it runs, but also what data is shared) +3. Explored other online LLM solutions out there which works nicely with MITK. + * ChatGPT knowledge is only until Jan 2022. + * Phind.com seems updated & catered more towards helping (MITK) developers. + * -> There are many tools out there already for developers. 
There is less need to incorporate custom information that is not already there. + It seems unfeasible to achieve better results than e.g. Phind.com , so our focus laid more on getting a custom solution for our users + +# Illustrations + + +1. MITK Buddy + ![MITK Buddy](./mitk_buddy.png) + +2. Locally deployable web-app solution based on OpenAI GPT4. Can be locked out of any external knowledge. + ![MITK OpenAI](./MITK_local.png) + + +# Background and References + + + +* Langchain: https://python.langchain.com/docs/get_started/introduction diff --git a/PW40_2024_GranCanaria/Projects/CustomChatgptForMitk/mitk_buddy.png b/PW40_2024_GranCanaria/Projects/CustomChatgptForMitk/mitk_buddy.png new file mode 100644 index 000000000..71ba67392 Binary files /dev/null and b/PW40_2024_GranCanaria/Projects/CustomChatgptForMitk/mitk_buddy.png differ diff --git a/PW40_2024_GranCanaria/Projects/DICOMLabelmaps/README.md b/PW40_2024_GranCanaria/Projects/DICOMLabelmaps/README.md new file mode 100644 index 000000000..1a7117e6e --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/DICOMLabelmaps/README.md @@ -0,0 +1,113 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Implement new DICOM Label Map Segmentation Supplement +category: DICOM +presenter_location: In-person + +key_investigators: +- name: Michael Onken + affiliation: Open Connections GmbH + country: Germany + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +- name: David Clunie + affiliation: PixelMed (IDC) + country: USA + +- name: Joël Spaltenstein + affiliation: Agora Care SA + country: Switzerland + +--- + +# Project Description + +DICOM has a new Supplement underway (Supp 243: Label Map Segmentation) which is currently +in status "Public Comment". DICOM already has support for Segmentations, mainly through +the Segmentation Storage SOP Class. 
While it is very efficient for storing densely packed +overlapping segmentations, typical medical segmentations are non-overlapping, with each +segment not covering large portions of the pixel space, and the Segmentation Storage SOP +Class wastes lots of space compared to the ITK-enabled formats like NRRD when storing these. + +Slicer uses the dcmqi library to convert between DICOM Segmentations and ITK formats, and +dcmqi itself relies on the dcmseg library from DCMTK. + +In the PW, we want to build DICOM Label Map support into DCMTK, dcmqi and +finally Slicer. + +## Objective + + +1. Add Label Map support to DCMTK, dcmqi and Slicer. +2. Identify issues with the current Supplement text and provide comments to DICOM Committee. + + +## Approach and Plan + + + +1. (Optional: Move OverlapUtil code from dcmqi to upstream DCMTK). During the last months +we added support for overlapping segment detection to dcmqi which enhances export to +DICOM Segmentations in their current version. This code should go to DCMTK which has just +been released in version 3.6.8 and is open again for new features. +2. Add DICOM Label Map support to DCMTK's dcmseg API. +3. The code will not go upstream (in DCMTK) unless the Supplement has been progressed to Final Text. +It could make sense to move the code to dcmqi classes first and mark it as experimental, also in the API. Or, +keep it in a separate branch and wait for the merge until the Supplement is finalized. +4. Add DICOM Label Map support to dcmqi, using the updated dcmseg API. +5. Add DICOM Label Map support to Slicer, to make use of the new dcmqi segmentation converter version. + +Let's see how far we can get in one week, but the goal is to have at least a first working +Label map implementation for DCMTK and then start with dcmqi support with Slicer to follow. + +## Progress and Next Steps + + + +1. DCMTK: Updated (templated) pixel data structures to also accept 16 bit +1. 
DCMTK: Updated all dependent classes (also some outside the segmentation code) to accept that as well +3. DCMTK: Prepare code for labelmap support + 1. Changed bookkeeping and access mechanism for segments (allow sparse numberings, allow 0) + 2. Mitigate checks where necessary + 3. Implemented missing attribute "Spatial Locations Preserved" +4. DCMTK: Successfully tested roundtrip for MONOCHROME2 Labelmaps produced by highdicom +5. dcmqi: + 1. Modified to handle the concept of multiple segmentations per frame. + 2. Added search in each frame to find what segments are on the frame for `LABELMAP`s. + 3. Modified to become tolerant of non-monotonically increasing `SegmentNumber`s and accept a + segment with `SegmentNumber` of `0`. + 5. Adapted to updated DCMTK pixel data and segment access API + +Next steps: +1. DCMTK: + 1. Test support for 16 bit segmentation pixel data (added but not tested yet) + 2. Add support for PALETTE COLOR model (palette is not imported / exported yet) + 3. Add unit tests for all color model / bit depth combinations +2. dcmqi: + 1. Actually link it against the new DCMTK, run it and test it... + 2. Performance can probably be improved significantly by not making a bunch of function + calls within tight loops. + 4. Set up tests with LabelMap segmentation objects. 
+ +# Illustrations + + + +# Background and References + +- DICOM Supplement 243 "Label Map Segmentation": [PDF Download](https://dicom.nema.org/medical/dicom/Supps/Drafts/sup243_02_LabelMapSeg.pdf) +- DCMTK: [Homepage](https://www.dcmtk.org) and [GitHub](https://github.com/DCMTK/dcmtk/) + - DCMTK version with labelmap enhancements: [Michael's GitHub](https://github.com/michaelonken/dcmtk/tree/Labelmap) +- dcmqi: [Guide](https://qiicr.gitbook.io/dcmqi-guide/) and [GitHub](https://github.com/QIICR/dcmqi/) + - dcmqi with labelmap enhancements: [WIP PR on GitHub](https://github.com/QIICR/dcmqi/pull/491) +- Slicer: [Homepage](https://www.slicer.org/) and [GitHub](https://github.com/Slicer/Slicer) diff --git a/PW40_2024_GranCanaria/Projects/DevelopmentRefinementOfTheIdcIndexPythonInterfaceToImagingDataCommons/README.md b/PW40_2024_GranCanaria/Projects/DevelopmentRefinementOfTheIdcIndexPythonInterfaceToImagingDataCommons/README.md new file mode 100644 index 000000000..fb917f25c --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/DevelopmentRefinementOfTheIdcIndexPythonInterfaceToImagingDataCommons/README.md @@ -0,0 +1,135 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Development/refinement of the idc-index python interface to Imaging Data Commons +category: Infrastructure +presenter_location: Remote + +key_investigators: + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +- name: Vamsi Thiriveedhi + affiliation: BWH + country: USA + +- name: Daniela Schacherer + affiliation: Fraunhofer MEVIS + country: Germany + +- name: Leonard Nürnberg + affiliation: Maastricht University + country: Netherlands + +- name: Steve Pieper + affiliation: Isomics Inc + country: USA + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware Inc + country: USA + +- name: Daniela Schacherer + affiliation: Fraunhofer MEVIS + country: Germany + +- name: Chris Bridge + affiliation: MGH + 
country: USA + +--- + +# Project Description + + + +[`idc-index`](https://github.com/ImagingDataCommons/idc-index) is a lightweight python package that wraps mini-index of the data available in Imaging Data Commons and the s5cmd download tool. With this package, one can search basic attributes of IDC data, build subset and download corresponding files without login, and without setting up any prerequisites specific to either Google or AWS as easy as below: + +```bash +$ pip install 'idc-index==0.2.11' +``` + +```python +from idc_index import index + +client = index.IDCClient() +client.download_from_selection(collection_id="nsclc_radiomics", downloadDir="./my_copy") +``` + +Its basic functionality is demonstrated in this tutorial: . + +[SlicerIDCBrowser](https://github.com/ImagingDataCommons/SlicerIDCBrowser) already relies on `idc-index` for searching and downloading data from IDC. + +## Objective + + + +1. Raise awareness about +2. Improve functionality +3. Collect feedback to prioritize future developments + +## Approach and Plan + + + +1. collect feedback about what functionality would be useful to add or how to refine the API +2. discuss capabilities that would be needed to support digital pathology use cases +3. refine organization of the underlying index and exposed metadata attributes +4. finish setting up GitHub actions to simplify updates and python package publishing +5. documentation +6. discuss with python packaging experts what is the recommended practice handling attachments/binary dependencies for a python package (ie, see [https://github.com/ImagingDataCommons/idc-index/issues/3](https://github.com/ImagingDataCommons/idc-index/issues/3) and [https://github.com/ImagingDataCommons/idc-index/issues/27](https://github.com/ImagingDataCommons/idc-index/issues/27)) + +## Progress and Next Steps + + + +1. Refinement and testing to fix regressions in 0.2.9 +2. 
Discussed with Leo +3. Discussion with @pieper re utility. Feedback: "Speaking for myself, this exercise made wish we had some api documentation for idc-index. Also is there a way to report progress during the download? Also some better error messages would help. I tried pasting the collection name from the portal as the collection_id and I get a pyhon error about a manifest not existing. I had to use the collection query to figure out what the mapping rule is. It would be nice if the idc-index methods could include a mapping so that either version of the collection string is accepted. Otherwise it worked well though and this is definitely a nice way to access the data!" +4. @pieper was curious if it was possible to retrieve instanace level urls from SeriesInstanceUID. @vkt1414 created a demo notebook https://colab.research.google.com/drive/1va1xHMe1pgqZqp7RpI1VxqBKBOiGD-TW?usp=sharing - added to the package as a new API endpoint +5. need to have documentation (relevant discussion https://github.com/encode/httpx/discussions/1220) +6. Added API for getting intance-level URLs and viewer URLs +7. Started working on the documentation +8. JC is contributing a PR to refactor and introduce improvements to packaging and github actions https://github.com/ImagingDataCommons/idc-index/pull/32 +9. Discussed how to improve API with Leo and Steve; need to document specific usage scenarios of what the users would like to achieve, and use those to drive revisions of the API +10. Discussed the scope of support of slide microscopy metadata queries - need to investigate how to best represent those, since these are instance-level attributes, while currently idc-index is series-based. +``` +ContainerID, +PixelSpacing, +Rows, +Columns, +TotalPixelMatrixRows, +TotalPixelMatrixColumns, +ImageType, +TransferSyntaxUID, +SpecimenDescriptionSequence> +PrimaryAnnotationStructureSequence(PASS)>Code scheme, value.. 
+SpecimenUID, +and several others under PASS, +SpecimenPrepStepContentItemSequence>Coding terms, +OpticalPathSequence, +IlluminationTypeCodeSequence, +IlluminationColorCodeSequence, +Wavelength, +PyramidUID, +PyramidLabel +``` + +# Illustrations + + + +*No response* + +# Background and References + + + +* [NCI Imaging Data Commons](https://imaging.datacommons.cancer.gov) +* [`idc-index`](https://github.com/ImagingDataCommons/idc-index) diff --git a/PW40_2024_GranCanaria/Projects/DicomSeriesClassificationAndVisualizationOfParameters/README.md b/PW40_2024_GranCanaria/Projects/DicomSeriesClassificationAndVisualizationOfParameters/README.md new file mode 100644 index 000000000..ac8a6b945 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/DicomSeriesClassificationAndVisualizationOfParameters/README.md @@ -0,0 +1,116 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: DICOM series classification and visualization of parameters +category: DICOM +presenter_location: In-person + +key_investigators: + +- name: Deepa Krishnaswamy + affiliation: Brigham and Women's Hospital + country: USA + +- name: Steve Pieper + affiliation: Isomics, Inc. + country: USA + +- name: Bálint Kovács + affiliation: DKFZ + country: Germany + +- name: Stefan Denner + affiliation: DKFZ + country: Germany + +- name: Andrey Fedorov + affiliation: Brigham and Women's Hospital + country: USA + +- name: David Clunie + affiliation: PixelMed (IDC) + country: USA + +--- + +# Project Description + + + +To use and develop AI methods, significant data curation is required. In some cases like prostate cancer segmentation, clinicians often use multiple MRI sequences for diagnosis such as T2, diffusion-weighted series, and derived maps. + +Unfortunately, the information describing the sequences is often missing or incorrect, as it's prone to errors from technicians. The proper sequence could be analyzed visually, but this is cumbersome if thousands of scans need to be analyzed. 
Therefore, automatic methods for determining the right series are of interest. + +We propose methods to aid in the curation of DICOM data, as well as aids to help in visualization of DICOM parameters. + +## Objective + + + +We would like to develop approaches for aiding in the curation of data. The first will be the development of visualization tools to understand DICOM parameters used in scanning. Secondly, we will develop AI methods for classifying MRI scans, with a focus on prostate cancer. + +## Approach and Plan + + + +1. Use packages such as [hiplot ](https://ai.meta.com/blog/hiplot-high-dimensional-interactive-plots-made-easy/) to visualize DICOM scanning parameters across different collections and modalities in IDC. +2. Develop approaches for data curation using AI - e.g. determine the scan sequence, or if endorectal coil is present, etc. + +## Progress and Next Steps + + + +1. Started [repo here](https://github.com/deepakri201/DICOMTagViz/) for initial [hiplot](https://github.com/facebookresearch/hiplot) exploration of DICOM tags of T2 weighted axial series of prostate imaging collections from IDC +2. Had some very helpful discussions with David, Maria and Chris about understanding of parameters and previous work done in this area +3. Created similar interactive plots for DWI and ADC across different prostate collections +4. Developed a hierarchical approach for classification of prostate scans, starting with ProstateX collection -- for T2 axial, DWI, ADC, and DCE classification. +5. We'll later try this out on other prostate collections. + +# Illustrations + + + +Hiplot visualization of T2 weighted axial parameters from 5 different prostate cancer imaging collections in IDC + +![](https://github.com/NA-MIC/ProjectWeek/assets/59979551/420c4733-c27e-4ef8-87a9-ebc35bb8e224) + +Same hiplot but with rendering in the browser! 
+ +![](https://github.com/NA-MIC/ProjectWeek/assets/59979551/043542a8-99fb-42ad-8724-dc94588027c3) + +**** Workflow **** + +![PRODICOM](https://github.com/NA-MIC/ProjectWeek/assets/59979551/970116c8-faa0-4945-8d77-55f61f7ef042) + +# Background and References + +[GitHub repo](https://github.com/deepakri201/DICOMTagViz/) + +Some earlier work with parallel coordinates plots in Slicer: +* https://github.com/pieper/SlicerMultiMapper +* https://www.youtube.com/watch?v=Y4MyThyeIPs + +Some earlier work on sequence classification: +1. Ranjbar S, Singleton KW, Jackson PR, Rickertsen CR, Whitmire SA, Clark-Swanson KR, et al. A Deep Convolutional Neural Network for Annotation of Magnetic Resonance Imaging Sequence Type. J Digit Imaging. 2020 Apr;33(2):439–46. [doi:10.1007/s10278-019-00282-4](https://dx.doi.org/10.1007/s10278-019-00282-4) +2. Noguchi T, Higa D, Asada T, Kawata Y, Machitori A, Shida Y, et al. Artificial intelligence using neural network architecture for radiology (AINNAR): classification of MR imaging sequences. Jpn J Radiol. 2018 Dec;36(12):691–7. [doi:10.1007/s11604-018-0779-3](https://dx.doi.org/10.1007/s11604-018-0779-3) +3. Cluceru J, Interian Y, Lupo JM, Bove R, Butte A, Crane J. Automatic Classification of MR Image Contrast. In: ISMRM. 2020. Available from: https://archive.ismrm.org/2020/1804.html +4. Remedios S, Roy S, Pham DL, Butman JA. Classifying magnetic resonance image modalities with convolutional neural networks. In: Mori K, Petrick N, editors. Medical Imaging 2018: Computer-Aided Diagnosis. Houston, United States: SPIE; 2018. p. 89. Available from: https://www.spiedigitallibrary.org/conference-proceedings-of-spie/10575/2293943/Classifying-magnetic-resonance-image-modalities-with-convolutional-neural-networks/10.1117/12.2293943.full [doi:10.1117/12.2293943](https://dx.doi.org/10.1117/12.2293943) +5. Braeker N, Schmitz C, Wagner N, Stanicki BJ, Schröder C, Ehret F, et al. 
Classifying the Acquisition Sequence for Brain MRIs Using Neural Networks on Single Slices. Cureus. 2022 Feb;14(2):e22435. [doi:10.7759/cureus.22435](https://dx.doi.org/10.7759/cureus.22435) +6. Vieira de Mello JP, Paixão TM, Berriel R, Reyes M, Badue C, De Souza AF, et al. Deep Learning-based Type Identification of Volumetric MRI Sequences. In: 2020 25th International Conference on Pattern Recognition (ICPR). Milan, Italy: IEEE; 2021. p. 1–8. Available from: https://ieeexplore.ieee.org/document/9413120 [doi:10.1109/ICPR48806.2021.9413120](https://dx.doi.org/10.1109/ICPR48806.2021.9413120) +7. Mahmutoglu MA, Preetha CJ, Meredig H, Tonn J-C, Weller M, Wick W, et al. Deep Learning–based Identification of Brain MRI Sequences Using a Model Trained on Large Multicentric Study Cohorts. Radiology: Artificial Intelligence. 2024 Jan;6(1):e230095. [doi:10.1148/ryai.230095](https://dx.doi.org/10.1148/ryai.230095) +8. Kasmanoff N, Lee MD, Razavian N, Lui YW. Deep multi-task learning and random forest for series classification by pulse sequence type and orientation. Neuroradiology. 2023 Jan 1;65(1):77–87. [doi:10.1007/s00234-022-03023-7](https://dx.doi.org/10.1007/s00234-022-03023-7) +9. Svdvoort. DeepDicomSort. 2022. Available from: https://github.com/Svdvoort/DeepDicomSort +10. van der Voort SR, Smits M, Klein S, for the Alzheimer’s Disease Neuroimaging Initiative. DeepDicomSort: An Automatic Sorting Algorithm for Brain Magnetic Resonance Imaging Data. Neuroinform. 2021 Jan 1;19(1):159–84. [doi:10.1007/s12021-020-09475-7](https://dx.doi.org/10.1007/s12021-020-09475-7) +11. HD-SEQ-ID. www.neuroAI-HD.org; 2023. Available from: https://github.com/NeuroAI-HD/HD-SEQ-ID +12. HeuDiConv. NIPY developers; 2022. Available from: https://github.com/nipy/heudiconv +13. Gai ND. Highly Efficient and Accurate Deep Learning–Based Classification of MRI Contrast on a CPU and GPU. J Digit Imaging. 2022 Jun 1;35(3):482–95. 
[doi:10.1007/s10278-022-00583-1](https://dx.doi.org/10.1007/s10278-022-00583-1) +14. Cluceru J, Lupo JM, Interian Y, Bove R, Crane JC. Improving the Automatic Classification of Brain MRI Acquisition Contrast with Machine Learning. J Digit Imaging. 2023 Feb;36(1):289–305. [doi:10.1007/s10278-022-00690-z](https://dx.doi.org/10.1007/s10278-022-00690-z) +15. Mello JPV de. Jpvmello/type-identification-mri-sequences. 2023. Available from: https://github.com/Jpvmello/type-identification-mri-sequences +16. Kasmanoff N. MRI Content Detection. 2022. Available from: https://github.com/nkasmanoff/mri-content-detection +17. MRI Sequence Classification - No overlapping. Available from: https://docs.google.com/document/d/1UmE7jFfWaAxsS6wXodPRkdKeyDAcVdEWXfMKGzGiiKk/edit?usp=sharing&usp=embed_facebook +18. T1 vs T2 MRI | T1and T2 MRI image comparison. mrimaster. Available from: https://mrimaster.com/t1-vs-t2-mri/ +19. Pizarro R, Assemlal H-E, De Nigris D, Elliott C, Antel S, Arnold D, et al. Using Deep Learning Algorithms to Automatically Identify the Brain MRI Contrast: Implications for Managing Large Databases. Neuroinform. 2019;17(1):115–30. [doi:10.1007/s12021-018-9387-8](https://dx.doi.org/10.1007/s12021-018-9387-8) +20. Gauriau R, Bridge C, Chen L, Kitamura F, Tenenholtz NA, Kirsch JE, et al. Using DICOM Metadata for Radiological Image Series Categorization: a Feasibility Study on Large Clinical Brain MRI Datasets. J Digit Imaging. 2020 Jun;33(3):747–62. 
[doi:10.1007/s10278-019-00308-x](https://dx.doi.org/10.1007/s10278-019-00308-x) diff --git a/PW40_2024_GranCanaria/Projects/DicomStructuredReportsForWsiAndConversionToSegmentationObject/README.md b/PW40_2024_GranCanaria/Projects/DicomStructuredReportsForWsiAndConversionToSegmentationObject/README.md new file mode 100644 index 000000000..05fa3d682 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/DicomStructuredReportsForWsiAndConversionToSegmentationObject/README.md @@ -0,0 +1,117 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: DICOM Structured Reports for WSI and conversion to segmentation object +category: Cloud / Web +presenter_location: In-person + +key_investigators: + +- name: Maximilian Fischer + affiliation: DKFZ + country: Germany + +- name: Philipp Schader + affiliation: DKFZ + country: Germany + +- name: Marco Nolden + affiliation: DKFZ + country: Germany + +- name: Daniela Schacherer + affiliation: Fraunhofer MEVIS + country: Germany + +- name: David Clunie + affiliation: PixelMed + country: USA + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +- name: Chris Bridge + affiliation: MGH + country: USA + +- name: Klaus Maier-Hein + affiliation: DKFZ + country: Germany + +--- + +# Project Description + + + +In this project, we want to investigate and compare several approaches to convert DICOM Structured reports into DICOM segmentation objects. We use the integrated SLIM Viewer in Kaapana to create Structured Reports on DICOM WSI files and also want to compare the QuPath viewer as additional DICOM WSI viewer in Kaapana. + +## Objective + + + +1. Objective A: Examine existing libraries to render the coordinates from the SR file as segmentation. +2. Objective B: Examine conversion methods to create DICOM annotation objects from the SR files +3. Objective C: Evaluate file formats of QuPath DICOM WSI annotations. +4. Objective D: Explore integration capabilities of QuPath as Viewer in Kaapana + +## Approach and Plan + + + +1. 
Familiarize with the highdicom library +2. Compare results with custom visualizations +3. Evaluate conversion tools for SR files +4. Test PACS connectivity of QuPath + +## Progress and Next Steps + +1. Familiarized with the highdicom library for accessing the SR coordinates +2. Switching to a more generic way for rendering the coordinates +```python +for i in range(AnnotatetObjects3): + Type=FileFull.ContentSequence[13].ContentSequence[i].ContentSequence[2].ConceptCodeSequence[0].CodeMeaning + Coords3=FileFull.ContentSequence[13].ContentSequence[i].ContentSequence[3].GraphicData + x_coords3 = [int((Coords3[i]-Origin_X)/spacing_x) for i in range(0, len(Coords3), 3)] + y_coords3 = [int((Coords3[i]-Origin_Y)/spacing_y) for i in range(1, len(Coords3), 3)] + if Type=='Tissue': + tissue_list.append([x_coords3,y_coords3]) + color=(255,0,0) + else: + tumor_list.append([x_coords3,y_coords3]) + color=(0,0,255) + contours = np.array([[[abs(x), abs(y)] for x, y in zip(x_coords3, y_coords3)]], dtype=np.int32) +``` +. +```python +sr = hd.sr.srread("/Users/maximilianfischer/ProjectsMountDir/CMU-1/Consistent/SR/DICOM/1E447C90/E88940CE/4E17833F.dcm") +groups = sr.content.get_planar_roi_measurement_groups() +groups[0].roi +groups[0].roi.value +coords=[] +for x in range(groups[0].roi.value.shape[0]): + coords.append([groups[0].roi.value[x][0],groups[0].roi.value[x][1]]) +``` +**Much shorter code!** +5. Rendering still done with opencv, but planning to switch to rasterio. +6. Bioformats as new DICOM conversion library to be supported in Kaapana (currently mostly based on PixelMed.) + +# Illustrations + + +![pw38-dicom-wsi-conversion-and-model](./Visualization.png) + +*No response* + +# Background and References + + + +This project is the continuation from last years' Project Weeks. 
+[PW 38](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/IDC_DICOM_WSI_workflow/) +[PW 39](https://projectweek.na-mic.org/PW39_2023_Montreal/Projects/HistologyAiModelsImportedIntoIdc/) +[Kaapana](https://kaapana.readthedocs.io/en/stable/) diff --git a/PW40_2024_GranCanaria/Projects/DicomStructuredReportsForWsiAndConversionToSegmentationObject/Visualization.png b/PW40_2024_GranCanaria/Projects/DicomStructuredReportsForWsiAndConversionToSegmentationObject/Visualization.png new file mode 100644 index 000000000..f079a03d6 Binary files /dev/null and b/PW40_2024_GranCanaria/Projects/DicomStructuredReportsForWsiAndConversionToSegmentationObject/Visualization.png differ diff --git a/PW40_2024_GranCanaria/Projects/EnablingPytorch3DOnWindowsAndOptimizingMinicondaForSlicerExtensions/README.md b/PW40_2024_GranCanaria/Projects/EnablingPytorch3DOnWindowsAndOptimizingMinicondaForSlicerExtensions/README.md new file mode 100644 index 000000000..29d6dafed --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/EnablingPytorch3DOnWindowsAndOptimizingMinicondaForSlicerExtensions/README.md @@ -0,0 +1,86 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Enabling PyTorch3D on Windows and Optimizing Miniconda for Slicer Extensions +category: Infrastructure +presenter_location: Online + +key_investigators: + +- name: Leroux Gaelle + affiliation: University of Michigan + country: USA + +- name: Claret Jeanne + affiliation: University of Michigan + country: USA + +- name: Cevidanes Lucia + affiliation: University of Michigan + country: USA + +- name: Hutin Nathan + affiliation: CPE Lyon + country: France + +- name: Allemand David + affiliation: Kitware + country: USA + +- name: Prieto Juan Carlos + affiliation: University of North Carolina + country: USA + +--- + +# Project Description + + + +This project focuses on enhancing compatibility and usability in two key areas. Firstly, we aim to enable the use of PyTorch3D on the Windows platform. 
By leveraging the Windows Subsystem for Linux (WSL2) and a virtual Miniconda environment, we intend to bypass the traditional limitations and provide Windows users with full access to PyTorch3D's capabilities. Secondly, the project seeks to improve the integration of Miniconda with Slicer extensions. Our goal is to simplify the process of creating and managing virtual environments for Slicer extensions, thereby making the procedure more intuitive. This will not only ease the use of various analytical tools and libraries but also streamline the setup process within WSL, especially for tools incompatible with Windows. This approach aims to bridge the gap in functionality and user experience across different platforms. + +## Objective + + + +Our project aims to achieve two primary objectives: + +Operationalizing PyTorch3D on Windows: The first goal is to make PyTorch3D, typically unsupported on the Windows platform, fully functional. We plan to employ the Windows Subsystem for Linux (WSL2) combined with a virtual Miniconda environment to overcome this limitation. This strategy is designed to provide Windows users with complete access to the extensive functionalities of PyTorch3D. + +Improving Miniconda Integration for Slicer Extensions: Our second objective is to enhance the use of Miniconda as a virtual environment manager specifically for Slicer extensions. We aim to streamline the process of creating and managing new virtual environments that are utilized by Slicer extensions, making the procedure more intuitive and user-friendly. This advancement will facilitate the use of specialized libraries required for a variety of analytical tools, which are currently not integrable directly into Slicer. Additionally, this approach will assist in the setup of Miniconda3 and the creation of new environments within WSL, particularly for tools that are not available on Windows. 
+ +## Approach and Plan + + + +* Investigate PyTorch3D and Windows compatibility, pinpointing causes of incompatibility. +* Catalog PyTorch3D dependencies for WSL2 and outline requisite system configurations. +* Operationalize PyTorch3D on Windows using WSL2 and Miniconda. +* Develop an Automated Installer for WSL2 Setup on Windows. +* Create a Module to Streamline Miniconda for Slicer Extensions + +## Progress and Next Steps + + + +* Completed Initial Research on PyTorch3D and WSL2 Compatibility +* Successfully ran PyTorch3D on WSL2 +* Created an installer for WSL2 + Next Step : +* Development of a new Slicer module for managing Miniconda + +# Illustrations + + + +*No response* + +# Background and References + + + +*No response* diff --git a/PW40_2024_GranCanaria/Projects/EvaluationOfAiMethodsForProstateCancerSegmentation/README.md b/PW40_2024_GranCanaria/Projects/EvaluationOfAiMethodsForProstateCancerSegmentation/README.md new file mode 100644 index 000000000..6e200f80f --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/EvaluationOfAiMethodsForProstateCancerSegmentation/README.md @@ -0,0 +1,150 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: 'Evaluation of AI methods for prostate cancer segmentation ' +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Deepa Krishnaswamy + affiliation: Brigham and Women's Hospital + country: USA + +- name: Patrick Remerscheid + country: Switzerland + +- name: Cosmin Ciausu + affiliation: Brigham and Women's Hospital + country: USA + +- name: Brianna Burton + affiliation: 3D Side SA + country: Belgium + +- name: Laura Levy + affiliation: + country: Switzerland + +- name: Bálint Kovács + affiliation: DKFZ + country: Germany + +- name: Umang Pandey + affiliation: Universidad Carlos III de Madrid + country: Spain + +- name: Andrey Fedorov + affiliation: Brigham and Women's Hospital + country: USA + + + +--- + +# Project Description + + + +When it 
comes to evaluating AI methods, it's important to have reproducible code and methods. We are interested in evaluating state-of-the-art AI methods for prostate cancer segmentation on data in Imaging Data Commons. Additionally, we have a non-public BWH internal dataset that we would like to use for evaluation. + +## Objective + + + +1. We first need to identify a set of publicly available AI methods that we can use for prostate cancer segmentation. +2. We then need to identify datasets in IDC that we can use for evaluation, preferably ones with expert delineated segmentations. +3. Then, we will run inference using those methods, convert our output to a standard format (hopefully DICOM SEG) and visualize in OHIF and Slicer. +4. We will make our code and results publicly available in GitHub. + +## Approach and Plan + + + +1. We will do a literature/code repo search of the methods. +2. We will search for appropriate data in IDC using the portal/BigQuery. +3. We will create a sample set of data from multiple prostate imaging collections, including T2, DWI, ADC and ground truth segmentations. +4. We will run inference using the 4 methods on these sample data sets and visualize in Slicer. +5. If possible we will perform quantitative evaluation. + +## Progress and Next Steps + + + +1. We have identified two major branches of methods we can use, baseline methods from the [PICAI challenge](https://pi-cai.grand-challenge.org/) and two methods using MONAI. + + - PICAI has two baseline methods we can run: supervised nnUNet, semi-supervised nnDetection + - MONAI has two methods we can run: a [MONAI bundle](https://github.com/kbressem/prostate158) and a [MONAI Deploy MAP](https://github.com/Project-MONAI/research-contributions/tree/main/prostate-mri-lesion-seg) + +2. 
We have identified two datasets in IDC that can be used for evaluation: + + - [QIN-Prostate-Repeatability ](https://portal.imaging.datacommons.cancer.gov/explore/filters/?collection_id=qin_prostate_repeatability) + - [Prostate-MRI-US-Biopsy ](https://portal.imaging.datacommons.cancer.gov/explore/filters/?collection_id=prostate_mri_us_biopsy) + +3. We have started evaluation of the PICAI supervised nnUNet baseline model and the MONAI Deploy MAP on their training data. +4. We will take [Cosmin's work from PW38](https://github.com/ImagingDataCommons/idc-prostate-mri-analysis/tree/main/pcDetectionBundle/configs) on the MONAI bundle for prostate cancer segmentation. We'll make sure it works, and evaluate it on publicly available datasets including IDC data (continuation of [this](https://github.com/ImagingDataCommons/idc-prostate-mri-analysis/blob/main/MONAI_prostate158_cancer_qin_prost_rep.ipynb) notebook). +5. We have evaluated the two PICAI models on almost all of the 3 subsets of data. +6. We've run the MONAI bundle on a subset of the 3 collections. +7. We've also run the MONAI deploy MAP on a subset of the 3 collections. + +# Illustrations + + + +*** PICAI nnUNet supervised *** + +ProstateX: The ground truth lesion is in green, and the predicted lesion in red. + +PICAI_nnUNet_ProstateX + +*** PICAI nnDetection semi-supervised *** + +ProstateX: The ground truth lesion is in green, and the predicted bounding box in white. + +PICAI_nnDet_ProstateX + +QIN-Prostate-Repeatability: The ground truth lesion is in green, and the predicted bounding box in white. + +nnunet_bounding_box + +*** MONAI Deploy MAP *** + +Using the MONAI Deploy MAP pre-trained model for prostate and lesion segmentation on a patient from Prostate-MRI-US-Biopsy. The ground truth lesion segmentation is on the left, and the predicted prostate gland segmentation and lesion segmentations are on the right. 
+ +![](https://github.com/NA-MIC/ProjectWeek/assets/59979551/c55ff897-e1e8-485c-b17f-0bd104f95a4e) + +Scrolling through slices of same patient as above: + +![](https://github.com/NA-MIC/ProjectWeek/assets/59979551/8fc9b45d-48ce-443d-a951-0345b6f913ea) + +Patient from QIN-Prostate-Repeatability: The ground truth lesion segmentation is on the left, and the predicted prostate gland segmentation and lesion segmentations are on the right. + +MONAI_deploy_map_QIN-Prostate-Repeatability_patient_1_study_2 + +Patient from ProstateX: The ground truth lesion segmentation is on the left, and the predicted prostate gland segmentation and lesion segmentations are on the right. + +![monai_deploy_prostatex_0000](https://github.com/NA-MIC/ProjectWeek/assets/59979551/af44b9d3-74dd-4b3e-90fd-80daf8685850) + +*** MONAI bundle *** + +Using the MONAI bundle and pretrained prostate158 model, on patients from ProstateX. The ground truth lesion is in green, and the predicted lesion in red. 
+ +monai_bundle_prostatex1 +monai_bundle_prostatex2 +monai_bundle_prostatex3 + +# Background and References + + + +Our github repo with notebooks (WIP): https://github.com/deepakri201/prostateSeg + +This is a continuation of the work that Cosmin did at PW38: https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/MONAI_IDC_PCa_detection/ + +IDC getting started tutorials: https://github.com/ImagingDataCommons/IDC-Tutorials/tree/master/notebooks/getting_started + +*No response* diff --git a/PW40_2024_GranCanaria/Projects/ExtendingRadiotherapyTreatmentPlanningCapabilitiesWithinSlicerrt/README.md b/PW40_2024_GranCanaria/Projects/ExtendingRadiotherapyTreatmentPlanningCapabilitiesWithinSlicerrt/README.md new file mode 100644 index 000000000..e41abaa18 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/ExtendingRadiotherapyTreatmentPlanningCapabilitiesWithinSlicerrt/README.md @@ -0,0 +1,81 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Extending Radiotherapy Treatment Planning Capabilities within SlicerRT +category: Other +presenter_location: In-person + +key_investigators: + +- name: Niklas Wahl + affiliation: DKFZ + country: Germany + +- name: Csaba Pinter + affiliation: EBATINCA + country: Spain + +- name: Francesca Spadea + affiliation: Karlsruhe Institute of Technology + country: Italy + +--- + +# Project Description + + + +We will extend the treatment planning capabilities of SlicerRT by upgrading the corresponding user interface to better separate plan optimization and dose calculation. Algorithms will be interfaced from the open source treatment planning toolkit matRad via its new Python extension pyRadPlan. +The goal is to allow full treatment planning on data loaded directly in Slicer, returning planned dose cubes for further analysis in Slicer. + +## Objective + + + +1. Python connection between SlicerRT ExternalBeamPlanning & pyRadPlan (matRad's Python interface) +2. 
Photon & Ion Dose calculation engines available within SlicerRT ExternalBeamPlanning +3. Updated SlicerRT ExternalBeamPlanning UI to better display planning workflow +4. Rudimentary treatment plan optimization capabilities within SlicerRT + +## Approach and Plan + + + +1. Evaluate existing internal prototype for SlicerRT / matRad Python Interface +2. Interface Forward dose calculation engines from matRad for photons and ions +3. Update ExternalBeamPlanning Infrastructure to represent four-step planning process in slicerRT: Geometry Definition, Inverse Dose precomputation, Optimization, Forward dose calculation (already existing within ExternalBeamPlanning module in SlicerRT). + +## Progress and Next Steps + +### Project week progress +1. Prototype for treatment planning with matRad Python interface cleaned up in SlicerRT +2. Enable forward calculation / conformal beam-wise planning using dose calculation and optimization as a dose engine +3. Create infrastructure within SlicerRT for separating treatment planning into dose influence matrix calculation and optimization by introducing PlanOptimizers +4. Prototype for storing dose influence matrices in BeamNodes using Eigen Sparse Matrices (ITKEigen3) + +### Next steps +1. Concatenate dose influence matrices on Plan level +2. 
Enable full IMRT within PlanOptimizers using dose influence matrix structure (maybe also implement a mock optimizer just applying uniform fluences) + +# Illustrations + + +### Prototype for beam-wise conformal planning: +Prostate plan with SlicerRT + +### New widget elements / infrastructure for inverse planning: +Widget Extension + +### Dose Influence storage accessible from Python for Beam Nodes: +Dose Influence Matrix accessibility + + +# Background and References + + + +*No response* diff --git a/PW40_2024_GranCanaria/Projects/FrameworkAndStepsOfGuidelinesForSlicerInternationalizationProjects/README.md b/PW40_2024_GranCanaria/Projects/FrameworkAndStepsOfGuidelinesForSlicerInternationalizationProjects/README.md new file mode 100644 index 000000000..b2dcfb15f --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/FrameworkAndStepsOfGuidelinesForSlicerInternationalizationProjects/README.md @@ -0,0 +1,75 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Framework and steps of guidelines for Slicer internationalization projects +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Eszter Asztalos-Zsembery + affiliation: University of Szeged + country: Szeged, Hungary + +- name: Attila Tanács + affiliation: University of Szeged + country: Szeged, Hungary + +- name: Attila Nagy + affiliation: University of Szeged + country: Szeged, Hungary + +- name: Imre J. Barabás + affiliation: Semmelweiss University + country: Budapest, Hungary + +- name: Frida Hauler + affiliation: Great Britain) + country: Steve Pieper (Isomics Inc, Cambridge, MA, USA + +--- + +# Project Description + + + +We aim to discuss the basics of Slicer translations, independent of the language barriers and translational tools used. +We are open for discussions, ideas and recommendations! + +## Objective + + + +Discuss and collect ideas about the topic with the members of the Slicer community. + +## Approach and Plan + + + +1. 
Create and possibly conduct a short survey on what directions we could take, what would be important to grasp. + +## Progress and Next Steps + + + +1. Created a form, and had it filled out by a good couple of people! + +# Illustrations + + +![kép](https://github.com/NA-MIC/ProjectWeek/assets/242559/7c4611cf-0a3e-4d40-8efd-67533899fd76) + +This is the [link to the survey](https://forms.office.com/pages/responsepage.aspx?id=MQputG9_E068Y92AXThwLNqIaLm_1KxNh48F9SJl0cVUQURLMERSVEdZMTNYWVpYMlYzUERUTUk2Ni4u) + +![kép](https://github.com/NA-MIC/ProjectWeek/assets/242559/9c3178b3-1029-4eb6-bb8f-05974bc02a6a) + + +# Background and References + + + +*No response* diff --git a/PW40_2024_GranCanaria/Projects/IgteasyABeginnersQuestInto3DSlicerAndIgtWithQuickstartsFixesAndDemos/README.md b/PW40_2024_GranCanaria/Projects/IgteasyABeginnersQuestInto3DSlicerAndIgtWithQuickstartsFixesAndDemos/README.md new file mode 100644 index 000000000..51e4b4f09 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/IgteasyABeginnersQuestInto3DSlicerAndIgtWithQuickstartsFixesAndDemos/README.md @@ -0,0 +1,100 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: IGTEasy A Beginners Quest into 3D Slicer and IGT with Quickstarts, Fixes, and Demos +category: IGT and Training +presenter_location: In-person + +key_investigators: + +- name: Constantin Constantinescu + affiliation: University of Las Palmas de Gran Canaria + country: Spain + +- name: Jose Carlos Mateo Pérez + affiliation: University of Las Palmas de Gran Canaria + country: Spain + +- name: Pablo Sergio Castellano Rodríguez + affiliation: University of Las Palmas de Gran Canaria + country: Spain + +- name: Nayra Pumar Carreras + affiliation: Ebatinca S.L. 
+ country: Spain + +- name: Juan Ruiz Alzola + affiliation: University of Las Palmas de Gran Canaria + country: Spain + +- name: David García Mato + affiliation: Apolo AI + country: Spain + +- name: Fatimetou Mohamed-Saleck + affiliation: University of Nouakchott + country: Mauritania + +- name: Marie Ndiaye + affiliation: University Assane Seck of Ziguinchor + country: Senegal +--- + +# Project Description + + + +Addressing the steep learning curve for beginners in 3D Slicer and Image-Guided Therapy (IGT) is crucial for fostering wider adoption and encouraging newcomers to explore the capabilities of these powerful tools. To mitigate the initial challenges, we propose a systemtic approach that includes the development of quickstart guides, a troubleshooting library, and the creation of demo projects. + +## Objective + + + +1. Facilitating the initiation of projects for students and newcomers to Slicer IGT. +2. Synthesizing information from various tutorials. +3. Introducing learning projects. +4. Proposing solutions for the most common installation and configuration issues. + +## Approach and Plan + + + +1. Connect all tracking devices, including the installation and configuration of the appropriate drivers and software, and document each step thoroughly. +2. Perform calibration steps for both the electromagnetic and optical tracking devices, and document the process. +3. Attempt to replicate a working project on a different computer, documenting the steps taken. +4. Review existing source documents and consolidate them into a single document, incorporating our documented experiences. +5. Develop demo projects using the established infrastructure. +6. Compile all issues encountered during these processes into a troubleshooting library. + +## Progress and Next Steps + + +Progress: +1. Setup a wiki page accessible online on https://slicer.scanstart.ro. +2. Defined a workflow for starting with Slicer IGT +3. 
Developed tutorials for Installing Optitrack(Motive), Trakstar(Cubes), Telemed(Ultrasound), Plus Server and 3D Slicer IGT +5. Developed tutorials for the calibration of tools using the optical tracker, in English and French +6. Explanations about Plus server, Scene recording, Transform Hierarchy +7. Tested our doumented method with Marie, a newbee in 3D Slicer + +Next steps: +1. Perform the calibration with Trakstart Electromagnetic Tracker and create the coresponding tutorial +2. Translate everything in Spanish +3. Adding a tutorial for Metrics +4. Adding a tutorial for developing a user interface + +# Illustrations + + + +*No response* + +# Background and References + + + +*No response* diff --git a/PW40_2024_GranCanaria/Projects/ImageMultimodalDatabaseWithAiAssistedAnnotation/README.md b/PW40_2024_GranCanaria/Projects/ImageMultimodalDatabaseWithAiAssistedAnnotation/README.md new file mode 100644 index 000000000..0cb0497e1 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/ImageMultimodalDatabaseWithAiAssistedAnnotation/README.md @@ -0,0 +1,141 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Linkage of Multimodal Medical Databases using FHIR +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Maria Monzon + affiliation: ETHz + country: Switzerland + +- name: Catherine Jutzeler + affiliation: ETHz + country: Switzerland + +- name: Umang Pandey + affiliation: Universidad Carlos III de Madrid + country: Spain + +- name: Philipp Schader + affiliation: DKFZ + country: Germany + +- name: Odile Elias + affiliation: DKFZ + country: Germany + +- name: Marco Nolden + affiliation: DKFZ + country: Germany +--- + +# Project Description + + + +Despite various existing solutions for DICOM Medical Database, (e.g. Kaapana, XNAT server, ...) these systems lack capablities to accompany the imaging data with medical records. 
+However, (to the best of our limited knowledge) there exist very few open sources solutions that can store multimodal data (DICOM, Electronic Health Records, REDCAP questionnaires & FHIR) for clinical research. + +FHIR represents a globally recognized standard for the interoperable exchange and integration of medical information. Adopting FHIR in medical image analysis enables more in-depth analysis by combining clinical information with imaging data. Moreover, it facilitates the representation of analysis results in a standardized format. + +Initial steps towards adopting FHIR within medical image analysis platforms have been taken during the [38th Project week](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/KaapanaClinicalData/). + +However, aspects like the general mapping of tabular data into FHIR resources remains challenging which should be addressed in this project as well as the joint visualization of results from computational image analysis together with clinical data from the patients. + +The project would comprise set up a PACS as well as a FHIR Server for storing of multimodal patient data for further AI analysis. This data storage will be designed to assist AI enabled interactive annotation. Additionally it would include visualizing capabilities (e.g., descriptive statistics). Ideally the database will also support data retieval API for data visualization dashboard. In the end the prototype should be integrated in Kaapana. + + + +## Objective + + + +This project aims to develop an open-source and ideally community-maintained solution for: + +1. Data Standardization: All collected data will be converted and standarized into research format (e.g. FHIR format) +2. Having a multimodal research database or integration of medical records. +3. Visualization tools using the system enable exploration of the multimodal dataset +4. Ideally: The infrastructure would enable connection to annotation tools such as available Kaapana / MONAI Label. 
+ + +## Approach and Plan + + + +1. Select and download a suitable multimodal Dataset ([TCIA NSCLC](https://wiki.cancerimagingarchive.net/display/Public/NSCLC-Radiomics)) +2. Creating a local development setup + * Setup a local ORTHANC PACS server + * Setup a local HAPI FHIR server +3. Apply a quantitative image analysis method (radiomics) to the dataset +4. Mapping of clinical & imaging data into the FHIR standard + * Retrieving of DICOM data via DICOM web from the PACS and map it into FHIR ImagingStudy resources + * Creation of a tool to be able to map clinical data from a CSV file into fitting FHIR resources + * Mapping radiomics results into FHIR objects + * Connecting all of the three sources into a FHIR server +5. Visualize the information joined together to get an overview over the cohort in the dataset now enriched with quantitative image analysis results +6. Integrate FHIR server and workflows into Kaapana + +## Progress and Next Steps + + + +1. Creation of a [Github repository](https://github.com/pschader/NAMIC-PW24-Multimodel-Medical-Database) for joint development +2. Choosing the ([TCIA NSCLC](https://wiki.cancerimagingarchive.net/display/Public/NSCLC-Radiomics)) dataset for prototyping +3. Setup and configuration of the local development environment via Docker compose +4. Investiagtion how to map tabular data into FHIR resources + * [Fair4Health Data curation tool](https://github.com/fair4health/data-curation-tool): Works good for producing large scale mappings of tabular data into FHIR resources, but strongly dependant on terminology server. For our simple use case of providing researches with a fast tool to map multimodal data into FHIR resources it appears to be too complex. + * Creation of a simple webapp for loading CSV files and mapping the data into Patient & Observation FHIR resources. +5. 
Mapping of the DICOM resources into FHIR objects + * Creating a python script to fetch DICOM object from DICOM web API of the local ORTHANC server + * Map the DICOM metadata of the studies to FHIR ImagingStudy resources using [FHIR resources python library](https://pypi.org/project/fhir.resources/) +6. Create radiomics results from the segmentations of the dataset using Kaapana and the radiomics workflow and map those individual values into FHIR Observation resources + +Next steps: +1. Upload the FHIR data onto a FHIR server +2. Visualization of the dataset +3. Integration into Kaapana + +# Illustrations + +## Approach + +![ProjectWeekOverview](https://github.com/NA-MIC/ProjectWeek/assets/19309110/5bbcf1ee-c791-4f54-afc5-99c8e382c993) + + +## Mapping Tool + + +![FinalMMDGif](https://github.com/NA-MIC/ProjectWeek/assets/49638920/5daf092e-f3bd-4972-9320-373deee70c67) + + +## Overview +![DiagramMMDFHIR](https://github.com/NA-MIC/ProjectWeek/assets/49638920/d8becc40-fd2a-4e15-a9d2-5035c3480ddb) + + + + + + +# Background and References + + + +* We can use the MIMIC-IV Dataset with genertaed in this publication: + +* Orthanc database mongoDB plugin: +* [FHIR Server integration in Kaapana from PW38](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/KaapanaClinicalData/) +* [Kaapana](https://www.kaapana.ai/) +* [FHIR](https://www.hl7.org/fhir/) +* [FHIR resources python library](https://pypi.org/project/fhir.resources/) +* [Fair4Health Data curation tool](https://github.com/fair4health/data-curation-tool) +* Datasets: [TCIA NSCLC](https://wiki.cancerimagingarchive.net/display/Public/NSCLC-Radiomics) +* Project repository: [Github repository](https://github.com/pschader/NAMIC-PW24-Multimodel-Medical-Database) diff --git a/PW40_2024_GranCanaria/Projects/ImagineHive/README.md b/PW40_2024_GranCanaria/Projects/ImagineHive/README.md new file mode 100644 index 000000000..5e9d2d649 --- /dev/null +++ 
b/PW40_2024_GranCanaria/Projects/ImagineHive/README.md @@ -0,0 +1,104 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: ImagineHive +category: Cloud / Web +presenter_location: In-person + +key_investigators: +- name: Davide Punzo + affiliation: Freelancer, DNA HIVE + country: France + +- name: Mauro Domínguez + affiliation: Freelancer, DNA HIVE + country: Argentina + +- name: Andras Lasso + affiliation: Perk Lab, Queen's University + country: Canada + +- name: Vahan Simonyan + affiliation: DNA HIVE + country: USA + +- name: Aram Petrosyan + affiliation: DNA HIVE + country: USA + +- name: Jeffrey W Milsom + affiliation: Weill Cornell Medicine + country: USA + +- name: Bradley B Pua + affiliation: Weill Cornell Medicine + country: USA + +- name: Art Sedrakyan + affiliation: Weill Cornell Medicine + country: USA + + +--- + +# Project Description + +![image](https://github.com/NA-MIC/ProjectWeek/assets/7985338/dc3f2fbf-9057-4caf-9fb9-66d42e6394bd) + +[ImagineHive](https://www.dnahive.com/) is a data management and analytics platform for hospital environments consisting of: + +1. Web frontend to navigate, filter and edit patients data. +1. HIVE server backend for storing and sharing data and run data analytics and processing workloads. HIVE is a massively parallel distributed computing environment where the distributed storage library is linked with computational. +1. SlicerHIVE, a Slicer-based app that shows up in the web browser and offers 3D image visualization and analysis: + * Browse and retrieve data from hospital PACS. + * Viewer and markup tools for enhanced teamwork. + * Automated segmentation tools. + +ImagineHive is currently used at the Weill Cornell Medicine/Presbyterian NY hospital (**CI3: center for intelligent image guided interventions at New York Presbyterian Hospital, New York, NY, USA**) for clinical pre-operation review and planning. + +## Objective + +1. Present ImagineHive. Get feedback. +1. 
Learn about user needs. +1. Foster collaborations with potential users and similar development efforts. + +## Approach and Plan + +1. Have a meetings/demos with people interested. + +## Progress and Next Steps + +1. Update of Slicer's DICOM browser to be more friendly for patient-oriented clinical workflows (visual appearance, better responsiveness - see [details](https://projectweek.na-mic.org/PW40_2024_GranCanaria/Projects/SlicerVisualDICOMBrowser/). +2. We had a meeting on Tuesday where we discussed the integration of customized 3DSlicer into web applications. +3. We presented a demonstration of ImageHIVE on Thursday and collected feedback. + +# Illustrations + +### HIVE front-end: + +| Patient selection | Imaging | +| --- | --- | +| | | + +### SlicerHIVE: + +| Workspace selector | Visual DICOM browser | +| --- | --- | +| | | + +| Segmentation tools | Viewer | +| --- | --- | +| | | + +### HIVE back-end: + +

+ +

+

source: Vahan et al. 2016

+ +# Background and References +- [HIVE paper](https://doi.org/10.1093/database/baw022) +- [HIVE FDA](https://github.com/FDA/fda-hive) diff --git a/PW40_2024_GranCanaria/Projects/ImplementOpenusdOutputIntoTheOpenanatomyExportExtension/README.md b/PW40_2024_GranCanaria/Projects/ImplementOpenusdOutputIntoTheOpenanatomyExportExtension/README.md new file mode 100644 index 000000000..afa37441e --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/ImplementOpenusdOutputIntoTheOpenanatomyExportExtension/README.md @@ -0,0 +1,112 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Implement OpenUSD output into the OpenAnatomy Export extension +category: Cloud / Web +presenter_location: In-person + +key_investigators: + +- name: Rudolf Bumm + affiliation: KSGR + country: Switzerland +- name: Andras Lasso + affiliation: Queen's University + country: Canada +- name: Andres Diaz-Pinto + affiliation: NVIDIA & King's College London + country: United Kingdom +- name: Steve Pieper + affiliation: Isomics Inc + country: USA + +--- + +# Project Description + + + +This project aims to implement the export of OpenUSD files in the OpenAnatomyExport 3DSlicer extension. +OpenUSD files can be imported into NVIDIA Omniverse. + +OpenUSD, or Universal Scene Description, is an advanced framework for representing and handling 3D scenes and animations. Developed initially by Pixar, OpenUSD addresses the complexities involved in creating computer graphics for films, games, industrial engineering, and scientific experimentation, which often require managing large amounts of 3D data. + +NVIDIA Omniverse is a platform designed for real-time collaboration and physically accurate simulation in 3D workflows. Essentially, it's a tool for creating and operating virtual worlds, offering a shared space for creators, designers, and engineers. 
Here are some key aspects of NVIDIA Omniverse: + +**Real-Time Collaboration**: One of the main features of Omniverse is its ability to enable multiple users to collaborate in real-time on the same project. This is especially useful in fields like game development, architectural visualization, industrial design, and more. + +**Physically Accurate Simulation**: The platform provides tools for accurate physical simulation of materials, lighting, and environments. This allows for incredibly realistic rendering and animation, useful in fields that require high-fidelity visualizations. + +**Compatibility and Interoperability**: Omniverse is designed to be compatible with a wide range of software tools commonly used in 3D design and development. It supports a variety of file formats and has integrations with popular design software like Autodesk Maya, Adobe Photoshop, and others. + +**AI Integration:** NVIDIA has integrated various AI capabilities into Omniverse, which can assist in tasks like object recognition, scene understanding, and even automated 3D asset generation. + +**Ray Tracing and Advanced Rendering**: Powered by NVIDIA's RTX technology, Omniverse offers advanced ray tracing capabilities, resulting in highly realistic lighting and reflections. + +**Use Cases and Applications**: The platform is aimed at a range of industries, including animation and film, architecture, engineering, game development, and more. It can be used for creating virtual prototypes, digital twins, animated content, and interactive experiences. + +**Extension and Customization**: Developers can extend the capabilities of Omniverse through custom plugins and extensions, allowing for tailored solutions for specific industry needs. + +**Cloud and Edge Computing**: Omniverse can leverage cloud and edge computing, enabling large-scale simulations and collaboration across different geographical locations. 
+ +## Objective + + + +Implement OpenUSD export in 3D Slicer + +## Approach and Plan + + + +We want to discuss, improve and then merge the pull request for OpenAnatomy. + +## Progress and Next Steps + + + +Rudolf implemented the necessary code, changed the UI and created a Pull request + +We ran into several problems realizing this, finding the right apps in OMNIVERSE and in the end we work with OMNIVERSE Create app. +This loads a USDC file which can then be modified in Omniverse. +There only needs to be one strong server driving OMNIVERSE (>RTX 3090 with 24 GB VRAM - thank you AWS for providing the server instance), underpowered clients can connect to the server via IP adress or even from the phone (install OMNIVERSE Sreaming app) +We are in the process of testing that all out, expect updates to this page in the next weeks. + +![image](https://github.com/NA-MIC/ProjectWeek/assets/18140094/864ebdee-da23-43cb-b9fb-5fc9b41f5ae2) + +Taking apart a 3D Slicer lung andtumor segmentation and visualize it in Omniverse: + + + + + + +# Illustrations + + + +OpenAnatomy extension with new feature + +![image](https://github.com/NA-MIC/ProjectWeek/assets/18140094/61b24da4-0513-4e29-a499-a75c76542f5a) + +NVIDIA Omniverse + +![image](https://github.com/NA-MIC/ProjectWeek/assets/18140094/e86951da-fdca-486e-b204-468d2a51c223) + +# Background and References + +- [Universal Scene Description documentation](https://openusd.org/release/index.html) +- [Omniverse Platform](https://www.nvidia.com/en-us/omniverse/) +- [Pull request](https://github.com/PerkLab/SlicerOpenAnatomy/pull/19) +- Live sync: + - Requires [nucleus server](https://docs.omniverse.nvidia.com/nucleus/latest/workstation/installation.html) + - Would require development of a [connector for Slicer](https://docs.omniverse.nvidia.com/connect/latest/developing-connectors.html) See [list of applications](https://docs.omniverse.nvidia.com/connect/latest/overview.html) that 
support live sync. [Paraview already supports basic (export only) Omniverse live sync](https://docs.omniverse.nvidia.com/connect/latest/paraview.html). + - [Tutorials](https://docs.omniverse.nvidia.com/dev-guide/latest/tutorials.html) + - [Live sessions](https://docs.omniverse.nvidia.com/extensions/latest/ext_core/ext_live/sessions.html) diff --git a/PW40_2024_GranCanaria/Projects/ImprovingExperienceWithVolumetricSegmentationsInHighdicom/README.md b/PW40_2024_GranCanaria/Projects/ImprovingExperienceWithVolumetricSegmentationsInHighdicom/README.md new file mode 100644 index 000000000..c9f0bb377 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/ImprovingExperienceWithVolumetricSegmentationsInHighdicom/README.md @@ -0,0 +1,124 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Improving Experience with Volumetric Segmentations in Highdicom +category: DICOM +presenter_location: In-person + +key_investigators: + +- name: Chris Bridge + affiliation: MGH + country: Boston, USA + +- name: David Clunie + affiliation: PixelMed Publishing + country: USA + +- name: Steve Pieper + affiliation: Isomics + country: USA + +- name: Andrey Fedorov + affiliation: BWH + country: Boston, USA + +--- + +# Project Description + + + +### Background + +The DICOM Segmentation format is used to store image segmentations in DICOM format. Using DICOM Segmentations, which use the DICOM information model and can be communicated over DICOM interfaces, has many advantages when it comes to deploying automated segmentation algorithms in practice. DICOM Segmentations are especially flexible in many respects including the arrangement of the multiple frames that are present in the image. However, https://github.com/NA-MIC/ProjectWeek/issues/643 flexibility is sometimes criticized for making the processes of creating and parsing overly complex for "simple" cases, which are also typically the most commonly encountered. 
+ +In particular, the case of a "3D volume" is very commonly encountered within segmentations. By "3D volume" I refer specifically to a segmentation in which frames are parallel and regularly spaced along a vector normal to each frame, possibly with multiple segments and, subject to discussion, with empty frames omitted from the volume. + +This topic was discussed as part of a broader discussion on possible improvements to the Segmentation IOD for the last project week on https://github.com/NA-MIC/ProjectWeek/issues/643#issuecomment-1582677841 issue and in particular this comment is relevant. + +This issue has been raised as one of particular interest by the Imaging Data Commons team. @dclunie @fedorov @pieper +Proposal + +The proposed project is to investigate to what extent these issues can be simplified for users on two fronts: + + By better tooling for working with segmentations, by adding special cases to the [highdicom](https://github.com/ImagingDataCommons/highdicom) python library to deal with 3D volumes. + By determining, in consultation with other DICOM experts at project week, whether additions or clarifications within the standard may be warranted. + +### Details + +Here is my initial attempt to lay out some of the issues to consider (some is adapted from the thread mentioned above): + + A key goal is that a receiver of a DICOM segmentation object should be able to determine whether it is a volume without having to parse the per-frame metadata and perform calculations based on them. Additionally, in my opinion, it would be preferable to be able to determine the spacing between slices in all volume cases without needing to perform additional calculations. + There is already a mechanism by which the creator can convey that planes are equally spaced in 3D space by setting the [DimensionOrganizationType](https://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.17.html#table_C.7.6.17-1) to '3D'. 
This helps a bit, but does not require that SpacingBetweenSlices attribute be present in the SharedFunctionalGroupsSequence, so the receiver in the general case still needs to calculate the spacing for themselves. Neither does it actually require the ImageOrientationPatient to be present in the SharedFunctionalGroupsSequence. Furthermore, it does not specify which order the frames are stacked in (there are two options, top to bottom or bottom to top), nor does it specify whether multiple segments are allowed and if so what the dimension organization should be (i.e. does frame position or segment change most quickly as the frame number increases?). And lastly it is entirely optional to have the DimensionOrganizationType at all. So really the '3D' DimensionOrganization still leaves too much unnecessary flexibility in my opinion and is largely "toothless" without clarification or associated requirements. + The above is actually not specific to segmentations, but are general to all IODs that use multiple frames. The issues have been noted mostly with reference to segmentations perhaps because segmentations are one of the more widely used multiframe IODs. I mention this because any changes/clarifications to the standard would have wide-reaching effects. + +As a minimum (assuming no changes to the standard), I would propose that for project week I would make the following improvements to the highdicom library: + + In the segmentation constructor, add logic to determine whether the input segmentation could be recorded as "3D", and if so, automatically store it as such with the maximum amount of useful information available to the receiver (i.e. include the SpacingBetweenSlices attribute). + Add a mechanism for a user to pass a 3D numpy array with an affine matrix to the constructor and have it stored as a "3D" segmentation. 
+ Add a mechanism to determine whether a received segmentation is "3D", either using the DimensionOrganizationType, or if it is not present, by performing the required calculations on the metadata. If it is a volume, provide the user with a mechanism to access the affine matrix of the array, and retrieve a 3D numpy array of the segmentation with frames correctly sorted to match the affine matrix. + Hopefully work with the slicer team to prototype integrating this into slicer. + +## Objective + + + +1. Implementation in highdicom of checks to determine when a segmentation is a "volume" and populate optional metadata accordingly +2. Implementation in highdicom of mechanism to retrieve a spatially-sorted volumetric segmentation, along with its affine matrix, in a consistent way regardless of the way that the segmentation file is laid out. +3. Discuss and determine with DICOM experts and other interested parties whether changes to the DICOM standard may have a part to play in addressing some of the issues outlined above. + +## Approach and Plan + + + +1. Implement a PR to highdicom library to address the points above +2. Hold a meeting with relevant investigators (and anyone else interested) to decide on next steps (if any) regarding the standard + +## Progress and Next Steps + + + +### Changes To `highdicom` Library + +See the associated [pull request](https://github.com/ImagingDataCommons/highdicom/pull/277) (currently a work in progress as a draft PR). + +### Changes to the DICOM Standard + +After discussing with @dclunie and @pieper, we have agreed that a correction proposal shall be drafted by @dclunie as a next step. Here are my notes on what this could include: + +- Define DimensionOrganizationType value of “OTHER”. This will allow a receiver to know for sure that the image is *not* 3D (as opposed to simply not having the DimensionOrganizationType). Should this be “IRREGULAR” or something else? 
+- Precisely define what is meant by DimensionOrganizationType of “3D” in the case of the patient coordinate system: + - All planes have the same ImageOrientationPatient. The ImageOrientationPatient shall be factored out into the SharedFunctionalGroupsSequence (and not appear in the PerFramesFunctionalGroupsSequence). + - Planes shall be regularly spaced. The SpacingBetweenSlices must be found in the PixelMeasuresSequences within the SharedFunctionalGroups. All other pixel measures must also be shared between all frames. + - ImageOrientationPatient values shall follow the following rules (using numpy-like indexing): + + ImagePositionPatient[n+1] = ImagePositionPatient[n] + SpacingBetweenSlices * NormalVector + + Where NormalVector is a unit vector found as the vector cross product of the two direction cosines: + + NormalVector = ImageOrientationPatient[:3] x ImageOrientationPatient[3:] + + Note that this does imply that only one of the two possible ordering of planes is valid. + + - ImagePositionPatient must be used as the only dimension index. + +Notes: +- The above DOES NOT allow for the creation of BINARY Segmentations with more than one segment, since the Referenced Segment Number would need to be included as a further dimension index and there would need to be further sets of frames for each segment, which would break the strict spatial ordering. We currently feel that this is okay given that we hope that LABELMAP will become the dominant segmentation, and allowing no further dimensions considerably simplifies things. +- If we are precisely defining "3D" for the patient coordinate system, we should probably give "3D_TEMPORAL" the same treatment. Presumably this would be as above, except that there would some standardized time dimension index too, and a specified ordering of frames along the two dimensions (3d position + time). 
+ +# Illustrations + + + +*No response* + +# Background and References + + + +*No response* diff --git a/PW40_2024_GranCanaria/Projects/ImprovingProjectPageInfrastructure/README.md b/PW40_2024_GranCanaria/Projects/ImprovingProjectPageInfrastructure/README.md new file mode 100644 index 000000000..2b388ae96 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/ImprovingProjectPageInfrastructure/README.md @@ -0,0 +1,61 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Improving Project Page infrastructure +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Sam Horvath + affiliation: Kitware + country: USA + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware + country: USA + +--- + +# Project Description + + + +The Project Week team will continue to make improvements to the project page generation process + +## Objective + + + +1. Decrease complexity of project page creation +2. Increase speed of site deployment + +## Approach and Plan + + +1. Test out current page infrastructure +2. Identify any issues and pain points + +## Progress and Next Steps + + + +1. Identified a significant bug in the page generation for complicated pages: [issue](https://github.com/NA-MIC/ProjectWeek/issues/960). + 1. Content sections with multiple types of content (headings, lists, html) were being discarded by the parser +3. 
Reworked the issue parsing to be more flexible wrt the content of each section + +# Illustrations + + + +*No response* + +# Background and References + + + +*No response* diff --git a/PW40_2024_GranCanaria/Projects/IntegratingImfusionAlgorithmsInto3DSlicer/README.md b/PW40_2024_GranCanaria/Projects/IntegratingImfusionAlgorithmsInto3DSlicer/README.md new file mode 100644 index 000000000..c30fc3189 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/IntegratingImfusionAlgorithmsInto3DSlicer/README.md @@ -0,0 +1,87 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Integrating ImFusion Algorithms into 3D Slicer +category: Other +presenter_location: In-person + +key_investigators: + +- name: Federico Gnesotto + affiliation: ImFusion GmbH + country: Germany + +- name: Martin Matilla + affiliation: ImFusion GmbH + country: Germany + +--- + +# Project Description + + + +The ImFusionSDK (a collection of libraries) contains various algorithms that are applied on medical data such as MR, CT and Ultrasound. The algorithms range from calibration to image-registration, segmentation etc. + +Our proposal is to create an extension in 3D-Slicer that exposes ImFusion algorithms to the data loaded in the 3D Slicer software. As a starting point, we will employ the existing [ImFusion extension for 3D-Slicer](https://github.com/ImFusionGmbH/public-demos/tree/release/SlicerExtension) which performs CPU and GPU-accelerated registration between single and multi modal images. + +## Objective + + + +The project plan can be broken down into the following concrete objectives: + +1. Plugin Infrastructure: Creation of Module/Plugin infrastructure for 3D Slicer (`qSlicerLoadableModule`) + +2. Data Interface: Handling and conversion of DataSets (`vtkImageData` <-> `ImFusion::Data`) + +3. Algorithm Interface: accessing ImFusion’s list of compatible algorithms + +4. 
Algorithm Controller Integration: Integration of GUI and Logic for configuring algorithms (`ImFusion::AlgorithmController`) into 3D Slicer (`qSlicerAbstractModuleWidget`, `vtkSlicerModuleLogic`) + +## Approach and Plan + + + +### Plugin Infrastructure +- Integrate the ImFusionSDK libraries with the Slicer-Extension via CMake. + +### Data Interface +- Convert from 3D Slicer to ImFusionSDK data representation +- Convert from ImFusionSDK to 3D Slicer representation + +### Algorithm Interface +- Access the list of algorithms in the ImFusionSDK +- Filter by algorithms that are compatible with the loaded/selected data +- Launch algorithms from 3D Slicer + +### Algorithm Controller Integration +- Create an example algorithm controller in 3D Slicer +- Auto-generation in Slicer of ImFusion’s default algorithm-controllers + +## Progress and Next Steps + + + +- After hitting various walls, we decided to implement a client (3D Slicer) - server (ImFusion) solution via OpenIGTLink +- Workflow should be: + - Start ImFusion server from 3D-Slicer on startup + - Send data to ImFusion server -> get algorithms compatible with data + - Select algorithm via GUI + - (Configure algorithm via GUI) + - Run Algorithm -> Data/Nodes are updated in 3D Slicer +# Illustrations + + +![SlicerApp-real_uQhrmYiNWQ](https://github.com/NA-MIC/ProjectWeek/assets/79929002/3a24b080-6c2a-4a4f-bb0a-3bea6fe07182) +![RegistrationModule powered by ImFusion Libraries in 3D Slicer](https://github.com/NA-MIC/ProjectWeek/assets/79929002/46c7efcb-990f-4e1e-b403-0a08c025e109) + +# Background and References + + + +ImFusion's RegistrationModule for Slicer diff --git a/PW40_2024_GranCanaria/Projects/IntraoperativeSurgicalNavigationArIntegration/README.md b/PW40_2024_GranCanaria/Projects/IntraoperativeSurgicalNavigationArIntegration/README.md new file mode 100644 index 000000000..8db58846c --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/IntraoperativeSurgicalNavigationArIntegration/README.md 
@@ -0,0 +1,68 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Intraoperative Surgical Navigation AR integration +category: Other +presenter_location: In-person + +key_investigators: + +- name: Domenico, Riggio + affiliation: Karlsruhe Institute of Technology + country: Germany + +- name: Maria Francesca, Spadea + affiliation: Karlsruhe Institute of Technology + country: Germany + +--- + +# Project Description + + + +Design of Intra-operative surgical navigation framework combined with live calibrated anatomical models and Augmented Reality. + +## Objective + + + +1. Xray-free Surgical Navigation tool +2. Real time calibrated model integration +3. Augmented Reality visualization for intra-operative surgery + +## Approach and Plan + + + +1. Build dynamic model from pre-operative imaging techniques +2. Build Models-Patients calibration tools +3. AR integration (unity3d based) + +## Progress and Next Steps + + + +1. Investigation of possible Surgical AR applications +2. Idea sharing +3. Networking +4. Knowledge transfer +5. Marker less tracking investigation + + + +# Illustrations + + + +*No response* + +# Background and References + + + +*No response* diff --git a/PW40_2024_GranCanaria/Projects/InvestigateMonaiGenerativeModelingForImagingDataCommons/README.md b/PW40_2024_GranCanaria/Projects/InvestigateMonaiGenerativeModelingForImagingDataCommons/README.md new file mode 100644 index 000000000..099859021 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/InvestigateMonaiGenerativeModelingForImagingDataCommons/README.md @@ -0,0 +1,86 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Investigate MONAI generative modeling for Imaging Data Commons +category: Quantification and Computation +presenter_location: In-person + +key_investigators: + +- name: Steve Pieper + affiliation: Isomics + country: Inc. 
USA + +- name: Mikael Brudfors + affiliation: NVIDIA + country: UK + +- name: Andres Diaz-Pinto + affiliation: NVIDIA + country: UK + +- name: Andrey Fedorov + affiliation: BWH + country: US + +- name: Birgitt Peeters + affiliation: BIDMC + country: US + +- name: Umang Pandey + affiliation: UC3M + country: Spain + +--- + +# Project Description + + + +Generative learning refers to a class of techniques that process large amounts of training data into models that can be used for a variety of tasks such as synthetic data generation, image compression, enhancing resolution, classifying images, and content based retrieval. Recently a generative package has been added to the open source MONAI software. + +This project will explore the application of MONAI generative tools to data on the NCI Imaging Data Commons. + +## Objective + + + +1. Study the existing material and collect information from other interested parties +2. Make plans about what experiments would be interesting +3. If possible do some small experiments to better understand what's possible and what effort and resources would be required to scale up + +## Approach and Plan + + + +1. Explore creating an `IDCDataset` compatible with [MONAI Datasets](https://docs.monai.io/en/latest/data.html) using [idc-index](https://github.com/ImagingDataCommons/idc-index) to fetch data +2. Investigate adapting [tutorial code](https://github.com/Project-MONAI/tutorials/tree/main/generative) to work with IDC data +3. Try running some small tests, such as running the [superresolution tutorials](https://github.com/Project-MONAI/GenerativeModels/blob/main/tutorials/generative/2d_super_resolution/2d_stable_diffusion_v2_super_resolution.ipynb) on IDC data +4. Document how IDC can be used with MONAI for research + +## Progress and Next Steps + + + +1. Discussed the project with people at project week for feedback +2. 
Contacted Mark Graham of KCL, a MONAI generative researcher/developer for advice +3. Implemented a first pass combination of IDC data with MONAI generative notebook +4. Ran tests on colab and workstations +5. Adapted example data (8-bit) to dicom (16-bit) data to accommodate dynamic range differences +6. Explored parallel and federated approaches + +# Illustrations + + + +*No response* + +# Background and References + + + +* Paper describing the generative features in MONAI: diff --git a/PW40_2024_GranCanaria/Projects/InvestigatingTheRoleOfPublicImagingDataInResearch/README.md b/PW40_2024_GranCanaria/Projects/InvestigatingTheRoleOfPublicImagingDataInResearch/README.md new file mode 100644 index 000000000..704613256 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/InvestigatingTheRoleOfPublicImagingDataInResearch/README.md @@ -0,0 +1,107 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Investigating the role of public imaging data in research +category: Infrastructure +presenter_location: Remote + +key_investigators: + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +- name: Steve Pieper + affiliation: Isomics + country: USA + +- name: Tina Kapur + affiliation: BWH + country: USA + +- name: Marco Nolden + affiliation: DKFZ + country: USA + +- name: Ulrike Wagner + affiliation: Leidos + country: USA + +- name: Justin Kirby + affiliation: Leidos + country: USA + +- name: Leonard Nürnberg + affiliation: Maastricht University + country: Netherlands + +- name: David Clunie + affiliation: PixelMed Publishing + country: USA + +- name: Deepa Krishnaswamy + affiliation: BWH + country: USA + +- name: Vamsi Thiriveedhi + affiliation: BWH + country: USA + +- name: Rafael Palomar + affiliation: Oslo University Hospital + country: Norway + + +--- + +# Project Description + + + +Public image data repositories and collections/datasets have been developed for some time now, and they clearly play a role in imaging research, but there has been no 
systematic investigation of what are the main uses of those repositories and datasets. The goal of this project is to collect experience of the members of the NA-MIC Project Week community in utilizing public datasets in their everyday work. + +## Objective + + + +1. Document the primary uses of public data by the NA-MIC PW community and sources of the data. +2. Analyze the collected information to understand gaps and discuss prioritization of new features and data collection efforts based on that information. + +## Approach and Plan + + + +1. Design draft of a "survey" to collect the information. +2. Discuss the draft with a small group of interested parties and refine its content. +3. Conduct interviews with as many attendees as feasible to collect the information. +4. Discuss and analyze the information. + +## Progress and Next Steps + + + +1. Created initial document for the discussion of the survey, shared with Key Investigators: +2. Distributed survey to the attendees: [http://tinyurl.com/public-data-survey](http://tinyurl.com/public-data-survey) +3. Advertised on LinkedIn, reached out directly, printed out paper fliers! +4. Collected 31 responses so far (only 19 from PW40 attendees, out of 200+ registrants! PLEASE participate!) +5. Results will be released after the survey is closed to avoid influencing responses! 
+ +# Illustrations + + + +![Survey ad](./survey_ad.jpg) + +![Participants](./participants.jpg) + +![Current participation overview](./participation.jpg) + +# Background and References + + + +*No response* diff --git a/PW40_2024_GranCanaria/Projects/InvestigatingTheRoleOfPublicImagingDataInResearch/participants.jpg b/PW40_2024_GranCanaria/Projects/InvestigatingTheRoleOfPublicImagingDataInResearch/participants.jpg new file mode 100644 index 000000000..3fbe3d459 Binary files /dev/null and b/PW40_2024_GranCanaria/Projects/InvestigatingTheRoleOfPublicImagingDataInResearch/participants.jpg differ diff --git a/PW40_2024_GranCanaria/Projects/InvestigatingTheRoleOfPublicImagingDataInResearch/participation.jpg b/PW40_2024_GranCanaria/Projects/InvestigatingTheRoleOfPublicImagingDataInResearch/participation.jpg new file mode 100644 index 000000000..e58af672c Binary files /dev/null and b/PW40_2024_GranCanaria/Projects/InvestigatingTheRoleOfPublicImagingDataInResearch/participation.jpg differ diff --git a/PW40_2024_GranCanaria/Projects/InvestigatingTheRoleOfPublicImagingDataInResearch/survey_ad.jpg b/PW40_2024_GranCanaria/Projects/InvestigatingTheRoleOfPublicImagingDataInResearch/survey_ad.jpg new file mode 100644 index 000000000..824c2a07c Binary files /dev/null and b/PW40_2024_GranCanaria/Projects/InvestigatingTheRoleOfPublicImagingDataInResearch/survey_ad.jpg differ diff --git a/PW40_2024_GranCanaria/Projects/Launch3DslicerViaClickableUrlsForViewingIdcDataViaSliceridcbrowserAndIdcIndex/README.md b/PW40_2024_GranCanaria/Projects/Launch3DslicerViaClickableUrlsForViewingIdcDataViaSliceridcbrowserAndIdcIndex/README.md new file mode 100644 index 000000000..e009bb0af --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/Launch3DslicerViaClickableUrlsForViewingIdcDataViaSliceridcbrowserAndIdcIndex/README.md @@ -0,0 +1,106 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Launch 3DSlicer via clickable URLs for Viewing IDC Data via SlicerIDCBrowser and IDC + Index 
+category: Cloud / Web +presenter_location: In-person + +key_investigators: + +- name: Vamsi Thiriveedhi + affiliation: BWH + country: USA + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +- name: Steve Pieper + affiliation: Isomics + country: USA + +- name: Andras Lasso + affiliation: Queens University, Canada + country: USA + +- name: Umang Pandey + affiliation: UCM3 + country: Spain +--- + +# Project Description + + + +IDC (Imaging Data Commons) has several TB of radiology data that can be viewed with various tools, such as OHIF v2, v3, and Kitware’s VolView. However, one could argue that none of these tools can match the power and versatility of 3DSlicer, which offers a wide range of features for exploring and analyzing radiology data. Steve has come up with a brilliant idea to make 3DSlicer more accessible and user-friendly: a simple URL that can be clicked to launch 3DSlicer and load the desired IDC data. This project aims to ease the way we view IDC Data on 3DSlicer by making it as easy as clicking a link by extending the capabilities of the SlicerIDCBrowser extension. + +Notes for discussion: +* we could also have a mode where user clicking on IDC manifest would trigger Slicer opening and downloading the content of the manifest - this would be similar to TCIA manifest downloader. Need to think how to report progress, since for large cohorts it will take time, and s5cmd batch run does not provide the progress reporting means. TODO: link the s5cmd progress reporting issue +* warn/communicate to the user download size on disk + +## Objective + + + +1. Enable the local 3DSlicer to launch from slicer://idc-browser/ URLs +2. Register the slicer://idc-browser/ protocol on Linux, Windows, and MacOS +3. Integrate the protocol registration for slicer://viewer/ into the 3DSlicer installation process for Linux and MacOS +4. 
Sequence of steps (under discussion) of how this should work when everything is done: + * user interacts with IDC Portal, which includes URLs at the study/series level for the types of data that can be handled by Slicer (exclude SM) + * when user clicks on a Slicer URL + * if user has Slicer installed, but no SlicerIDCBrowser extension - the only handler available is the default one - should it detect that extension is missing and inform user that it is needed? + * if user has Slicer and extension installed - open Slicer, select SlicerIDCBrowser, populate information in the GUI about what is being downloaded and automatically trigger the download and load into scene - need to discuss how to do error checking and alert user if certain series cannot be loaded. @vkt1414 thinks it may add delay and should just load the downloaded data into scene. + * if user does not have Slicer - probably nothing can be done, it won't work - 404 + +## Approach and Plan + + + +1. Incorporate class in SlicerIDCBrowser code base + * need to add support for progress reporting - Vamsi suggests to look at the number of files - or we can use total size of the downloaded files, since s5cmd creates multiple files during download +2. Handle registration of custom browser protocol automatically based on the underlying OS + +## Progress and Next Steps +1. [SlicerIDCBrowser](https://github.com/ImagingDataCommons/SlicerIDCBrowser) can now register the slicer://idc-browser/ protocol on all three platforms MacOS, Linux, and Windows +2. The downloading experience currently is dictated by the network speed +3. Need to explore/handle the behavior when multiple versions of slicer are present on the user's system + + @pieper thoughts on this: + > My thought would be to have a script for each platform (maybe shell for mac/linux and .bat for windows) that would be launched by the url handler. 
That script would launch Slicer with ` --no-main-window --python-script ` where select.py would implement the logic to find the currently running and installed Slicers and put up a dialog box so the user can select the target. Then it would either launch a new instance of the version of the user's choice, or it would send a signal to one of the running instances to load more data. We'd need to discuss how best to send the signal. It could be a literal operating system signal or it could use something like the WebServer module so that running instances listen for these load requests. We should think about what is the best and most useful way to implement this. + +4. @lassoan suggested + - To be able to use a running slicer instance, to refer to this script to see https://github.com/lassoan/slicerio?tab=readme-ov-file#view-files-in-3d-slicer + - Use Visual DICOM Browser widget while loading a study + - Refer to his MONAI extension for reference to implementation of streaming logs to progress bar +5. @Punzo suggested + - refer to script repository in slicer for finding code + - filter by patient id by using onPatients() to show the specific patient into Visual DICOM browser +6. Umang's observations: + > If one has multiple slicer's installed along with the nightly version. Link opens slicer for the highest stable version (All had the extension and module loaded). Would there be a way to choose at least between nightly and stable version? +7. The code is currently available at https://github.com/vkt1414/SlicerIDCBrowser and eventually may be available @ https://github.com/ImagingDataCommons/SlicerIDCBrowser +8. 
Once the behavior of launching URLs is refined in SlicerIDCBrowser, the goal is to make at least the registration of the slicer:// protocol available on Slicer directly + +# Illustrations + +## Demo on Windows +![NewMerge-ezgif com-video-to-gif-converter](https://github.com/NA-MIC/ProjectWeek/assets/115020590/61e49e50-65a3-4f26-88c9-e0e7a3c7893d) + +## Demo on Linux (Ubuntu) +![Video_240129105420_Slice1-ezgif com-video-to-gif-converter](https://github.com/NA-MIC/ProjectWeek/assets/115020590/02c3957a-93d8-4f01-930f-c529e8de0758) + +## Demo on MacOS +![Untitled_Project_V3](https://github.com/NA-MIC/ProjectWeek/assets/115020590/46e36c57-6886-4e29-a37a-2accda22a68d) + + +# Background and References + + + +The discussion here made it easier to test a prototype and this project builds on the suggestions made here. + +[How to load nifti file from web browser link? - Development - 3D Slicer Community](https://discourse.slicer.org/t/how-to-load-nifti-file-from-web-browser-link/18664/5) +[SlicerSandbox/LoadRemoteFile/LoadRemoteFile.py at master · PerkLab/SlicerSandbox (github.com)](https://github.com/PerkLab/SlicerSandbox/blob/master/LoadRemoteFile/LoadRemoteFile.py) diff --git a/PW40_2024_GranCanaria/Projects/LinkingSegmentationAndImagingDataWithDifferentGeometriesUsingDcmqi/README.md b/PW40_2024_GranCanaria/Projects/LinkingSegmentationAndImagingDataWithDifferentGeometriesUsingDcmqi/README.md new file mode 100644 index 000000000..641d1a81a --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/LinkingSegmentationAndImagingDataWithDifferentGeometriesUsingDcmqi/README.md @@ -0,0 +1,83 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Linking segmentation and imaging data with different geometries using dcmqi +category: DICOM +presenter_location: In-person + +key_investigators: + +- name: Reuben Dorent + affiliation: BWH + country: USA + +- name: Steve Pieper + affiliation: Isomics + country: 
Inc., USA + +- name: Tina Kapur + affiliation: BWH + country: USA + +- name: Colton Barr + affiliation: Queen's University + country: Canada + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +- name: Michael Onken + affiliation: OpenConnections + country: Germany + +- name: David Clunie + affiliation: PixelMed + country: USA + +--- + +# Project Description + + + +This project aims to clarify best practices and discuss possible fixes for linking segmentation and imaging data with different geometries using dcmqi. This question arose during curation of The Brain Resection Multimodal Imaging Database (ReMIND) for TCIA and subsequent challenges with viewing unlinked segmentations using the OHIF viewer. + +## Objective + + + +1. Objective A: Discuss possible fixes for this dcmqi linking issue when using imaging data and segmentations with differing geometries. +2. Objective B: Ensure the ReMIND data available in TCIA has the appropriate linking between segmentations and imaging, and can be viewed using OHIF. + +## Approach and Plan + + + +1. Get input from all stakeholders on best practices and potential fixes for this linking problem. +2. Establish a plan for next-steps. + +## Progress and Next Steps + + +1. Andrey summarized the implementation challenges in this issue - need to hear from @dclunie: [https://github.com/QIICR/dcmqi/issues/489](https://github.com/QIICR/dcmqi/issues/489) + +2. 
Perhaps consider something like recent RT Structure Set addition to standard that added [SourceSeriesInformationSequence](https://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.8.8.5.html#para_a625a323-0d2f-4922-b292-6d81fb912774) and [SourceSeriesSequence](https://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.8.8.6.html#para_ccc7aad7-b3f7-4fdc-b498-5590a1983bdd) ([CP 2296 Provide additional ROI parameters to avoid parsing strings](https://dicom.nema.org/medical/dicom/Final/cp2296_ft_ProvideAdditionalROIParametersToAvoidParsingStrings.pdf)) + +3. Steve and David discussed that a more general solution could be to enable OHIF to render segmentation on top of any data that shares the same FrameOfReferenceUID. This would not require any change in DICOM or in the data and would benefit other usecases. This topic will be researched during the next IDC viewers coordinatation meeting. + +# Illustrations + + + +*No response* + +# Background and References + + + +*No response* diff --git a/PW40_2024_GranCanaria/Projects/LongCovidAndTheBrainWhiteMatter/README.md b/PW40_2024_GranCanaria/Projects/LongCovidAndTheBrainWhiteMatter/README.md new file mode 100644 index 000000000..c44043c72 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/LongCovidAndTheBrainWhiteMatter/README.md @@ -0,0 +1,123 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: 'Long-COVID and the Brain White Matter ' +category: Other +presenter_location: In-person + +key_investigators: + +- name: Zora Kikinis + affiliation: BWH + country: USA + +- name: Ron Kikinis + affiliation: BWH + country: USA + +- name: Poliana Hartung Toppa + affiliation: MGH + country: USA + +- name: Kayley Haggerty + affiliation: MGH + country: USA + +- name: George Papadimitriou + affiliation: MGH + country: USA + +- name: Anastasia Haidar + affiliation: BWH + country: USA + +- name: Twishi Puri + affiliation: BWH + country: USA + +- name: Bianca Besteher + affiliation: University 
Hospital Jena + country: Germany + +- name: Carina Heller + affiliation: University in Jena + country: Germany + +- name: Ofer Pasternak + affiliation: BWH + country: USA + +- name: Ed Yeterian + affiliation: MGH + country: USA + +- name: Stefano Pallanti + affiliation: Institute of Neuroscience + country: Italy + +- name: Yogesh Rathi + affiliation: BWH + country: USA + +- name: Marek Kubicki + affiliation: BWH + country: USA + +- name: Jarrett Rushmore + affiliation: MGH + country: USA + +- name: Nikos Makris + affiliation: MGH + country: USA + +--- + +# Project Description + + + +About 10% of COVID-19 survivors experience long-lasting symptoms known as long-COVID. The debilitating symptoms of PASC impact patient’s health-related quality of life, earnings, caretaking activities for loved ones, and healthcare costs. There is no proven cure for PASC. Amongst the variable sequelae of long-COVID, the neuropsychiatric subtype of long-COVID (neuro-long-COVID) is characterized by fatigue, cognitive impairments, and pro-inflammatory cytokines. Our research project aims to understand how changes in the brain, specifically the white matter, contribute to the symptoms of the neuropsychiatric subtype of long-COVID. + +We are particularly interested in the brain white matter fiber system that we term the dorsal vagal complex-corticolimbic fiber system (DVC-CLFS; Kikinis et al., in press). This fiber system interconnects cortical, paralimbic, limbic, and autonomic brain regions. It integrates them with bodily organs via the brainstem and the vagal nerve. The DVC-CLFS underlies brain-immune interactions and involves several centers in the brainstem. The principal fiber tract in the DVC-CFLS is the medial forebrain bundle (MFB). + +While we have previously reconstructed the MFB, our new approach allows us to rebuild the streamlines reaching the brainstem. 
Using the Harvard Oxford Atlas (HOA, Rushmore et al., 2020), we will use whole brain tractography from 10 patients with long-COVID to select the Medial Forebrain Bundle (MFB) at its entire extension. + +As part of the project, we will demonstrate the method reconstructing new fiber tracts using 3dSlicer. We will offer a tutorial of 3DSlicer segmentation, reconstruction of brain white matter streamlines to new users. + +## Objective + + + +1. To test and apply a semi-automated approach to delineate fiber tracts that reach the brain stem. Tractography in the brain stem is challenging as most fibers run parallel and very close to each other. Additionally, the nuclei of the brainstem from which tracts originate are difficult to visualize. +2. Give a tutorial of 3DSlicer segmentation and reconstruction of brain white matter streamlines, especially those connecting the thalamus, cerebellum, pons and frontal cortex. + +## Approach and Plan + + + +1. We will use diffusion and structural MRI, specifically whole brain tractography and T1 images, from 10 patients with long-COVID to select the Medial Forebrain Bundle (MFB) at its entire extension to the brainstem. + +## Progress and Next Steps + + + +1. We were able to reconstruct the MFB of eight cases. 1st picture. +2. We successfully taught how to use 3DSlicer segmentation and tractography settings. 2nd picture. 
+ +# Illustrations + +Screenshot 2024-02-01 at 3 40 38 PM +![tracts](https://github.com/NA-MIC/ProjectWeek/assets/47013972/bfda77ef-f995-4480-9853-f34716534b38) + + + +*No response* + +# Background and References + + diff --git a/PW40_2024_GranCanaria/Projects/LvadImplantationMeasurements/README.md b/PW40_2024_GranCanaria/Projects/LvadImplantationMeasurements/README.md new file mode 100644 index 000000000..b1641cb48 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/LvadImplantationMeasurements/README.md @@ -0,0 +1,73 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: LVAD implantation measurements +category: Quantification and Computation +presenter_location: In-person + +key_investigators: + +- name: Imre J. Barabás + affiliation: Semmelweis University + country: Hungary + +- name: Attila Tanács + affiliation: University of Szeged + country: Hungary + +- name: Attila Nagy + affiliation: University of Szeged + country: Hungary + +- name: Frida Hauler + affiliation: No Institution + country: UK + +--- + +# Project Description + + + +Left Ventricular Assist Device (LVAD) implantation is a surgical procedure commonly employed in the field of cardiac surgery to support individuals with severe heart failure. This innovative technology involves the insertion of a mechanical pump, known as an LVAD, which assists the weakened left ventricle in pumping blood throughout the body. Knowing the LVAD position and angle after implantation is crucial for several reasons in the realm of cardiac surgery and patient care, such as: Optimal Device Function, +Minimizing Complications, +Improve the Quality of Life, +Long-Term Device Durability, +Postoperative Monitoring of Device Function, +Individualized Patient Care + +## Objective + + + +We aim to create an extension that allows us to conduct measurements using standard methods. + +## Approach and Plan + + + +Collaboratively addressing the issue. 
+ +## Progress and Next Steps + + + +A recent advancement has been achieved with the development of a new extension. This extension is designed to utilize segmented anatomical structures of the heart as input data, leveraging this information to calculate the optimal position for the Left Ventricular Assist Device (LVAD). This innovative approach marks a significant step forward in the field, enhancing precision and efficiency in determining the most suitable placement for LVADs based on segmented anatomical data of the heart. + +# Illustrations + + + +![](https://drive.google.com/file/d/1YmX3jgm_9tjmsZGJIVcs328zHPlAg4Ut/view?usp=sharing) + +# Background and References + + + +* Barabás IJ, Hartyánszky I, Kocher A, Merkely B. A 3D printed exoskeleton facilitates HeartMate III inflow cannula position. Interact Cardiovasc Thorac Surg. 2019 Oct 1;29(4):644-646. doi: 10.1093/icvts/ivz146. PMID: 31230073. +* Barabás JI, Palkovics D, Bognár V, Sax B, Heltai K, Panajotu A, Merkely B, Hartyánszky I. A 3D technológia szerepe a műszívterápiában \[The role of 3D technology in the support of mechanical circulation therapy.]. Orv Hetil. 2023 Jul 2;164(26):1026-1033. Hungarian. doi: 10.1556/650.2023.32804. PMID: 37393547. +* Barabás JI, Merkely B, Hartyánszky I, Palkovics D. Computer-aided Design and Manufacturing of a Patented, Left Ventricle Assist Device Positioning Tool – 3D Navigated Surgical Treatment of End-Stage Heart Failure. Acta Polytechnica Hungarica. 2023 Jan 20(8):9-25. 
DOI: 10.12700/APH.20.8.2023.8.2 diff --git a/PW40_2024_GranCanaria/Projects/MHubTutorialsAndStructuredReportSupport/README.md b/PW40_2024_GranCanaria/Projects/MHubTutorialsAndStructuredReportSupport/README.md new file mode 100644 index 000000000..201528890 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/MHubTutorialsAndStructuredReportSupport/README.md @@ -0,0 +1,93 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: MHub Tutorials and Structured Report Support +category: DICOM +presenter_location: Onsite + +key_investigators: +- name: Leonard Nürnberg + affiliation: Maastricht University + +- name: Andrey Fedorov + affiliation: BWH, USA + +- name: David Clunie + affiliation: Pixelmed Publishing, USA +--- + +# Project Description + + + +## Objective + + + +1. Objective A. Implement a new MHub core module to export non-segmentation data as DICOM Structured Report. +1. Objective B. Offer support for interested developers explaining how to contribute models to MHub and how to use and configure MHub models for easy and reproducible research. + +## Approach and Plan + + + +Objective A +1. Examine the requirements and limitations of DICOM Structured Reports. +1. Examine existing libraries and frameworks that can help in the creation of DICOM Structured Reports. +1. Check to what extent the internal data representation of MHub complies with the requirements of DICOM Structured Reports and, if necessary, plan an extension of the internal data representation. + +Objective B +1. Organize a break-out session for all interested developers. We can provide tutorials and answer specific questions on request. It would be great to know who is interested in submitting a model to MHub or using MHub models and gather questions in advance to provide customized tutorials. 
+ +Other items suggested by @fedorov: +* revisit MHub Colab notebook +* continue discussions on how the specific series from IDC can be linked with the models, and how this can be exposed in the website and via some API + +## Progress and Next Steps + + + +1. Developers of MHub models must wrap all non-file outputs (e.g. prediction results or classification labels) in mhub-io objects and describe their semantics. +1. MHub currently offers a JSONExporter module that can be used to query MHub's internal data representation and generate a customizable json (or csv). + +Results from PW40 +1. MHub SR Support + 1. We will start by adding support for the TID1500 DICOM SR template. + 1. We will use parametrization of highdicom codes for simple annotation of model data. +1. We developed new MHub tutorials for + 1. [T1 - Run TotalSegmentator on IDC Collection](https://github.com/MHubAI/documentation/blob/main/tutorials/run_totalsegmentator_on_idc_collection/mhub_tutorial_001.md) + 1. [T2 - Run Custom MHub Lung Segmentation Workflow on Chest CT in Nifti Format](https://github.com/MHubAI/documentation/blob/main/tutorials/run_lungmask_on_chestct_in_nifti_format/mhub_tutorial_002.md) +1. We now have full documentation for contributions to MHub (use-case tutorial will follow soon ;)) + 1. 
[How to Contribute a Model to MHub](https://github.com/MHubAI/documentation/blob/main/documentation/mhub_contribution/contributing_a_model.md) + + +# Illustrations + + + +![MHub Submission Process](https://raw.githubusercontent.com/MHubAI/documentation/main/documentation/figures/submission_sequence_diagram.png) + +![Slicer MHub Visualization](https://raw.githubusercontent.com/MHubAI/documentation/main/tutorials/run_totalsegmentator_on_idc_collection/figures/slicer_inspect_data.png) + +# Background and References + + + +Documentation +- [MHub documentation](https://github.com/MHubAI/documentation) +- [MHub Handling Logical Output](https://github.com/MHubAI/documentation/blob/main/documentation/mhubio/how_to_write_an_mhubio_module.md#handling-logical-output-data) +- [MHub Report Exporter Documentation](https://github.com/MHubAI/mhubio/blob/main/mhubio/core/RunnerOutput.py) + +Code +- [MHub-IO Logical Output](https://github.com/MHubAI/mhubio/blob/main/mhubio/core/RunnerOutput.py) + +Other stuff +- [JSON Representation of DICOM Structured Reports (DICOM Supplement 219 Trial Use Draft)](https://www.dclunie.com/dicom-status/status.html#Supplement219) including [slides explaining the purpose](https://dicom.nema.org/medical/dicom/Supps/Frozen/sup219_fz_JSONSR_TrialUse_Slides_20200116.pptx) and [sample Java implementation of DICOM SR to/from JSON](https://www.dclunie.com/pixelmed/software/javadoc/com/pixelmed/dicom/JSONRepresentationOfStructuredReportObjectFactory.html) in [PixelMed toolkit](https://www.dclunie.com/pixelmed/software/index.html). 
+- dcmqi can be used to [create TID 1500 structured reports](https://qiicr.gitbook.io/dcmqi-guide/opening/cmd_tools/sr/tid1500writer) containing measurements derived from segmentations; this is the tool that was used in the recent paper/code for saving radiomics features: [https://github.com/ImagingDataCommons/nnU-Net-BPR-annotations](https://github.com/ImagingDataCommons/nnU-Net-BPR-annotations) diff --git a/PW40_2024_GranCanaria/Projects/MauritanianProject/README.md b/PW40_2024_GranCanaria/Projects/MauritanianProject/README.md new file mode 100644 index 000000000..7051e17d0 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/MauritanianProject/README.md @@ -0,0 +1,123 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: 'Mauritanian Project ' +category: Other +presenter_location: In-person + +key_investigators: + +- name: Ahmedou Moulaye IDRISS + affiliation: Faculté de médecine de Nouakchott + country: Université de Nouakchott, Mauritania + +- name: Sonia Pujol + affiliation: Brigham and Women’s Hospital and Harvard Medical School + country: USA + +- name: Fatimetou Mohamed-Saleck + affiliation: Faculté des Sciences et Techniques + country: Université de Nouakchott, Mauritania + +- name: Moustapha Mohamed Saleck + affiliation: Faculté des Sciences et Techniques + country: Université de Nouakchott, Mauritania + +- name: Mohamed Mahmoud Septy Mohamed bamba + affiliation: Faculté de médecine de Nouakchott + country: Université de Nouakchott, Mauritania + +- name: Mohamed Abdellahi Sidi Mohamed Blal + affiliation: Faculté des Sciences et Techniques + country: Université de Nouakchott, Mauritania + +- name: El Hacen Mohamed Soueilem + affiliation: Faculté des Sciences et Techniques + country: Université de Nouakchott, Mauritania + +- name: Mohamedou Ahmed Mahmoud + affiliation: Faculté des Sciences et Techniques + country: Université de Nouakchott, Mauritania + +- name: Mohamed Boullah + affiliation: Faculté des Sciences et 
Techniques + country: Université de Nouakchott, Mauritania + +- name: Fatimetou Hademine + affiliation: Faculté des Sciences et Techniques + country: Université de Nouakchott, Mauritania + +--- + +# Project Description + + + +The Mauritanian approach aims to make 3D Slicer accessible to a broader audience of users and researchers in the medical field. Various projects have been completed, initiated, or are currently under consideration, including: + +- The anatomy atlases created by professors Idriss and Yahya in 2018 enabled medical students to visualize and quickly assimilate human body parts through the use of 3D Slicer. +- Other applications for generating 3D models of baby and expectant mother mannequins. +- Implementation and integration of three breast cancer segmentation methods by one of our researchers: An adaptive fuzzy C-means algorithm, An adaptive k-means algorithm, and an adaptive Otsu thresholding. Integration is underway. +- Processing and segmentation of medical images with 3D Slicer and Deep Learning in the context of an integrated approach for analyzing various medical data from Mauritania. This is the subject of an ongoing doctoral thesis. +- Other upcoming applications are planned. + + +## Objective + + + +- Generate 3D models of baby and expectant mother mannequins. +- Contribute to enriching breast cancer segmentation methods by introducing and comparing user choices in 3D Slicer. +- Explore cutting-edge techniques in medical image processing and segmentation, especially those based on Deep Learning, implement them, and evaluate performance. +- Anticipate additional upcoming applications. + + +## Approach and Plan + + + +- 3D Model of Baby and Expectant Mother +- Introduction of 3D Segmentation Methods: +- Segmentation of Medical Images from Mauritania: +- Other upcoming applications: + + +## Progress and Next Steps + + + +- Progress in generating 3D models of the baby and expectant mother. 
+- Progress in integrating the 3D segmentation methods. +- Progress of the doctoral thesis: Segmentation of Medical Images from Mauritania. +- Other upcoming projects. + + +# Illustrations + + + +We have developed an extension for 3D Slicer to perform medical image (volume) segmentation using the K-Means algorithm. Specifically, we have implemented an adaptive version of K-Means, which allows segmentation based on pixel intensity. + +We encountered several challenges during volume processing and rendering, as well as in finding alternatives to libraries like scikit-learn, NumPy, and OpenCV to integrate them into the 3D Slicer API. + +Ultimately, we successfully segmented the images using both the adaptive K-Means and the classic K-Means methods. However, these results still require improvement and testing on various types of medical images to ensure their reliability + +**Adaptive Algorithm Result** + +[![Image 1](https://github.com/slicermauritanie/AdaptiveSegML/blob/main/result_images/apaptive_rim1_1.png)](https://github.com/slicermauritanie/AdaptiveSegML/blob/main/result_images/apaptive_rim1_1.png) [![Image 2](https://github.com/slicermauritanie/AdaptiveSegML/blob/main/result_images/apaptive_rim1_2.png)](https://github.com/slicermauritanie/AdaptiveSegML/blob/main/result_images/apaptive_rim1_2.png) [![Image 3](https://github.com/slicermauritanie/AdaptiveSegML/blob/main/result_images/apaptive_rim1_3.png)](https://github.com/slicermauritanie/AdaptiveSegML/blob/main/result_images/apaptive_rim1_3.png) + + +**Classic Algorithm Result** + +![Image 1](https://github.com/slicermauritanie/AdaptiveSegML/blob/main/result_images/classic_al/classic_algorithm_1.png) ![Image 2](https://github.com/slicermauritanie/AdaptiveSegML/blob/main/result_images/classic_al/classic_algorithm_2_99.png) ![Image 
3](https://github.com/slicermauritanie/AdaptiveSegML/blob/main/result_images/classic_al/classic_algorithm_3_80.png) ![Image 4](https://github.com/slicermauritanie/AdaptiveSegML/blob/main/result_images/classic_al/classic_algorithm_4_50.png) ![Image 5](https://github.com/slicermauritanie/AdaptiveSegML/blob/main/result_images/classic_al/classic_algorithm_5_20.png) ![Image 6](https://github.com/slicermauritanie/AdaptiveSegML/blob/main/result_images/classic_al/classic_algorithm_6_10.png) ![Image 7](https://github.com/slicermauritanie/AdaptiveSegML/blob/main/result_images/classic_al/classic_algorithm_7_5.png) ![Image 8](https://github.com/slicermauritanie/AdaptiveSegML/blob/main/result_images/classic_al/classic_algorithm_8_2.png) + + +# Background and References + + + +[Previous ProjectWeek Page](https://projectweek.na-mic.org/PW33_2020_GranCanaria/Projects/AnatomicalAtlasesMauritania) diff --git a/PW40_2024_GranCanaria/Projects/MonaiBasedImageAugmentator/README.md b/PW40_2024_GranCanaria/Projects/MonaiBasedImageAugmentator/README.md new file mode 100644 index 000000000..811955560 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/MonaiBasedImageAugmentator/README.md @@ -0,0 +1,78 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: MONAI based image augmentator +category: Quantification and Computation +presenter_location: In-person + +key_investigators: + +- name: Ciro Benito, Raggio + affiliation: Karlsruhe Institute of Technology + country: Germany + +- name: Paolo, Zaffino + affiliation: Magna Graecia University of Catanzaro + country: Italy + +- name: Maria Francesca, Spadea + affiliation: Karlsruhe Institute of Technology + country: Germany + +--- + +# Project Description + + + +MONAI and PyTorch based medical image augmentation tool that can be integrated in Slicer. +The project aims to be a low-code version of the tool: . 
+ +It's designed to operate on a dataset of medical images and apply a series of specific transformations to each image. This process augments the original dataset, providing a greater variety of samples for training deep learning models. + +## Objective + + + +1. Creating an intuitive graphic interface for the module +2. Parallelising the augmentation process to optimally utilise resources + +## Approach and Plan + + + +1. Create the extension +2. Implement the augmentation process +3. Try to parallelise the process so that it takes as little time as possible on large data sets + +## Progress and Next Steps + + + +1. Implemented graphic interface for loading images and masks, choosing transformations and saving images. +2. Implemented and tested MONAI spatial transformations such as Rotation, RandRotation, Flip, Resize. +3. Partially implemented input validation and MONAI intensity transformations, it will be completed in the future. +4. Partially implemented "Preview" feature, which allows the output of transformations to be viewed directly in the scene before saving them in the OS, will be completed in the future. 
+ +# Illustrations + + +![main](https://github.com/NA-MIC/ProjectWeek/assets/96300975/4f8e8daf-88e2-483b-9849-e19899fb9260) +![filled](https://github.com/NA-MIC/ProjectWeek/assets/96300975/cc595232-fb44-4ff3-84eb-4a5ef52ec10c) + +Files are saved as follows: +![output_folder](https://github.com/NA-MIC/ProjectWeek/assets/96300975/f69f0408-d680-4e60-8675-dfac3e0ac5ed) + +Example of image after transformation in the scene: +![output_scene](https://github.com/NA-MIC/ProjectWeek/assets/96300975/4a06470e-8a1a-4b6b-87ed-82913aecc528) + + + +# Background and References + + +GitHub Project: diff --git a/PW40_2024_GranCanaria/Projects/MrImageNormalization/README.md b/PW40_2024_GranCanaria/Projects/MrImageNormalization/README.md new file mode 100644 index 000000000..0e3472555 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/MrImageNormalization/README.md @@ -0,0 +1,76 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: MR Image Normalization +category: Quantification and Computation +presenter_location: In-person + +key_investigators: + +- name: Michela Destito + affiliation: University Magna Graecia of Catanzaro + country: Italy + +- name: Paolo Zaffino + affiliation: University Magna Graecia of Catanzaro + country: Italy + +- name: Maria Francesca Spadea + affiliation: Institute of Biomedical Engineering + country: KIT - Karlsruher Institut für Technologie, Germany + +- name: Petros Koutsouvelis + affiliation: Maastricht University + country: Netherlands +--- + +# Project Description + + + +A key step in medical image processing, particularly in MRI images, is normalization of gray level intensities. This normalization is important to ensure that images have a consistent intensity scale, facilitating any future analysis. The purpose is to create a targeted extension for normalization of MR images in Slicer. 
+ +## Objective + + + +* The aim of the project will be to provide an extension to normalize the intensities of MRI images +* It will be possible to choose different normalization methods. + +## Approach and Plan + + + +1. Create an extension in which three normalization methods can be chosen: Zscore, WhiteStripe and Nyul. +2. To be able to compare the different gray levels of images normalized by multiple methods. + +## Progress and Next Steps + + + +1. In this week I created the Extension for Normalization MRI Images with three normalization methods. +2. In the created extension you can choose which method to use. +3. Considering the first two proposed methods (Z-score and WhiteStripe) only the MRI image needs to be loaded and is normalized. +4. Considering the third method (Nyul) one must load in addition to the image to be Normalized, the MRI dataset and only then is the image normalized. +5. Future developments will be to implement new normalization methods proposed in the literature. + +# Illustrations + + + + +# Background and References + + + + +1. https://github.com/Micheladestito/ImageNormalizationSlicer +2. 
https://github.com/jcreinhold/intensity-normalization diff --git a/PW40_2024_GranCanaria/Projects/MultimodelRegistration/README.md b/PW40_2024_GranCanaria/Projects/MultimodelRegistration/README.md new file mode 100644 index 000000000..f32880bd1 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/MultimodelRegistration/README.md @@ -0,0 +1,73 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Multimodal registration +category: Other +presenter_location: Online + +key_investigators: + +- name: Leroux Gaelle + affiliation: University of Michigan + country: USA + +- name: Claret Jeanne + affiliation: University of Michigan + country: USA + +- name: Cevidanes Lucia + affiliation: University of Michigan + country: USA + +- name: Allemand David + affiliation: Kitware + country: USA + +- name: Prieto Juan Carlos + affiliation: University of North Carolina + country: USA + +--- + +# Project Description + + + +The "Multimodal Registration Project" aims to develop a system capable of aligning different medical imaging modalities to enhance diagnostic accuracy and patient outcomes. By focusing on the registration of Cone Beam Computed Tomography (CBCT) with Magnetic Resonance Imaging (MRI), we seek to create a unified imaging model that offers comprehensive insights into patient anatomy and pathology. + +## Objective + + + +* The objective is to register MRI images with CBCT data accurately. + +## Approach and Plan + + + +* Collected a robust dataset comprising MRI and CBCT files. +* Compare the performance of three registration approaches: Generative Adversarial Network (GAN), agent-based action learning for non-rigid registration and Elastix's free-form deformation method. +* Validate the model's accuracy through rigorous testing against established benchmarks. + +## Progress and Next Steps + + + +* Data collection phase. +* Reviewing available papers and code. 
+ +# Illustrations + + + +*No response* + +# Background and References + + + +*No response* diff --git a/PW40_2024_GranCanaria/Projects/NewInteractionWidgetForTransformsMarkups/README.md b/PW40_2024_GranCanaria/Projects/NewInteractionWidgetForTransformsMarkups/README.md new file mode 100644 index 000000000..6d4649bbf --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/NewInteractionWidgetForTransformsMarkups/README.md @@ -0,0 +1,91 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: New Interaction Widget for Transforms/Markups +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Kyle Sunderland + affiliation: PerkLab - Queen’s University + country: Canada + +--- + +# Project Description + + + +This project aims to implement a new interaction handle widget that can be used to modify linear transform nodes. This implementation will be based on the existing interaction handle pipeline in the Markups module, but with improvements to functionality and appearance. Both the transforms and markups interaction widgets will be derived from the same base class. + +The center of rotation for the transform node can be changed by holding ALT and clicking+dragging on one of the translation handles. The center of rotation can also be changed in Python using the "vtkMRMLTransformNode::SetCenterOfTransformation" function. + +Transforms can also be scaled uniformly by holding ALT and clicking+dragging on one of the scale handles. + +## Objective + + + +1. Integration of new interaction widget into 3D Slicer. + +## Approach and Plan + + + +1. Provide an installer for precompiled version of Slicer that includes the new interaction widget. +2. Update based on feedback gathered from the project week. +3. Integrate new widget into the Slicer core. + +## Progress and Next Steps + + + +1. Created installer for a version of 3D Slicer containing the new widget. (See link in "Background and References" section). +2. 
Created [PR](https://github.com/Slicer/Slicer/pull/7562) to integrate changes into 3D Slicer. +3. Add options to enable/disable visualization of specific axes. +4. Add uniform scaling option. +5. Improved visualization based on feedback. +6. Integrated into latest preview release! + +### Next steps + +1. Use a simplified widget visualization when the user is not interacting with it. +2. Add a shortcut to allow users to cancel the transformation. +3. Add shortcut to enable snapping to angles. + +# Illustrations + + +## Existing widgets + +| Transforms| Markups | +|----------|:-------------:| +| ![image](https://github.com/NA-MIC/ProjectWeek/assets/9222709/aa0e1abb-ee47-478e-b712-a9cfd666b311) | ![image](https://github.com/NA-MIC/ProjectWeek/assets/9222709/f65d323f-e84c-481b-94c1-1001eb209ce5) | + +## New widget + +| Transforms| Markups | +|----------|:-------------:| +| ![image](https://github.com/NA-MIC/ProjectWeek/assets/9222709/dbc01ab4-31c0-4b17-b184-6e5c67c35bf7) | ![image](https://github.com/NA-MIC/ProjectWeek/assets/9222709/d0773aef-86d1-46de-9a21-afc53c54ecb8) | + + + +# Background and References + + + +- Installers: + - V1: ~https://1drv.ms/u/s!Al0lwIPqdM2dgql4-5NkWigrmjeU8A?e=lIHd2H~ + - V2: ~https://1drv.ms/u/s!Al0lwIPqdM2dgql7p_gc9DsbDt4New?e=crjT6A~ + - V3: Latest nightly build! 
+- Pull request: https://github.com/Slicer/Slicer/pull/7562 +- Current branch: diff --git a/PW40_2024_GranCanaria/Projects/NewWindowLevelUiUxInOhifViewer/README.md b/PW40_2024_GranCanaria/Projects/NewWindowLevelUiUxInOhifViewer/README.md new file mode 100644 index 000000000..188d9d216 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/NewWindowLevelUiUxInOhifViewer/README.md @@ -0,0 +1,61 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: New window level UI/UX in OHIF Viewer +category: Cloud / Web +presenter_location: Online + +key_investigators: + +- name: Alireza Sedghi + affiliation: OHIF + country: Accolade Imaging, Canada + +--- + +# Project Description + +This project will aim to update the OHIF Viewer based on the new design of the Window Level interactions. Using per-viewport interactions makes a lot of sense (similar to commercial pacs viewers such as Siemens, and GE) since you can filter the options based on the available context (modality, 3D vs non-3D, fusion etc.) + + +![CleanShot 2024-02-01 at 22 09 07](https://github.com/NA-MIC/ProjectWeek/assets/7490180/9c6ab6d4-6cf1-4610-b8ca-076b226df357) + +## Objective + +1. Move the patient information to the top right instead of per viewport +2. Improve upon current setup for the All-in-one-menu + +## Approach and Plan + +1. Follow zeplin designs and implement the UI components + +## Progress and Next Steps + +1. I successfully completed the UI element for the window level presets and connected it to enable ww/wc changes. +2. Additionally, I added a toggle for the color bar. The color bar component is already integrated into cornerstone3D. + +Next Steps: +- Implementing the ability to track and maintain states for each viewport that displays the color bar. +- Incorporating the colorLUT changes into the UI. 
+ +# Illustrations + + + + + + +# Background and References + +Color bar in cornerstone3D +- https://www.cornerstonejs.org/live-examples/colorbar +- https://www.cornerstonejs.org/live-examples/advancedcolorbar + +OHIF PR +- https://github.com/OHIF/Viewers/pull/3914 diff --git a/PW40_2024_GranCanaria/Projects/NvidiaHoloscanAnd3DSlicer/README.md b/PW40_2024_GranCanaria/Projects/NvidiaHoloscanAnd3DSlicer/README.md new file mode 100644 index 000000000..bc7341ca6 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/NvidiaHoloscanAnd3DSlicer/README.md @@ -0,0 +1,63 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: NVIDIA Holoscan and 3D Slicer +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Mikael Brudfors + affiliation: NVIDIA + country: UK +- name: Steve Pieper + affiliation: Isomics + country: US +- name: Rafael Palomar + affiliation: NTNU + country: Norway +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware + country: USA +- name: Shreeraj Jadhav + affiliation: Kitware + country: USA +--- + +# Project Description + + + +This project aims at integrating [NVIDIA Holoscan](https://developer.nvidia.com/holoscan-sdk) into 3D Slicer. Holoscan is a hybrid computing platform for real-time streaming data that combines hardware systems for low-latency sensor and network connectivity, optimized libraries for data processing and AI, and core microservices to run surgical video, ultrasound, medical imaging, and other applications anywhere, from embedded to edge to cloud. + +## Objective + + + +Integrate NVIDIA Holoscan with 3D Slicer. Holoscan is both a [hardware platform](https://www.nvidia.com/en-gb/edge-computing/products/igx/) and an [SDK](https://github.com/nvidia-holoscan/holoscan-sdk)). Holoscan SDK runs on both x86 and arm64 platforms, which means it can be deployed on, e.g., regular laptops, as well as NVIDIA Jetson devices, or even in the cloud! 
+ +## Approach and Plan + + + +There are various options, not yet sure which one is the best (or if there are ones I have not considered): + +1. Use [OpenIGTLink](http://openigtlink.org/) to stream data from a Holoscan developer kit to 3D Slicer, over network. This would bypass the fact that 3D Slicer does not easily build on ARM (which is the arch of the IGX Orin). +2. Create operators in 3D Slicer that can pass data to and from Holoscan SDK. This would give a 3D Slicer user access to the API of Holoscan SDK for efficient AI inference, etc. +3. Connect an IGX Orin running Holoscan SDK to the [Plus toolkit](https://plustoolkit.github.io/). + +## Progress and Next Steps + +1. Worked with Kitware on building Slicer natively on Holoscan OS/hardware (progress, but not completed) +2. Prototype OpenIGTLink connection streaming slice/volume data through Holoscan +3. Made connections for further development + +The implementation for (2) will be made available on the [Holohub repository](https://github.com/nvidia-holoscan/holohub) soon, and will allow a 3D Slicer user to send imaging data to Holoscan SDK, running on a GPU powered device, do efficient inference and then send the results back to 3D Slicer. Another possibility is to send data acquired on the Holoscan device to 3D Slicer for visualization. + +# Illustrations + + + +![holoscan_3dslicer](https://github.com/NA-MIC/ProjectWeek/assets/6413806/c6e5969e-cccc-4228-9c60-e713ef776731) diff --git a/PW40_2024_GranCanaria/Projects/README.md b/PW40_2024_GranCanaria/Projects/README.md new file mode 100644 index 000000000..649ecf405 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/README.md @@ -0,0 +1,18 @@ +# How to create a new project + +- Post questions about the project idea and team on the [Project Week forum][forum], our communication mechanism as of PW28. 
+- When you are ready, add a new entry in the list of **Projects** by creating a new `README.md` file in subfolder in `Projects` folder, and copying contents of [project description template][project-description-template] file into it. Step-by-step instructions for this are: + +1. Open [project description template][project-description-template] and copy its full content to the clipboard + * If the link does not work (https issues) please try [here](https://github.com/NA-MIC/ProjectWeek/blob/master/PW40_2024_GranCanaria/Projects/Template/README.md) +3. Go back to [Projects](https://github.com/NA-MIC/ProjectWeek/tree/master/PW40_2024_GranCanaria/Projects) folder on GitHub +4. Click on "Create new file" button +5. Type `YourProjectName/README.md` + - When naming the file, **please ensure there are no spaces/special characters in the folder or file name** +6. Paste the previously copied content of project template page into your new `README.md` +7. Update at least your project's __title, category, key investigators, location, and project description sections__ +8. 
Create a [pull request](https://help.github.com/articles/creating-a-pull-request/) with the new page + + +[forum]: https://discourse.slicer.org/c/community/project-week +[project-description-template]: https://raw.githubusercontent.com/NA-MIC/ProjectWeek/master/PW40_2024_GranCanaria/Projects/Template/README.md diff --git a/PW40_2024_GranCanaria/Projects/RealTimeVisualizationOfTmsEvokedPotentials/README.md b/PW40_2024_GranCanaria/Projects/RealTimeVisualizationOfTmsEvokedPotentials/README.md new file mode 100644 index 000000000..4269a710d --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/RealTimeVisualizationOfTmsEvokedPotentials/README.md @@ -0,0 +1,70 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Real-Time Visualization of TMS-evoked Potentials +category: Other +presenter_location: Online + +key_investigators: + +- name: Lipeng Ning + affiliation: BWH + country: USA + +- name: Steve Pieper + affiliation: Isomics + country: USA + +- name: Asif Jamil + affiliation: MGH + country: USA + +- name: Dongjin Sung + affiliation: MGH + country: USA + +--- + +# Project Description + + + +The 'Real-Time Visualization of TMS-evoked Potentials ' project is dedicated to realizing the streaming of EEG, with a specific focus on TMS-evoked potentials (TEPs) with signal processing to assess the quality of TEPs both offline and in real-time. This comprehensive approach aims to not only evaluate TMS-induced neural responses but to further understand and provide valuable insights into brain activity in real-time. Future projects will build upon the real-time TEP visualization tool by extending its goals to include real-time localization of sources associated with the evoked potentials. + +## Objective + + + +1. Create a visualization tool for streaming real-time TEPs +2. Include a module that also enables streaming of existing data +3. 
Implement online signal processing methods to achieve clean visualization of TEPs + +## Approach and Plan + + + +1. Utilize methods such as Remote Data Access (RDA) and Lab Streaming Layer (LSL) to successfully stream online EEG data with TMS triggers for visualization +2. Implement the visualization of streaming offline data in a real-time manner +3. Create processing methods such as eliminating pulse effects and artifacts + +## Progress and Next Steps + + + +1. The online streaming of EEG data coupled with TMS markers is working, however a specified type of visualization is required for implementation + +# Illustrations + + + +*No response* + +# Background and References + + + +*No response* diff --git a/PW40_2024_GranCanaria/Projects/RefreshAFewSelectedSlicerTrainingMaterialsForAMoreRecentVersionOfSlicer/README.md b/PW40_2024_GranCanaria/Projects/RefreshAFewSelectedSlicerTrainingMaterialsForAMoreRecentVersionOfSlicer/README.md new file mode 100644 index 000000000..4ac8b32d1 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/RefreshAFewSelectedSlicerTrainingMaterialsForAMoreRecentVersionOfSlicer/README.md @@ -0,0 +1,59 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Refresh a few, selected Slicer training materials for a more recent version of Slicer +category: IGT and Training +presenter_location: In-person + +key_investigators: + +- name: Csaba Pintér + affiliation: Ebatinca + country: Las Palmas de Gran Canaria, Spain + +- name: Attila Nagy + affiliation: University of Szeged + country: Szeged, Hungary + +--- + +# Project Description + + + +Slicer evolves at an amazing pace, and this makes the tutorials obsolete pretty quickly. The current task would be to bring some of these up to date manually, but later on the process could maybe be automated. + +## Objective + + + +1. Review some tutorials and bring them up to date. + +## Approach and Plan + + + +1. 
Review what changed in recent Slicer versions, that involves the selected tutorials, and do the screenshots. Also, begin refreshing the presentations. + +## Progress and Next Steps + + + +1. Redid the whole 3D printing material, brought it up-to-date, created screenshots, explored new ways to do things in Slicer (right-click menus, enhancements in the Data module, etc), and added these to the tutorial. + +# Illustrations + + + +![kép](https://github.com/NA-MIC/ProjectWeek/assets/242559/50f877d3-dc97-41e1-8188-39e360e14438) +![kép](https://github.com/NA-MIC/ProjectWeek/assets/242559/c76f2d3f-d15b-4cae-93d5-af62a8dce0da) +![kép](https://github.com/NA-MIC/ProjectWeek/assets/242559/bc9090b3-e143-4a25-9cd0-6210c34e4734) + + +# Background and References + + diff --git a/PW40_2024_GranCanaria/Projects/ReviewIgtTrainingRelatedMaterialsThatCouldBeUsedInTrainingMedicieStudents/README.md b/PW40_2024_GranCanaria/Projects/ReviewIgtTrainingRelatedMaterialsThatCouldBeUsedInTrainingMedicieStudents/README.md new file mode 100644 index 000000000..ee5b674c1 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/ReviewIgtTrainingRelatedMaterialsThatCouldBeUsedInTrainingMedicieStudents/README.md @@ -0,0 +1,56 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Review IGT training related materials that could be used in training medicine students +category: IGT and Training +presenter_location: In-person + +key_investigators: + +- name: Attila Nagy, Francesca Maria Spadea, Juan Ruiz Alzola, Javier Pascau, Gábor Fichtinger, Tina Kapur, Ron Kikinis, Rafael Palomar, Paolo Zaffino + affiliation: University of Szeged + country: Szeged, Hungary + +--- + +# Project Description + + + +IGT is a special area of medical imaging-related sciences, and is hard to incorporate into the undergraduate curriculum. +Yet, it would be great, because the students like it, and could more easily be engaged. + +## Objective + + + +1. Objective A. 
Contact people who teach IGT, or IGT-related applications as undegraduate courses. Make contacts, discuss good practices, exchange experiences. + +## Approach and Plan + + + +1. Approach people, have talks, take a look at already existing materials (if available). + +## Progress and Next Steps + + + +1. Had talk, approached people and made great proges overall! :) + +# Illustrations + + +![20240201_121733](https://github.com/NA-MIC/ProjectWeek/assets/242559/04fbad90-d016-42ba-82bf-0a863593064b) +![20240131_163850](https://github.com/NA-MIC/ProjectWeek/assets/242559/4c885624-0673-4d4d-98d5-fd84576a8820) +![20240201_142718](https://github.com/NA-MIC/ProjectWeek/assets/242559/87e27f0b-cd27-4d98-a678-a262a8f76191) + +# Background and References + + + +*No response* diff --git a/PW40_2024_GranCanaria/Projects/ReviewOfPrioritiesAndDevelopmentPlanningOfDcmqi/README.md b/PW40_2024_GranCanaria/Projects/ReviewOfPrioritiesAndDevelopmentPlanningOfDcmqi/README.md new file mode 100644 index 000000000..0ca91d567 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/ReviewOfPrioritiesAndDevelopmentPlanningOfDcmqi/README.md @@ -0,0 +1,83 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Review of priorities and development planning of dcmqi +category: DICOM +presenter_location: Remote + +key_investigators: + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +- name: Michael Onken + affiliation: OpenConnections + country: Germany + +- name: Joost van Griethuysen + affiliation: NKI + country: Netherlands + +- name: Ralf Floca + affiliation: DKFZ + country: Germany + +--- + +# Project Description + + + +[dcmqi](https://github.com/QIICR/dcmqi) (DICOM for Quantitative Imaging) is a free, open source C++ library for conversion between imaging research formats and the standard DICOM representation for image analysis results. 
+ +This library has been around for quite some time, and gained some adoption, but has not been actively developed for the past few years, but with the efforts of @michaelonken development restarted. + +The goal of this project would be to discuss what, if anything, could be done to make it more usable and address any of the needs that users might have. + +## Objective + + + +1. Review any outstanding and new topics and feature requests +2. Review issue tracker +3. Review documentation + +## Approach and Plan + + + +1. discuss with @jcfr any topics related to the possible integration with [ITK-Wasm dicom package](https://github.com/InsightSoftwareConsortium/itk-wasm/tree/main/packages/dicom) +2. revisit basic python wrapping, similar to [pyplastimatch](https://github.com/AIM-Harvard/pyplastimatch) done by @denbonte +3. clean up issue tracker +4. discuss support of enhanced multiframe as SEG reference images and confirm understanding of the level of support of enhanced multiframe in Slicer/ITK (@michaelonken tried and failed to load any such images in Slicer) +5. revisit documentation +6. discuss interoperability testing with highdicom and dcmjs and maybe other tools. +7. Help @JoostJM with possible integration into CaseIterator +8. Discuss the use case for running conversion in absence of source image DICOM files, per request from @rfloca and as discussed in . +9. Discuss the process and schedule of upgrading dcmqi in MITK (@rfloca I did submit a PR, but not sure if those are reviewed? ) + +## Progress and Next Steps + + +1. Discussed revisions to API with Ralf, agreed on addressing https://github.com/QIICR/dcmqi/issues/390 +2. Revisited the topic of encoding background voxels, further discussions with David C will be needed https://github.com/QIICR/dcmqi/issues/490 +3. Confirmed with Ralf that github PRs to MITK is the proper mechanism for updating dcmqi version, current PR will be addressed +4. 
Spent some time cleaning up issue tracker. +5. Started revising documentation. Need further discussions with adopters (Cosmin, Leo, Dennis) to make further progress. + +# Illustrations + + + +*No response* + +# Background and References + + + +* [dcmqi](https://github.com/QIICR/dcmqi) diff --git a/PW40_2024_GranCanaria/Projects/RunKaapanaOnGkeAndImproveConnectivityOfKaapanaToIdc/README.md b/PW40_2024_GranCanaria/Projects/RunKaapanaOnGkeAndImproveConnectivityOfKaapanaToIdc/README.md new file mode 100644 index 000000000..2f6e47b44 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/RunKaapanaOnGkeAndImproveConnectivityOfKaapanaToIdc/README.md @@ -0,0 +1,98 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Improve connectivity of Kaapana to IDC +category: Cloud / Web +presenter_location: Online + +key_investigators: + +- name: Mikulas Bankovic + affiliation: DKFZ + country: Germany + +- name: Vamsi Thiriveedhi + affiliation: BWH + country: USA + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +- name: Hanno Gao + affiliation: DKFZ + country: Germany + +--- + +# Project Description + + + +The aim of this project is to enhance the integration between Kaapana and IDC, specifically enabling Kaapana to interact with an external DICOMweb endpoint, as Google Healthcare API behind IDC for image storage. + +Kaapana is an open-source toolkit designed for platform provisioning in the field of medical data analysis. It leverages a variety of open-source tools relevant to the NA-MIC community, including OHIF Viewer, MITK, and nnU-Net segmentation tools. Kaapana uses DICOM for managing images, image-derived data, and metadata. It offers functionalities such as workflows for large-scale image processing, exploration, visualization, and curation of medical images, extensions for simple integration of new, customized algorithms and applications, an integrated PACS system, and extensive resource and system monitoring for administrators. 
+ +NCI Imaging Data Commons (IDC) is a cloud-based repository of publicly available cancer imaging data co-located with the analysis and exploration tools and resources. IDC is a node within the broader NCI Cancer Research Data Commons (CRDC) infrastructure that provides secure access to a large, comprehensive, and expanding collection of cancer research data. + +## Objective + + + +We aim to add an external DICOMweb-based server specifically, Google Healthcare API DICOM stores, in addition to the internal dcm4chee server, enhancing the ability of Kaapana to process external images. +We aim to achieve the same functionality with external dicomweb server as with we currently have with local dicomweb server. + +## Approach and Plan + + + +The approach involves several steps: + +1. **Establish the connection** from kaapana code-server platform to the external dicom server (REST?) +2. Retrieve all dicom **metadata** from the **external** server +3. Write a workflow to add **metadata** to kaapana **opensearch meta dashboard view** +4. Retrieve dicom **thumbnail images** from the server +5. Write a workflow to add them to **datasets view** +6. **Flag** external dicoms to differentiate between **used storage DICOMWeb servers**. +7. Enable using the data in dicom store as input for existing workflows in kaapana +8. If possible, integrate **OHIF viewer** on kaapana with GCP dicom store + + +## Progress and Next Steps + + + +1. Implemented new dag and operator in kaapana workflow able to **import metadata** and **name the dataset**. (1-3) +2. Able to retrieve thumbnail images, but unable to integrate it to the kaapana yet, due to choosing the wrong path :D (4) +3. During import, I identify tags: **00080016 source entity title** and **00080026 source presentation address** as places where to store meta information, that these metadata are from external sources and address where it is possible to retrieve them. This will be reused later. +4. 
7-8 WIP + +![image](https://github.com/NA-MIC/ProjectWeek/assets/33953801/696ce517-7f43-4255-96c4-e6b712891a6d) + + +# Illustrations + + +Kaapana datasets **thumbnail** view: +![Screenshot from 2024-01-23 15-10-36](https://github.com/NA-MIC/ProjectWeek/assets/33953801/4a63ff25-47b0-4b1f-bac6-994e5fb2b05a) + +Ohif Viewer: +![Screenshot from 2024-01-29 09-14-51](https://github.com/NA-MIC/ProjectWeek/assets/33953801/eb30e056-3f55-47f6-9a06-cb9407348e56) + +# Background and References + + + +Kaapana Docs: https://kaapana.readthedocs.io/en/stable/ + +Kaapana Repo: https://github.com/kaapana/kaapana + +Google Healthcare API Dicomweb: https://cloud.google.com/healthcare-api/docs/how-tos/dicomweb + +Google Healthcare API: https://cloud.google.com/blog/topics/healthcare-life-sciences/getting-to-know-the-google-cloud-healthcare-api-part-1 + +Google Dicomweb CLI https://github.com/GoogleCloudPlatform/healthcare-api-dicomweb-cli diff --git a/PW40_2024_GranCanaria/Projects/SlicerHub/README.md b/PW40_2024_GranCanaria/Projects/SlicerHub/README.md new file mode 100644 index 000000000..efd582974 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/SlicerHub/README.md @@ -0,0 +1,66 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Advancing SlicerHub - open issues and vision as part of a virtual hospital +category: Cloud / Web +presenter_location: Online + +key_investigators: +- name: Rafael Nebot Medina + affiliation: Instituto Tecnológico de Canarias (ITC) + country: Spain + +- name: Juan Ruiz Alzola + affiliation: Universidad de Las Palmas de Gran Canaria + +- name: Paula Moreno Fajardo + affiliation: ITC + +--- + +# Project Description + +From the experience of developing SlicerHub (present at 38th edition of NA-MIC) within OpenDx28 project, using Teide HPC infrastructure, open issues and future vision should be discussed to +define work for the forthcoming project, the Virtual Hospital. 
+ +## Objective + + + +1. Talk with experts present at the workshop to gather information to formulate possible solutions to having multiple Kubernetes pods using OpenGL in the same node. +2. Open discussion to define and take note of vision and specific desirable features of having SlicerHub in a virtual hospital. + +## Approach and Plan + + + +1. Show the deployed implementation to interested people. +2. Explain issues. Mostly focus on executing Slicer multiple times in the same Kubernetes node with OpenGL accelerated by an nVidia card. +3. Take note of possible approaches or solutions for the issues. +4. Explain ideas for SlicerHub in the Virtual Hospital to interested people. From resource profiles, to preconfigured sessions (with pre-loaded data), etc. +5. Document new ideas or modify existing ones. + +## Progress and Next Steps + + + +1. Describe specific steps you **have actually done**. +1. ... +1. ... + +# Illustrations + + + +# Background and References + +- OpenDx28 project, just finished, allowed to experiment in deployment and integration of services related to health. 
OpenDx28 Github organization is at [https://github.com/OpenDx28](https://github.com/OpenDx28) +- The specific repository for SlicerHub at [https://github.com/OpenDx28/3dslicerhub](https://github.com/OpenDx28/3dslicerhub) +- The Docker image spawned by SlicerHub at [https://github.com/OpenDx28/docker-slicer](https://github.com/OpenDx28/docker-slicer) +- This image depends on another, at [(https://github.com/OpenDx28/docker-vnc-base](https://github.com/OpenDx28/docker-vnc-base) diff --git a/PW40_2024_GranCanaria/Projects/SlicerSofaIntegration/README.md b/PW40_2024_GranCanaria/Projects/SlicerSofaIntegration/README.md new file mode 100644 index 000000000..998caf06c --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/SlicerSofaIntegration/README.md @@ -0,0 +1,85 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Slicer-SOFA integration +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Rafael Palomar + affiliation: Oslo University Hospita + country: Norway + +- name: Paul Baksic + affiliation: INRIA + country: France + +- name: Steve Pieper + affiliation: Isomics + country: US + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +--- + +# Project Description + + + +SOFA is an open source framework targeted at interactive biomechanical simulation, with an emphasis on medical simulation and robotics. Relying on a C++ implementation, SOFA offers efficient algorithms and methods to solve continuum mechanics problems. Its interactive capability makes it a great tool for off- and on-line medical applications. The SOFA core has a LGPL license, which is permissive and non-contaminating. + +Relying on the Finite Element Methods (FEM), SOFA requires a mesh (space discretization) of the simulated objects as input. 
Eventhough SOFA offers several numerical strategies to reach the best performances possible (preconditionning, multithreading, etc…), this mesh has a direct impact on the simulation stability and performance. This is usually overcome through a long process of mesh refinement and simulation testing to reach computation time suitable for interactive simulation of deformable bodies. + +Integrating SOFA to Slicer may ease this process and offer a fully integrated pipeline bridging medical imaging, processing and patient-specific simulation. This integration may be of great interest for the Slicer community to help design and bootstrap finite element simulations. + +## Objective + + + +1. Define correctly the integration’s limits. What is desirable and at which point it is meaningless and the user should then switch to SOFA itself + a roadmap for the complete integration. +2. Define through what means the integration will be done : Python bindings / C++. +3. Enable the sharing of data structures to be able to use the meshs without the need of multiple copies. +4. Define the prefabs needed to automatically create a sofa scene without the need of fine tuning directly the scene with the current meshes. +5. Integrate the SOFA scene graph in read/write to be able to interact with sofa objects. +6. Add a way to define bounding boxes for sofa or manually specify indices for boundary conditions. + +## Approach and Plan + + + +1. Discuss with Slicer devs and users that also use SOFA to gather the needs. Defines the actual steps to reach the desired integration and distribute them among the projet members (it may take more time than the actual project week). +2. Define a simple POC with few features that is doable during the remaining days of the week to start working on it directly. + +## Progress and Next Steps + +1. We have discussed different architectures for the integration (Superbuild Extension, ROS2 and Client/Server through OpenIGTLink). 
We decided to implement the third alternative. +2. A SOFA-OpenIGTLink extension was created to support sending simulation updates to 3D Slicer +3. A Slicer-SOFA extension was created to automate the scene loading and creation of the communication channel with SOFA + + + + While the progress can still be considered more a demo than a generic infrastructure, we have established the first steps towards making Slicer-SOFA an extension that will allow to define, run and analyze simulation using SOFA. + +# Illustrations + + + +![liver2](https://github.com/NA-MIC/ProjectWeek/assets/1978682/cf22da12-5459-43cb-b7e2-1021d5648f69) + +![LiverRendered](https://github.com/NA-MIC/ProjectWeek/assets/1978682/3219f2dc-1cfb-4053-bf1f-f0bd9b34249c) + +![ImagingUSScene_IRCAD_00000005](https://github.com/NA-MIC/ProjectWeek/assets/1978682/02aa0256-a641-476e-a03e-541cfc86192d) + +# Background and References + + + +SOFA 2012 paper: +GitHub: +API doc: diff --git a/PW40_2024_GranCanaria/Projects/SlicerVisualDICOMBrowser/README.md b/PW40_2024_GranCanaria/Projects/SlicerVisualDICOMBrowser/README.md new file mode 100644 index 000000000..ee1bbf091 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/SlicerVisualDICOMBrowser/README.md @@ -0,0 +1,105 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Visual DICOM browser +category: DICOM +presenter_location: In-person + +key_investigators: +- name: Davide Punzo + affiliation: Freelancer + country: France + +- name: Andras Lasso + affiliation: Perk Labs + country: Canada + +- name: Steve Pieper + affiliation: Isomics, Inc. + country: USA + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware + country: USA + +- name: anyone is welcome! + affiliation: + country: +--- + +# Project Description +The visual DICOM browser provides a new user interface for quick viewing and retrieval of patient images stored on remote DICOM servers. 
The new tool is optimized for clinical workflows where the focus is on all the images of a single patient - as opposed to the existing DICOM browsing experience, which was more suitable for bringing together images from many patients. + +Both server and local content are located at the same place and are visualized by thumbnails. All data is retrieved in the background using classic DIMSE networking (most commonly used protocols in hospitals), in multiple concurrently running threads. The currently supported operations are: + +- Browsing and filtering with thumbnails of content of local DICOM database and multiple remote DICOM servers. +- Query/Retrieve data from servers (DIMSE `C-FIND`, `C-GET`, `C-MOVE` SCU). All the operations are done in background and in parallel. Downloaded data is automatically cached in the local DICOM database. A unique feature is the possibility to retrieve images using C-GET protocol (suitable for cases when many Slicer instances are running in docker containers) with a clinical PACS that only supports C-MOVE protocol (most clinical PACS), via a proxy server (such as the free Orthanc). +- Import data from local files. +- Receive data sent from remote PACS (DIMSE `C-STORE` SCP). +- Send data to remote PACS (DIMSE `C-STORE` SCU). +- Quick browsing of all DICOM metadata and pixel data. +- Remove data from local database (not from server). + +The widget is currently an experimental feature in Slicer (DICOM module). Current Roadmap is at [link](https://github.com/commontk/CTK/issues/1162). + +Possible long term ENH to discuss/work during the project week: +- add data streaming from visual brower series widgets to Slicer volume nodes. +- handle jobs queue in the scheduler by file (so we can restart the jobs/workers at application restart). +- implementing send in C++ at ctk level (i.e. adding `ctkDICOMSendJob`, `ctkDICOMSendWorker` and `ctkDICOMSend` with underlining DIMSE `DcmStorageSCU`). 
This would allow to use the background/parallel operations infrastructure for SEND as well. +- add `DICOMweb`. + +## Objective +Finalize the ctk visual DICOM browser: + +1. Get feedback from users/developers. +1. Prioritize short term ENH-BUG fixes to do. See [Roadmap](https://github.com/commontk/CTK/issues/1162) for more info. +1. Discuss the long term ENH. + +## Approach and Plan + +1. Have a meeting/demo with people interested for colletting feedback. +1. Prioritize/coordinated any future development based on the feedback. + +## Progress and Next Steps + +1. A Job list UI has been implemented/tested. +2. We had a demo on Tuesday and we discussed the roadmap in details. +3. I have started implementating the feedback. See the [roadmap](https://github.com/commontk/CTK/issues/1162) for detailed informations. + +# Illustrations +screenshots: + +|Visual DICOM Browser | Jobs and Settings| +|--- | ---| +| | | + + +video: + + + + +UML Diagram: + + + + +# Background and References + +[PW38 Project](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/SlicerVisualDICOMbrowser/) + +[CTK PR1](https://github.com/commontk/CTK/pull/1142) + +[CTK PR2](https://github.com/commontk/CTK/pull/1165) + +[Slicer PR](https://github.com/Slicer/Slicer/pull/7525) + +[Roadmap](https://github.com/commontk/CTK/issues/1162) + +[Logging UI CTK PR](https://github.com/commontk/CTK/pull/1184) diff --git a/PW40_2024_GranCanaria/Projects/Slicerarduino/README.md b/PW40_2024_GranCanaria/Projects/Slicerarduino/README.md new file mode 100644 index 000000000..9ecf50340 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/Slicerarduino/README.md @@ -0,0 +1,72 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: SlicerArduino +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Paolo Zaffino + affiliation: Magna Graecia University of Catanzaro + 
country: Italy + +- name: Maria Francesca Spadea + affiliation: Karlsruhe Institute of Technology + country: Germany + +--- + +# Project Description + + + +[SlicerArduino](https://www.mdpi.com/2306-5354/7/3/109) is a Slicer extension that allows to stream data from/to [Arduino](https://www.arduino.cc/) (and Arduino-like) board. +The extension is already deployed but several improvements can be made. + +## Objective + + + +1. Add Bluetooth support +2. Manage connection drop +3. Think about switching from pyserial to qt serial module + +## Approach and Plan + + + +1. Edit SlicerArduino code having Arduino board with Bluetooth module on the side +2. Discuss with people about switching from pyserial to qt serial module +3. Manage connection drop + +## Progress and Next Steps + + + +1. Bluetooth connection was investigated. Preliminary tests were done just by interfacing Arduino board with the computer, without including Slicer in the loop (for the moment). +2. Connection drop management is now implemented and working. Right now it is in a separate git branch, some code cleanup is required. + When the connection drops a vtkErrorEvent is invoked on the arduinoNode and a popup appears to the user. +4. QSerialPort is not included by default in the current Slicer build but it can be done. + Performance/feasibility tests have to be carried on to understand if migration is worth it. +5. We had a (very nice and useful) meeting to discuss SlicerArduino both from a developer and user point of view. 
+ +# Illustrations + + + +![image](https://github.com/NA-MIC/ProjectWeek/assets/4259198/28afb312-9ae4-4db7-a54a-09a2fc0a9585) + +![image](https://github.com/NA-MIC/ProjectWeek/assets/4259198/e523bf25-cf5c-46e7-85f0-0fb0f3b03fc8) + +# Background and References + + + +[SlicerArduino paper](https://www.mdpi.com/2306-5354/7/3/109) +[SlicerArduino GitHub page](https://github.com/pzaffino/SlicerArduinoController) +[SlicerArduino website](https://pzaffino.github.io/SlicerArduinoController/) diff --git a/PW40_2024_GranCanaria/Projects/StatisticalShapeModelingWithSlicerAndShapeworks/README.md b/PW40_2024_GranCanaria/Projects/StatisticalShapeModelingWithSlicerAndShapeworks/README.md new file mode 100644 index 000000000..1adba83f4 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/StatisticalShapeModelingWithSlicerAndShapeworks/README.md @@ -0,0 +1,89 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Statistical Shape Modeling with Slicer and ShapeWorks +category: Quantification and Computation +presenter_location: Online + +key_investigators: + +- name: Jess Tate + affiliation: University of Utah + country: USA + +--- + +# Project Description + + + +Statistical shape modeling (SSM) is fundamental for quantifying and understanding morphological variations correlated with various biological processes, such as morphogenesis, function, adaptation, and disease. SSM is can be used in impacts a wide spectrum of clinical applications, including pre- and post-operative surgical planning, reconstructive surgery, and design of optimal patient-specific implants and bone surrogates and spacers. Clinical research entailing large cohorts also benefits from population-level SSM. + +As the need for SSM in clinical applications and large data analysis grows, the need for reliable and easy to use tools becomes more imminent. 
While technology and capabilities of SSM have continually expanded, we have focused the development of ShapeWorks on incorporating these latest technologies with UX and UI designed for ease-of-use. One of the products of this effort is an interface between ShapeWorks and Slicer through Slicer's plugin architecture, expanding the capabilities of both tools. + +We will a compile and present a series of tutorials of SSM tools to provide an introductory skillset to participants looking to incorporate SSM into their research or to broaden their understanding of the available technologies. We will offer tutorials using Slicer's integration with ShapeWorks and advanced features implemented available in ShapeWorks. The planned tutorials will highlight SSM technology that allow beginners and advanced users of SSM to apply these techniques to their Slicer data processing pipelines. + +## Objective + + + +We planned to impart training and knowledge of SSM and available tools to project participants to improve understanding and adoption of SSM technology. We also seek to understand the communities needs and suggestions for how to meet those needs with SSM. + +## Approach and Plan + + + +Present an introductory presentation and series of tutorials for SSM, ShapeWorks, and its integration with Slicer. The planned presentations and tutorials are as follows: + + - Introduction to SSM and ShapeWorks (presentation) + - Slicer's ShapeWorks plugin interface (tutorial) + - Advanced pipeline: Slicer to ShapeWorks + - Additional SSM tools using ShapeWorks + - Advanced SSM in ShapeWorks + + +## Progress and Next Steps + + + +1. Stable distributions of [ShapeWorks](https://github.com/SCIInstitute/ShapeWorks) +2. Alpha version [Slicer Plugin](https://github.com/SCIInstitute/ShapeworksSlicerExtension) +3. [Introduction to SSM and ShapeWorks](https://www.sci.utah.edu/~shapeworks/doc-resources/mp4s/PW40_SW_introduction.mp4) +4. 
[ShapeWorks Bsic Usage, Slicer Plugin](https://www.sci.utah.edu/~shapeworks/doc-resources/mp4s/PW40_SW_tutorial.mp4) +5. Consultations for individual shape modeling projects + +# Illustrations + + + +![ShapeWorks Pipeline Image](http://sciinstitute.github.io/ShapeWorks/latest/img/home/about-shapeworks.png) + +# Session Recordings + +*Introduction to SSM and ShapeWorks* + + +*ShapeWorks Basic Usage, Slicer Plugin* + + + +# Background and References + + + +- [ShapeWorks Repo](https://github.com/SCIInstitute/ShapeWorks) +- [ShapeWorks Documentation](http://sciinstitute.github.io/ShapeWorks/latest/) +- [ShapeWorks Plugin](https://github.com/SCIInstitute/ShapeworksSlicerExtension) +- [ShapeWorks References](http://sciinstitute.github.io/ShapeWorks/latest/users/papers.html) diff --git a/PW40_2024_GranCanaria/Projects/SurgicalPlanningSystemForColorectalSurgeryBasedOnMixedRealityTechnology/README.md b/PW40_2024_GranCanaria/Projects/SurgicalPlanningSystemForColorectalSurgeryBasedOnMixedRealityTechnology/README.md new file mode 100644 index 000000000..9dca47501 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/SurgicalPlanningSystemForColorectalSurgeryBasedOnMixedRealityTechnology/README.md @@ -0,0 +1,84 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Surgical planning system for colorectal surgery based on Mixed Reality technology +category: VR/AR and Rendering +presenter_location: Online + +key_investigators: + +- name: Juan A. 
Sánchez-Margallo + affiliation: Jesús Usón minimally invasion surgical centre + country: Spain + +- name: Daniel Caballero + affiliation: Jesús Usón minimally invasion surgical centre + country: Spain + +- name: Lucia Salazar-Carrasco + affiliation: Jesús Usón minimally invasion surgical centre + country: Spain + +--- + +# Project Description + + + +The objective of this project is to implement the required tools for the development of a surgical planning system for colorectal interventions combining segmentation techniques and Mixed Reality (MR) technology. These tools are based on CT studies using 3D slicer as a graphical support and python as a programming language, as well as Unity as a MR engine. + +## Objective + + + +1. Objective A. + The general idea is to implement a semi-automatic method of the colon anatomy using a CT scan under Slicer 3D platform. This 3D model of the colon will be used for marking interest points such as lesions (polyps and tumors) and resection planes for the subsequent surgical intervention. +2. Objective B. + To mplement the result of the project with the model that we obtain in 3D Slicer using Mixed Reality (Hololens). This wil offer surgeons an interactive aplication based on MR for the surgical planning of the subsequent colorectal intervention, including the previously defined points of interest (lesions) and resection lines. + +## Approach and Plan + + + +1. Analyze the existing literature about the tools to be used in this project (3D Slicer, Python, and Unity). +2. Optimize the segmetation process of the 3D model. +3. Improve the method of annotation of the points of interest. +4. Improve the method of annotation of the resection lines. +5. Import the 3D model that we obtain in 3DSlicer to Unity. +6. Do the scripts we need in Unity with the model to be read. +7. Show the points of interest and the resection lines in the model with the information that we obtain in 3DSlicer. +8. 
Upload the final 3D model to the MR device (Hololens) with all the information for the surgeon interation. +9. Use the aplication in the surgical site to improve the planning of colorectal surgical activities. + +## Progress and Next Steps + + + +1. We have done the semi-automatic method of the colon anatomy using a CT scan under Slicer 3D platform based on region growing algorithms. +2. We are working in the scripts in Unity to develop the MR application, so that we can read the 3D model and points of interest and resection lines of the surgical planning. + +# Illustrations + + + + + +FotoColonCaseib 1 + +# Background and References + + + +* C Lobato-Gómez, L Salazar-Carrasco, JA Sánchez-Margallo, FM Sánchez-Margallo. Diseño e implementación de un planificador quirúrgico para cirugía colorrectal junto con un visualizador basado en realidad mixta. CASEIB 2023 Cartagena, Spain. +* C Lobato, E Pedregosa, JA Sánchez-Margallo, I Sánchez-Varo, B Durán, FM Sánchez-Margallo. Design of a colon model for training in transanal surgery using semi-automatic segmentation tools and 3D printing techniques. 34th Annual International Society for Medical Innovation and Technology Conference (iSMIT 2023). 19/10/23-21/10/23, Lukang (Taiwan). +* Pedregosa E, Sánchez-Margallo JA, Lobato C, Sánchez-Varo I, Plaza C, Durán B, Sánchez-Margallo. Preliminary validation of training tools for colorectal surgery based on 3D printing and mixed reality. 31st International Congress of the European Association for Endoscopic Surgery (EAES). 20/6/23-23/6/23, Roma (Italia). +* Juan A. Sánchez Margallo, Carlos Lobato Gómez, Francisco M. Sánchez Margallo. Colon segmentation using 3DSlicer. Certamen Internacional de Cine Médico y Salud (VIDEOMED 2022). 28/11/22-1/12/22, Badajoz (España). +* JA Sánchez-Margallo. The role of mixed reality in MIS. 30th International Congress of the European Association for Endoscopic Surgery (EAES). 5/7/22-8/7/22, Kraków (Poland). 
diff --git a/PW40_2024_GranCanaria/Projects/Template/README.md b/PW40_2024_GranCanaria/Projects/Template/README.md new file mode 100644 index 000000000..270e7d02f --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/Template/README.md @@ -0,0 +1,58 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Write full project title here +category: Uncategorized +presenter_location: Online + +key_investigators: +- name: Person Doe + affiliation: University + +- name: Person2 Doe2 + affiliation: University2 + country: Spain +--- + +# Project Description + + + +## Objective + + + +1. Objective A. Describe **what you plan to achieve** in 1-2 sentences. +1. Objective B. ... +1. Objective C. ... + +## Approach and Plan + + + +1. Describe specific steps of **what you plan to do** to achieve the above described objectives. +1. ... +1. ... + +## Progress and Next Steps + + + +1. Describe specific steps you **have actually done**. +1. ... +1. ... + +# Illustrations + + + +# Background and References + + diff --git a/PW40_2024_GranCanaria/Projects/Template/README.md.j2 b/PW40_2024_GranCanaria/Projects/Template/README.md.j2 new file mode 100644 index 000000000..01c72fcf0 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/Template/README.md.j2 @@ -0,0 +1,56 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: {{ title | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +category: {{ category | regex_replace("\n$|\n\.\.\.\n$", "") }} +presenter_location: {{ presenter_location | regex_replace("\n$|\n\.\.\.\n$", "") }} + +key_investigators: +{% for investigator in investigators %} +- name: {{ investigator.name | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} + affiliation: {{ investigator.affiliation | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +{%- if investigator.country %} + country: {{ investigator.country | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +{%- endif %} +{% endfor %} +--- + +# Project Description + + + +{{ description }} + 
+## Objective + + + +{{ objective }} + +## Approach and Plan + + + +{{ approach }} + +## Progress and Next Steps + + + +{{ progress }} + +# Illustrations + + + +{{ illustrations }} + +# Background and References + + + +{{ background }} diff --git a/PW40_2024_GranCanaria/Projects/ThirdMolarExtractionClassification/README.md b/PW40_2024_GranCanaria/Projects/ThirdMolarExtractionClassification/README.md new file mode 100644 index 000000000..5d7b9ed29 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/ThirdMolarExtractionClassification/README.md @@ -0,0 +1,80 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Third molar extraction classification +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Roberto Veraldi + affiliation: Magna Graecia University of Catanzaro + country: Italy + +- name: Amerigo Giudice + affiliation: Magna Graecia University of Catanzaro + country: Italy + +- name: Paolo Zaffino + affiliation: Magna Graecia University of Catanzaro + country: Italy + +- name: Maria Francesca Spadea + affiliation: Karlsruhe Institute of Technology + country: Germany + +--- + +# Project Description + + + +The classification of third molar extraction is a key factor in oral surgery. Developing a deep learning model to classify the difficulty score of extraction would be useful for surgeons and dentists. +This project aims to create a Slicer module that allows clinicians to obtain an extraction-difficulty grade by providing just the patient CT. + +## Objective + + + +To expose an already developed deep learning classifier in Slicer. + +## Approach and Plan + + + +1. Identification of optimal classification parameters +2. Expose weights into Slicer +3. Generate extension + +## Progress and Next Steps + + + +Done during this week: +1. Obtained pth file with the model for deep learning classification. +2. Implemented module extention in Slicer. +3. 
Tested if the same label obtained in testing was the same that appeared in output in Slicer. + +Future steps: +1. Integrating weight files for the specific classification (maybe giving to the clinicians the possibility to download locally the right weights for their specific tasks). +2. Specify what label score means. +3. Other modifications for a general usage of the extension. + +# Illustrations + + + + +# Background and References + + +1. GitHub Project Page: https://github.com/robsver/3DSlicerClassificator +2. Classification score table for third molar extraction: Juodzbalys, Gintaras, and Povilas Daugela. "Mandibular third molar impaction: review of literature and a proposal of a classification." Journal of oral & maxillofacial research 4.2 (2013). diff --git a/PW40_2024_GranCanaria/Projects/TrainingDeepClassifiersForBLineDetectionInLungUltrasoundVideosUsingCrowdsourcedLabels/README.md b/PW40_2024_GranCanaria/Projects/TrainingDeepClassifiersForBLineDetectionInLungUltrasoundVideosUsingCrowdsourcedLabels/README.md new file mode 100644 index 000000000..0b5fdfa84 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/TrainingDeepClassifiersForBLineDetectionInLungUltrasoundVideosUsingCrowdsourcedLabels/README.md @@ -0,0 +1,96 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Training deep learning models for B-line detection and localization in lung ultrasound videos using crowdsourced labels +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Mike Jin + affiliation: Brigham and Women's Hospital + country: USA + +- name: Tamas Ungi + affiliation: Queen's University + country: Canada + +- name: Ameneh Asgari-Targhi + affiliation: Brigham and Women's Hospital + country: USA + +- name: Tina Kapur + affiliation: Brigham and Women's Hospital + country: USA + +--- + +# Project Description + + + +AI models for medical imaging need labeled data, which can be hard to obtain with 
the required volume and/or accuracy. + +Crowdsourced labels on medical imaging data can help bridge that gap, as suggested by our data showing that crowdsourced classifications and segmentations for B-lines on lung ultrasound videos have comparable or better accuracy than annotations from medical experts with advanced training in lung ultrasound [(Duggan 2023)](https://arxiv.org/pdf/2306.06773.pdf) [(Jin 2023)](https://arxiv.org/pdf/2312.10198.pdf). + +To demonstrate this, we will train deep learning B-line classification and segmentation models using crowdsourced annotations on lung ultrasound videos collected from 483 patients and compare their performance to models trained on manual annotations from experts. + +## Objective + + + +Produce models for classification and segmentation of lung ultrasound videos with comparable or improved performance to models trained on manual annotations from experts. + +## Approach and Plan + + + +1. Using crowdsourced labels, train CNN-RNN deep learning models to **classify** lung ultrasound videos as having no B-lines, discrete B-lines, or confluent B-lines. +2. Using crowdsourced labels, train deep learning models to **segment** individual frames within lung ultrasound videos for the presence and location of individual B-lines using open-source code from [(Lucassen 2023)](https://ieeexplore.ieee.org/document/10143623). +3. Compare the performance of crowdlabel-trained models to models trained on manual annotations from experts. + +## Progress and Next Steps + + + +Collected 330,000 (177,000 at start of PW40) crowdsourced segmentation opinions to form high-quality segmentations of 21,000 (8,500 at the start of PW40) frames within the videos from 483 patients. + +In progress: +1. Collecting ~3,000 additional high-quality frame segmentations per day with a target of 20,000 frames remaining which will be completed this month (41,000 total). +2. 
Training video classification models on high-quality classifications of 4,030 ultrasound videos from 483 patients. +3. Preparing training data for segmentation models. + +# Illustrations + + + +What are B-lines in lung ultrasound? Here is a picture. The white beams are B-lines, the dark sectors are shadows from ribs. + +Screenshot 2024-01-29 at 1 14 33 PM + +For crowdsourcing, we collect opinions from 5 experts and combine their opinions, like so. Yellow is the expert consensus: + +Screenshot 2024-01-29 at 1 16 56 PM + +Then, we collect opinions from crowd using a gamified system very similar to Google's RECAPTCHA. We take the most reliable opinions and combine them to get a high-quality consensus. Here are the crowd opinions for the same image frame, with the expert consensus in yellow: + +Screenshot 2024-01-29 at 1 19 19 PM + +For this image, here are the crowd consensus and expert consensus both overlaid: + +Screenshot 2024-01-29 at 1 21 08 PM + +We will be continuing to crowdsource B-line segmentations throughout Project Week! 
Here is the visualization of the progress so far (mid-day 2024-01-29): + +Screenshot 2024-01-29 at 1 22 02 PM + + +# Background and References + + + +*No response* diff --git a/PW40_2024_GranCanaria/Projects/TutorialsOnWorkingWithDicomAnnotationsInPathologyWsi/README.md b/PW40_2024_GranCanaria/Projects/TutorialsOnWorkingWithDicomAnnotationsInPathologyWsi/README.md new file mode 100644 index 000000000..a2bf56f15 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/TutorialsOnWorkingWithDicomAnnotationsInPathologyWsi/README.md @@ -0,0 +1,70 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Tutorials on working with DICOM annotations in pathology WSI +category: DICOM +presenter_location: In-person + +key_investigators: + +- name: Daniela Schacherer + affiliation: Fraunhofer MEVIS + country: Germany + +- name: Chris Bridge + affiliation: MGH + country: USA + +- name: David Clunie + affiliation: PixelMed + country: USA + +- name: André Homeyer + affiliation: Fraunhofer MEVIS + country: Germany + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +--- + +# Project Description + + + +This project aims to create tutorials on how to work with DICOM annotations in pathology whole-slide images (WSIs). We will focus on region annotations stored as DICOM Structured Reports (SR) for a dataset of Rhabdomyosarcoma, which was recently ingested into the Imaging Data Commons (IDC). We want to create an easy-to-follow workflow that extracts images and annotations from the IDC and uses established (python) libraries for model training and evaluation. +If time permits we will continue working on nuclei annotations stored as DICOM Microscopy Bulk Simple Annotations (MBSA). This work was started in the last project week in Montreal, but hindered by technical issues (see [here](https://projectweek.na-mic.org/PW39_2023_Montreal/Projects/TutorialsOnWorkingWithDicomAnnotationsInPathologyWholeSlideImages/)). + +## Objective + + + +1. 
Objective A: Have a Colaboratory notebook ready that explains work with DICOM SR and uses the annotations for some exemplary use case. +2. Objective B (optional): Have a Colaboratory notebook ready that exemplifies work with DICOM MBSA. + +## Approach and Plan + + + +1. Set-up Google Healthcare DICOM store holding the available DICOM SR annotations. +2. Investigate which libraries are suitable to show-case an easy to follow workflow, e.g. slideflow or HistomicsTK. + +## Progress and Next Steps + +1. Set-up Google Healthcare DICOM store holding the available DICOM SR annotations. +2. Investigating libraries suitable to show-case an easy-to-follow workflow took way more time than expected as most publicly available state-of-the-art algorithms don't work with DICOM images, nor do they work with DICOM annotation objects, like SR or SEG. Instead they usually require some specifically formatted csv file, which is very counterproductive to our main goal: providing easy workflows from DICOM annotations in the IDC to analysis algorithms. However, I had several valuable discussions about other people's best practices and summarized available libraries' capabilities trying to figure out the main barriers that prevent direct usage of DICOM objects. +3. [Documentation](https://docs.google.com/document/d/1xI9ZbZOk_nTz8YDP3xeozRspN1T-2zxQ79ecPufFfH0/edit?usp=sharing) and [Code](https://colab.research.google.com/drive/1aM3IgvPSk7OEzmg1YIoxGT-A4AaDxhqn?usp=drive_link) are still work-in-progress and will be extended after the Project Week. + +# Illustrations + +![Overview DICOM structured reports IOD](./overview_dicom_sr.png) \ +*Overview DICOM structured reports IOD. 
Taken from https://doi.org/10.1038/s41467-023-37224-2.* + +# Background and References + +- [DICOM Structured Reports](https://dicom.nema.org/dicom/2013/output/chtml/part20/sect_A.3.html) +- Documentation [slideflow](https://slideflow.dev/) +- Documentation [HistomicsTK](https://digitalslidearchive.github.io/HistomicsTK/api-docs.html) diff --git a/PW40_2024_GranCanaria/Projects/TutorialsOnWorkingWithDicomAnnotationsInPathologyWsi/overview_dicom_sr.png b/PW40_2024_GranCanaria/Projects/TutorialsOnWorkingWithDicomAnnotationsInPathologyWsi/overview_dicom_sr.png new file mode 100644 index 000000000..7eeff38d7 Binary files /dev/null and b/PW40_2024_GranCanaria/Projects/TutorialsOnWorkingWithDicomAnnotationsInPathologyWsi/overview_dicom_sr.png differ diff --git a/PW40_2024_GranCanaria/Projects/UpgradeOfAq3Dc/README.md b/PW40_2024_GranCanaria/Projects/UpgradeOfAq3Dc/README.md new file mode 100644 index 000000000..ff61b92cc --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/UpgradeOfAq3Dc/README.md @@ -0,0 +1,80 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Upgrade of AQ3DC +category: Quantification and Computation +presenter_location: Online + +key_investigators: + +- name: Leroux Gaelle + affiliation: University of Michigan + country: USA + +- name: Claret Jeanne + affiliation: University of Michigan + country: USA + +- name: Cevidanes Lucia + affiliation: University of Michigan + country: USA + +- name: Hutin Nathan + affiliation: CPE Lyon + country: France + +- name: Allemand David + affiliation: Kitware + country: USA + +- name: Prieto Juan Carlos + affiliation: University of North Carolina + country: USA + +--- + +# Project Description + + + +Developing an enhancement for the AQ3DC (Automatic Quantification 3D Computation) Slicer module. The upgrade aims to incorporate a comprehensive statistics output feature to complement the existing clinician-focused output, enabling rapid computation and analysis of volumetric data. 
+ +## Objective + + + +To enhance the AQ3DC Slicer module by integrating a statistics output feature for detailed data analysis, thereby improving its utility for research and clinical purposes. + +## Approach and Plan + + + +* Review the existing AQ3DC module to identify the current output capabilities and limitations. +* Design a statistics output framework that aligns with the needs of end-users, ensuring it is intuitive and user-friendly. +* Implement the statistics output feature, incorporating feedback from beta testing with a select group of clinicians and researchers. +* Conduct thorough testing to validate the accuracy and reliability of the statistical computations. +* Prepare documentation and tutorials for the new feature to facilitate easy adoption by users. + +## Progress and Next Steps + + + +* Completed the initial review of the AQ3DC module's output capabilities. +* Drafted a preliminary design for the statistics output feature and gathered feedback from potential users. 
+ +# Illustrations + + + +![AQ3DC_first_screenshot](https://github.com/NA-MIC/ProjectWeek/assets/91245687/99e96c55-25fd-481c-9bbe-c6123795f884) +![AQ3DC_second_screenshot](https://github.com/NA-MIC/ProjectWeek/assets/91245687/20bd079a-7987-4324-9a49-cd38ac97c939) + +# Background and References + + + +*No response* diff --git a/PW40_2024_GranCanaria/Projects/UsingOnnxRuntimeToFacilitateTheUsageOfPytorch3DModelsInWindows/README.md b/PW40_2024_GranCanaria/Projects/UsingOnnxRuntimeToFacilitateTheUsageOfPytorch3DModelsInWindows/README.md new file mode 100644 index 000000000..5a0f12c57 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/UsingOnnxRuntimeToFacilitateTheUsageOfPytorch3DModelsInWindows/README.md @@ -0,0 +1,69 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Using ONNX runtime to facilitate the usage of PyTorch3D models in Windows +category: Infrastructure +presenter_location: Online + +key_investigators: + +- name: Sal Choueib + affiliation: Ebatinca + country: Spain + +- name: Csaba Pinter + affiliation: Ebatinca + country: Spain + +- name: Juan Ruiz Alzola + affiliation: Ebatinca + country: Spain + +--- + +# Project Description + + + +The ONNX runtime is an intermediary machine learning framework that allows users to easily convert between different machine learning frameworks. The aim in this project is to leverage ONNX to utilize PyTorch3D models in Windows. + +## Objective + + + +1. Build ONNX on a windows machine +2. Use the ONNX runtime to load and run the following PyTorch model in windows: +3. Use the runtime to tune the performance of the model in windows +4. Deploy the model + +## Approach and Plan + + + +1. Investigate the ONNX runtime environment +2. Attempt to export the given PyTorch model in ONNX format +3. Attempt to import the model and run it in the ONNX runtime in windows +4. Investigate the performance tuning capabilities of ONNX +5. 
Outline a pipeline to streamline the conversion of PyTorch models to be used on windows systems + +## Progress and Next Steps + + + +*No response* + +# Illustrations + + + +*No response* + +# Background and References + + + +*No response* diff --git a/PW40_2024_GranCanaria/Projects/VirtualMetalPlateRegistrationForOrbitalFracture/README.md b/PW40_2024_GranCanaria/Projects/VirtualMetalPlateRegistrationForOrbitalFracture/README.md new file mode 100644 index 000000000..b239aa9e4 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/VirtualMetalPlateRegistrationForOrbitalFracture/README.md @@ -0,0 +1,89 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: virtual metal plate registration for orbital fracture +category: Other +presenter_location: Online + +key_investigators: + +- name: Chi Zhang, PhD + affiliation: Texas A&M University School of Dentistry + country: USA + +- name: Andrew Read-Fuller, MD, DDS + affiliation: Texas A&M University School of Dentistry + country: USA + +- name: Braedon Gunn + affiliation: Texas A&M University School of Dentistry + country: USA + +--- + +# Project Description + + + +Orbital fracture usually involve large areas in the floor and medial wall of the orbit. During surgery, surgeons usually remove cracked floor and medial wall of the orbit and use the plate to reconstruct the orbit. Thus, we aim to develop a module for register 3D model of preformed metal plates to orbital fracture sites for surgical planning and measuring the adaptability of plates to patients. **The registered plate should sit just above the unfractured bone of the orbit.** +![Screenshot 2024-01-07 at 7 15 58 PM](https://github.com/NA-MIC/ProjectWeek/assets/80793828/57ac9554-c731-4469-ae0d-96c396b80331) + +![Screenshot 2024-01-07 at 7 49 41 PM](https://github.com/NA-MIC/ProjectWeek/assets/80793828/1d87c9e6-fddc-40d8-96d7-eb1703b8ddf4) + +(MatrixORBITAL™ Preformed Orbital Plates) + +## Objective + + +1. 
Simulate the process of how surgeons would place the plate to fracture sites: + + - Plate can only rotate as it is fixed at the landmark "posterior stop", which marks the orbital process of the palatine bone. This area is usually preserved in orbital fracture cases and the most important landmark to screw the plate. + - ![image](https://github.com/NA-MIC/ProjectWeek/assets/80793828/ce362a75-ca3b-4953-94ff-20015473d77a) + - ![image](https://github.com/NA-MIC/ProjectWeek/assets/80793828/5c795578-a7ad-4dc7-9ff8-6d9774f17ed2) + + + - Allow the plate to sit above the unfractured area of the orbit rather than being superimposed with the orbit. + + +2. Interactive tool for fine tuning the plate registration, such as adding an interactive handler for the 3D model. + +3. Automated segmentation of the fractured orbit as a future goal. + +## Approach and Plan + + + +1. Using Fiducial Registration Wizard to do a pre-registration of the plate to the fractured orbit, then further register the posterior stop landmark on the plate to the actual posterior stop on the orbit. +2. Improved registration based on only allowing the plate to rotate around the posterior stop: select more surgical landmarks at the peripheral areas of the orbit and the plate. Register the plate again with it pivoted on the posterior stop. +3. Further refined registration to allow the plate to sit above the unfractured bone of the orbit. +4. Add an interactive handler for adjusting the plate manually. + +## Progress and Next Steps + + + +1. I am able to make the plate landmark set to rotate around a particular landmark, the posterior stop, to do a rigid registration by changing the rotation center from the centroid to the posterior stop and then do a singular value decomposition to generate a transformation matrix. +image + +2. 
The improved interaction handle widget by Kyle Sunderland (https://projectweek.na-mic.org/PW40_2024_GranCanaria/Projects/NewInteractionWidgetForTransformsMarkups/) has lots of potential for fine tuning the plate position after an initial plate registration. I can drag the center of rotation of the handle to the posterior stop or set up a rotation center in Python to rotate the plate around the posterior stop. I will discuss with the surgeon I am working with about some potential usage and perhaps new development for the plate registration. + +image + + +3. The challenge is to further rotate the plate until it sits above the unfractured bone of the orbit. + +# Illustrations + + + +*No response* + +# Background and References + + + +*No response* diff --git a/PW40_2024_GranCanaria/Projects/VisualizationAndReviewOfSegmentations/README.md b/PW40_2024_GranCanaria/Projects/VisualizationAndReviewOfSegmentations/README.md new file mode 100644 index 000000000..2e36e4672 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/VisualizationAndReviewOfSegmentations/README.md @@ -0,0 +1,101 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: Visualization and review of segmentations +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Deepa Krishnaswamy + affiliation: Brigham and Women's Hospital + country: USA + +- name: Vamsi Krishna Thiriveedhi + affiliation: Brigham and Women's Hospital + country: USA + +- name: Cosmin Ciausu + affiliation: Brigham and Women's Hospital + country: USA + +- name: Ron Kikinis + affiliation: Brigham and Women's Hospital + country: USA + +- name: Andrey Fedorov + affiliation: Brigham and Women's Hospital + country: USA + +--- + +# Project Description + + + +We have multiple sets of segmentation results that we would like to review with radiologists and clinicians. 
The first set of results are from running TotalSegmentator on patients from the National Lung Screening Trial (NLST). The second set includes results from work that Deepa and Cosmin are performing with training a model for abdominal MR/CT segmentation using synthesized data. + +We would like to review this work, and showcase some results during project week. + +## Objective + + + +1. Review results from TotalSegmentator on NLST patients. Use the Netter Atlas to learn about anatomy, and correlate this with the data that TotalSegmentator used for training, to further understand our results. +2. Review results from the abdominal segmentations on both MR and CT patients. + +## Approach and Plan + + + +1. We will run TotalSegmentator on a sample of 1000 patients from NLST, convert to DICOM representation, and create OHIF links. +2. We will create DICOM SEG representations of the abdominal segmentations on IDC data, and create OHIF links. + +## Progress and Next Steps + + + +General points: +1. How do we evaluate segmentations without ground truth? +2. What is also the best way to interpret our NLST segmentation results, using information we know about the data the pretrained model used? I think we should take a closer look at the training data to understand our results. For example clinical information, disease differences, etc. +3. How do we do outlier detection on large, heterogenous datasets? +4. What other radiomics features can we use besides volume? We are extracting the shape features, first order features, and general features from pyradiomics. +5. How do we curate patients/segments to be used for further analysis? For instance, not including patients that have incomplete segmentations -- without having ground truth. +6. How do we make sure that we are correctly identifying cases where the laterality is incorrect? +7. Can we use information about the topology/atlas-based info to determine if segmentations are correct? 
Like we know left lower lobe is more inferior to left upper lobe. + +Specific points: +1. What analysis can we do for the lung regions? What features besides the volume can we interpret? Can we take advantage of the NLST clinical tables? (smoking vs non smoking, etc). +2. For the vertebrae, are there heuristics we can do? +3. Are there heuristics that we develop for NLST that will work for SynthSeg evaluation? + +*** Work accomplished this week *** + +We had multiple sessions and discussions with Ron, where we: +- learned about anatomy +- tried to figure out what we could focus on for the NLST analysis and interpretation, and +- brainstormed how to develop better pipelines to view our data and segmentations + +What we decided: +1. Focus on the liver and see if we can make correlations between liver health and lung cancer. For instance, fatty liver and cirrhosis and correlation with features we extract such as volume. +2. Brainstorm and develop better ways to quickly visualize our segmentations -- using mrb file creation, CaseIterator, etc. 
+ +# Illustrations + +Example of TotalSegmentator analysis on an NLST patient in OHIF: +![](https://github.com/NA-MIC/ProjectWeek/assets/59979551/604c7923-6f42-4865-8fe5-b18cd59231f6) + +Example of liver analysis of NLST patient in Slicer: +![2024-02-01-Scene](https://github.com/NA-MIC/ProjectWeek/assets/59979551/89b6ce27-f3bc-4016-ace3-57bce4d2959a) + + +# Background and References + + + +[TotalSegmentator ](https://github.com/wasserth/TotalSegmentator) +[SynthSeg](https://github.com/BBillot/SynthSeg) diff --git a/PW40_2024_GranCanaria/Projects/Volumeaxi/README.md b/PW40_2024_GranCanaria/Projects/Volumeaxi/README.md new file mode 100644 index 000000000..1972885fa --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/Volumeaxi/README.md @@ -0,0 +1,93 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: VolumeAXI +category: Segmentation / Classification / Landmarking +presenter_location: Online + +key_investigators: + +- name: Jeanne CLARET + affiliation: University of Michigan + country: USA + +- name: Gaëlle Leroux + affiliation: University of Michigan + country: USA + +- name: Lucia Cevidanes + affiliation: University of Michigan + country: USA + +- name: Claudia Mattos + affiliation: University of Michigan + country: USA + +- name: Juan Prieto + affiliation: University of North Carolina + country: USA + + + +--- + +# Project Description + + + +Volume Analysis, eXplainability and Interpretability, Volume-AXI, is an explainability approach for classification of bone and teeth structural defects in CBCT scans gray-level images. We propose to develop interpretable AI algorithms to visualize diagnostic features in dental and craniofacial conditions. 
This work is built on neural network models in Python, specifically using the MONAI framework. + +The first clinical application of Volume-AXI is related to dentistry, aiming to identify the position of tooth impaction and damage to adjacent structures. + +## Objective + + + +1. Create AI algorithms capable of visualizing diagnostic features in dental and craniofacial conditions using CBCT (Cone Beam Computed Tomography) scan gray-level images. +2. Integrate the developed AI algorithms with clinical workflows. +3. Enhancing Explainability and Interpretability in Medical Imaging + +## Approach and Plan + + + +1. Data Preparation and Pre-processing + +2. Model Development and Training: Explore and select appropriate neural network architectures (e.g., CNNs, U-Nets) for image classification and feature visualization. + +3. Explainability and Visualization Techniques: Implement methods to make AI decisions transparent and understandable such as Grad-CAM. + +4. Validation and Testing + +5. Documentation and Training: Create comprehensive documentation and user guides explaining the functionality and benefits of the AI tools. + + +## Progress and Next Steps + + + +1. Done different preprocessing steps on the CBCT scans. +2. Tried to train with EfficientNetBN. + +Next step: + +1. Think about a new implementation of training. +2. Try to reduce the image to regions of interest. +3. Use of transformations in the training loop to increase the dataset. 
+ + +# Illustrations + + +![individualImage](https://github.com/NA-MIC/ProjectWeek/assets/91120559/1852a8b3-4162-4c3f-b0d7-c00a612e80fc) + + +# Background and References + + + +[VolumeAXI](https://github.com/Jeanneclre/VolumeAXI) diff --git a/PW40_2024_GranCanaria/Projects/WsiDicomImprovementFromViewerToAnalysis/README.md b/PW40_2024_GranCanaria/Projects/WsiDicomImprovementFromViewerToAnalysis/README.md new file mode 100644 index 000000000..8988a6816 --- /dev/null +++ b/PW40_2024_GranCanaria/Projects/WsiDicomImprovementFromViewerToAnalysis/README.md @@ -0,0 +1,144 @@ +--- +layout: pw40-project + +permalink: /:path/ + +project_title: WSI-DICOM Improvement - From Viewer to Analysis +category: DICOM +presenter_location: In-person + +key_investigators: + +- name: Fabian Hörst + affiliation: Institute for Artificial Intelligence in Medicine (IKIM) + country: Germany + +- name: Lukas Heine + affiliation: Institute for Artificial Intelligence in Medicine (IKIM) + country: Germany + +- name: Moon Kim + affiliation: Institute for Artificial Intelligence in Medicine (IKIM) + country: Germany + +- name: Fin H. Bahnsen + affiliation: Institute for Artificial Intelligence in Medicine (IKIM) + country: Germany + +- name: Jens Kleesiek + affiliation: Institute for Artificial Intelligence in Medicine (IKIM) + country: Germany + +--- + +# Project Description + + + +Despite various existing solutions for the conversion of WSI data into DICOM, there is a distinct lack of conversion tools (vendor agnostic) that result in DICOM files. Current solutions fall short in generating DICOM files compatible with OpenSlide (4.0.0) and OHIF/SLIM-Viewer, including a PACS, impeding seamless integration and compromising overall performance. + +Our objective is to 1. assess available conversion tools, 2. examine their seamless integration capabilities, and 3. 
enhance or develop our own solutions for WSI-DICOM conversion that integrate into PACS systems connected to web-based viewers (OHIF/SLIM), but also work locally with open-source Viewers such as QuPath (newest version 0.5.0). As automatic slide analysis with AI algorithms (mostly Python) is a cornerstone of computational pathology, OpenSlide integration is another necessary requirement. + +## Objective + + + +This project aims to test existing software solutions for vendor-agnostic WSI to DICOM conversion in digital pathology and deliver/develop an open-source, community-maintained software solution. The tool must adhere to established software design patterns, ensuring ease of contribution from the community. + +## Approach and Plan + + + +1. Provide a testing suite for testing resulting DICOM files, consisting of PACS/Viewer/Analysis-Components +2. Test existing WSI DICOM solutions and find shortcomings +3. Develop/Improve WSI DICOM conversions +4. Deliver key insights into shortcomings to push conversion forward + +## Progress and Next Steps + +1. Test Bench has been published under: [WSI-DICOM-TESTBench](https://github.com/FabianHoerst/WSI-DICOM-TESTBench) + + Tools and Notebooks are going to be updated soon +3. Compared Tools: + + | OrthancWSIDicomizer | bfconvert | dicom_wsi | GCP WSI to DICOM | pixelmed | IMI Big Picture | + |-------------------------------------------------------------|-----------|-----------|------------------|----------|-----------------| + | Working, but excessive metadata generation. Multiple edge cases with established tools (inconsistent compatibility) | Slow for large WSI, precompressed option results in color splits (shifted color due to RGB - YBR issue) | Too slow | Not checked yet | Still working for svs and tiff, stable backup solution | Decent solution, but ICC color profile is not transferred to the respective DICOM tags. Should not be that hard to fix. | + +4. 
Viewer + + ***Link:*** + Find the code here: [Slim-Orthanc](https://github.com/diatools/slim-orthanc/) + + ***Tools Tested:*** + Several viewers were tested, including the [Slim-Viewer](https://github.com/ImagingDataCommons/slim) (both native and with [OHIF](https://ohif.org/) integration), [OpenLayers](https://openlayers.org/), and integration with [Orthanc](https://www.orthanc-server.com/) as a PACS system (refer to Image 1). + + ***Performance Issues:*** + During testing, performance issues were observed at high zoom levels, with delays attributed to extended wait times for server responses. The problem was identified in the WebDICOM adapter (refer to Image 2). + + ***Comparison with Another PACS System:*** + In comparison with another PACS system ([DCM4CHE](https://www.dcm4che.org/)), no such performance issues were encountered. + + ***DICOM Web Plugin and Delays:*** + The [DICOM Web plugin](https://www.orthanc-server.com/static.php?page=dicomweb) appears to introduce delays, possibly due to implementation issues, specifically with pathological microscopy images. This aspect will be further evaluated in the next project phase. + + ***Next Steps:*** + The [healthcare-dicom-dicomweb-adapter](https://github.com/GoogleCloudPlatform/healthcare-dicom-dicomweb-adapter) will be evaluated as an alternative to the native DICOM-web plugin for Orthanc in the upcoming phase of the project. + +# Illustrations + + + +![idea](https://github.com/NA-MIC/ProjectWeek/assets/67600643/1c0d0f88-f302-4cd7-9499-b77be854411f) + +![result_slim](https://github.com/NA-MIC/ProjectWeek/assets/67600643/0955c066-ee14-4b64-8204-f64a66fa2bbf) + + +# Background and References + + + +### Overview of Tools for WSI-DICOM Conversion: + +**thanks to @dclunie and @fedorov** + +1. bfconvert (BioFormats): + Converting a file to different format — Bio-Formats 7.1.0 documentation. 
+ Link: [https://bioformats.readthedocs.io/en/v7.1.0/users/comlinetools/conversion.html](https://bioformats.readthedocs.io/en/v7.1.0/users/comlinetools/conversion.html) +2. dicom_wsi + Gu Q, Prodduturi N, Jiang J, Flotte TJ, Hart SN. Dicom_wsi: A Python Implementation for Converting Whole-Slide Images to Digital Imaging and Communications in Medicine Compliant Files. J Pathol Inform. 2021;12(1):21. doi:[10.4103/jpi.jpi_88_20](https://doi.org/10.4103/jpi.jpi_88_20) + Link: [https://github.com/Steven-N-Hart/dicom_wsi](https://github.com/Steven-N-Hart/dicom_wsi) +3. GoogleCloudPlatform. WSI to DICOM Converter. + Google Cloud Platform; 2022. + Link: [https://github.com/GoogleCloudPlatform/wsi-to-dicom-converter](https://github.com/GoogleCloudPlatform/wsi-to-dicom-converter) +4. wsidicomizer. Sectra AB + Sectra AB. wsidicomizer. imi-bigpicture; 2021. + Link: [https://github.com/imi-bigpicture/wsidicomizer](https://github.com/imi-bigpicture/wsidicomizer) +5. Jodogne S, Lenaerts É, Marquet L, Erpicum C, Greimers R, Gillet P, et al. Open Implementation of DICOM for Whole-Slide Microscopic Imaging: In: Proceedings of the 12th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications. Porto, Portugal: SCITEPRESS - Science and Technology Publications; 2017. p. 81–7. Available from: https://orbi.uliege.be/handle/2268/204498 doi:[10.5220/0006155100810087](https://doi.org/10.5220/0006155100810087) +6. Clunie D. com.pixelmed.convert.TIFFToDicom. + Link: [http://www.dclunie.com/pixelmed/software/javadoc/com/pixelmed/convert/TIFFToDicom.html](http://www.dclunie.com/pixelmed/software/javadoc/com/pixelmed/convert/TIFFToDicom.html) +7. Pocock J. wsic. 2023. + Link: [https://github.com/John-P/wsic](https://github.com/John-P/wsic) +8. 
Orthanc WSI "Dicomizer" + Link: [https://www.orthanc-server.com/static.php?page=wsi](https://www.orthanc-server.com/static.php?page=wsi) + Documentation: [https://orthanc.uclouvain.be/book/plugins/wsi.html](https://orthanc.uclouvain.be/book/plugins/wsi.html) + +### Background Information +DICOM-WSI: [https://dicom.nema.org/dicom/dicomwsi/](https://dicom.nema.org/dicom/dicomwsi/) + +### Test Data + +OpenSlide: [https://openslide.org/](https://openslide.org/) +Test data can be downloaded there for some vendors. + +Imaging Data Commons has >23TB of DICOM WSI (converted from original SVS): [https://portal.imaging.datacommons.cancer.gov/explore/filters/?Modality_op=OR&Modality=SM](https://portal.imaging.datacommons.cancer.gov/explore/filters/?Modality_op=OR&Modality=SM) + +NEMA ftp server including WG 26 Connectathon ECDP 2023 data from vendors (some have issues; older data is more variable in quality): ftp://medical.nema.org/MEDICAL/Dicom/DataSets/WG26/ + +### Other Resources +Test-Suite: TBD + +Link to Lean Study Host: [https://github.com/TIO-IKIM/LeanStudyHost](https://github.com/TIO-IKIM/LeanStudyHost) + +Validation tool (checks compliance with standard): [https://www.dclunie.com/dicom3tools/dciodvfy.html](https://www.dclunie.com/dicom3tools/dciodvfy.html) diff --git a/PW40_2024_GranCanaria/README.md b/PW40_2024_GranCanaria/README.md new file mode 100644 index 000000000..4a9ee309e --- /dev/null +++ b/PW40_2024_GranCanaria/README.md @@ -0,0 +1,321 @@ +--- +permalink: /:path/ +redirect_from: +- /PW40_2024_GranCanaria/README.html +- /PW40_2024_GranCanaria/Readme.html + +project_categories: +- Early Presenter +- DICOM +- VR/AR and Rendering +- IGT and Training +- Segmentation / Classification / Landmarking +- Quantification and Computation +- Cloud / Web +- Infrastructure +- Other +--- + + + +# Welcome to the web page for the 40th Project Week! 
+ +[This event](https://projectweek.na-mic.org/PW40_2024_GranCanaria/README.html) took place January 29 - February 2, 2024 in Las Palmas, Gran Canaria, Spain. Project Week 40 was a hybrid event with a strong in-person component. If you have any questions, you can contact the [organizers](#organizers). + +## Preparation meetings + +We held weekly preparation meetings at 10am on Tuesdays on Zoom, starting November 21, 2023. The links were posted here the week before. + +## Venue + +| Hotel NH Imperial Playa | +| -----| +|| + +- **Recommended hotels (special rates) and maps** + - NH Imperial Playa [Map](https://cutt.ly/twjO0PO) + - Booking: [Discounted room block available](https://www.nh-hotels.com/es/event/namic-workshop) + - NH Las Palmas Playa Las Canteras [Map](https://cutt.ly/vwkkTDE) + - Booking: [Discounted room block available](https://www.nh-hotels.com/es/event/grupo-na-mic-workshop) + +- **Transportation** from the airport to the city (Las Palmas de Gran Canaria): + - Taxi (line at the airport) + - [Bus line 60](https://guaguasglobal.com/lineas-horarios/linea/?id=60) + - [Map: Airport - San Telmo bus station](https://www.google.com/maps/dir/Gran+Canaria+Airport,+GC-1,+s%2Fn,+35230+Las+Palmas+de+Gran+Canaria,+Las+Palmas/Estacion+De+Guaguas+SAN+TELMO,+35002+Las+Palmas+de+Gran+Canaria,+Las+Palmas/@28.0191886,-15.4859935,12z/data=!3m1!4b1!4m14!4m13!1m5!1m1!1s0xc40a266c3662d1d:0x824bcf7e159f85d4!2m2!1d-15.3874042!2d27.9289223!1m5!1m1!1s0xc40958500f0b3f5:0x3693fb0e3c418af2!2m2!1d-15.4158957!2d28.109201!3e3?entry=ttu) +- The city has good bus/taxi service and is also walkable. + +## Registration + +- All participants (both remote and in-person) have to register using [this form](https://forms.gle/iToYPfiE1xa7pYnN9). +- Registration for **remote** participants is free. +- Registration fee for **in person** participants will be 450€ per person. 
**In-person registration is now closed.** + +**Important: The deadline for registration and payment of the fee is Jan 15th**. If you have problems with the payment or registration, contact [namic@ebatinca.com](mailto:namic@ebatinca.com). + +**Note:** EU regulations require 2FA for all payments. European banking standards and most credit cards support this by default. If your payment doesn't process due to 2FA issues, please reach out for a custom payment link at [namic@ebatinca.com](mailto:namic@ebatinca.com). + +## Discord + +The **Discord** application is used to communicate between team members and organize activities before and during Project Week. Please join the Project Week [Discord server](https://discord.gg/qq5pyACuhE) as soon as possible and explore its functionality before the workshop. For more information on the use of Discord before and during Project Week, please visit [this page](../common/Discord.md). + +## Agenda + +{% include calendar.md from="2024-01-29" to="2024-02-02" %} + +## Breakout sessions + +1. Monday: [DICOM](https://docs.google.com/document/d/1Rd9MxRHypCM-JEemvCd59NQyLvE_-tsbbxYXnBkYkxg/edit#heading=h.9f3umithmdm) +1. Tuesday: [3D Slicer](BreakoutSessions/Slicer/README.md) +1. Wednesday: [Imaging Data Commons](https://docs.google.com/document/d/1GfzaiUcqLSTmqlumyny1yjJjGEViADwzFllr-sKNgl8/edit?usp=sharing) +1. Thursday: [Future of Rendering in VTK, ITK and Slicer](BreakoutSessions/Rendering/README.md) + +## Projects + +To learn how to create or update project pages, please refer to the [contributing project pages](ContributingProjectPages.md) section. + +{% include projects.md %} + +## Registrants + +Do not add your name to this list below. It is maintained by the organizers based on your registration. + +List of registered participants so far (names will be added here after processing registrations): + + + + +Updated on 2024-02-01. + +1. Steve Pieper, Isomics, Inc., USA, (In-person, Confirmed) +1. 
Theodore Aptekarev, Slicer Community, Montenegro, (Online) +1. Csaba Pinter, EBATINCA, Spain, (In-person, Confirmed) +1. Attila Nagy, University of Szeged, Dept. of Medical Physics and Informatics, Hungary, (In-person) +1. Rafael Palomar, Oslo University Hospital, Norway, (In-person, Confirmed) +1. Hathaichanok Parakarn, Khon Kaen University, Thailand, (Online) +1. Tatpong Katanyukul, Khon Kaen University, Thailand, (Online) +1. Andrey Fedorov, BWH, USA, (Remote, Confirmed) +1. Hans Knutsson, Linkoping University, Sweden, (In-person, Confirmed) +1. Juan Ruiz-Alzola, University of Las Palmas de Gran Canaria, Spain, (In-person, Confirmed) +1. Lucas Sanchez Silva, USP, Brazil, (In-person, Confirmed) +1. Tina Kapur, Brigham and Women's Hospital, Harvard Medical Schools, USA, (In-person, Confirmed) +1. Simon Drouin, École de technologie supérieure, Canada, (Online) +1. Andras Lasso, PerkLab, Queen's University, Canada, (In-person, Confirmed) +1. Luiz Murta, University of São Paulo, Brazil, (In-person, Confirmed) +1. Felix Hofmann, Department of General, Visceral and Transplantation Surgery, LMU University Hospital, Munich, Germany, (Undecided) +1. Mike Jin, Brigham and Women's Hospital, Harvard Medical School; Centaur Labs, USA, (In-person, Confirmed) +1. Erke Can Tellal, apoQlar GmbH, Germany, (Online) +1. Paolo Zaffino, Magna Graecia University of Catanzaro, Italy, (In-person, Confirmed) +1. Michela Destito, Magna Graecia University of Catanzaro, Italy, (In-person, Confirmed) +1. Roberto Veraldi, Magna Graecia University of Catanzaro, Italy, (In-person, Confirmed) +1. Ron Kikinis, Brigham and Women's Hospital and Harvard Medical School, USA, (In-person, Confirmed) +1. Ahmedou Moulaye IDRISS, Faculty of Medicine / Nouakchott University, Mauritania, (In-person) +1. Domonkos Tatár, University of Szeged, Albert Szent-Györgyi Medical School, Hungary, (Online) +1. 
Felix von Haxthausen, Image Guided Therapy Research Group, Universidad Carlos III de Madrid, Spain, (In-person, Confirmed) +1. Michael Onken, Open Connections GmbH, Germany, (In-person, Confirmed) +1. Zora Kikinis, Brigham and Women's Hospital and Harvard Medical School, USA, (In-person, Confirmed) +1. Constantin CONSTANTINESCU, Universidad de Las Palmas de Gran Canaria, Romania, (In-person, Confirmed) +1. Eszter Asztalos-Zsembery, University of Szeged, Hungary, (In-person, Confirmed) +1. Joël Spaltenstein, Agora Care SA, Switzerland, (In-person, Confirmed) +1. Laura Gui Levy, Agora Care, Switzerland, (In-person, Confirmed) +1. José Carlos Mateo Pérez, Universidad de Las Palmas de Gran Canaria, Spain, (In-person, Confirmed) +1. Pablo Sergio Castellano Rodríguez, University of Las Palmas de Gran Canaria, Spain, (In-person, Confirmed) +1. Davide Punzo, Freelancer, France, (In-person, Confirmed) +1. Attila Tanács, University of Szeged, Hungary, (In-person, Confirmed) +1. Brianna Burton, 3D Side SA, Belgium, (In-person, Confirmed) +1. Maria Monzon, ETH Zürich, Switzerland, (In-person, Confirmed) +1. Buddhathida Wangsrimongkol, Khon Kaen University, Thailand, (Online) +1. Francisco Carlos Felipe Rodríguez , Universidad de Las Palmas de Gran Canaria, Spain, (In-person) +1. Philipp Schader, German Cancer Research Center, Germany, (In-person, Confirmed) +1. Jeanne Claret, University of Michigan , USA, (Online) +1. Vitaliy Petrov, Lviv Medical University, Ukraine, (Online) +1. Kyle Sunderland, Queen's University, Canada, (In-person, Confirmed) +1. Hanno Gao, DKFZ, Germany, (Online) +1. Javier Pascau, Universidad Carlos III de Madrid, Spain, (In-person, Confirmed) +1. Alicia Pose Díez de la Lastra, Universidad Carlos III de Madrid, Spain, (In-person, Confirmed) +1. Monica Garcia Sevilla, Universidad Carlos III de Madrid, Spain, (In-person, Confirmed) +1. Dr. Daniel Palkovics, Semmelweis University, Hungary, (In-person, Confirmed) +1. 
Rafael Oddone Scatena, Universidade de São Paulo, Brazil, (Undecided) +1. Douglas Samuel Gonçalves, USP, Brazil, (Online) +1. Ida Granö, Aalto University, Finland, (In-person, Confirmed) +1. David Clunie, PixelMed (IDC), USA, (In-person, Confirmed) +1. Federico Gnesotto, ImFusion GmbH, Germany, (In-person, Confirmed) +1. Okeowo Adedoyin Esther , Lagos State University Teaching Hospital , Nigeria, (Online) +1. Niklas Wahl, German Cancer Research Center (DKFZ), Germany, (In-person, Confirmed) +1. Nora Penzel, Massachusetts General Hospital, Harvard Medical School, Boston, USA, (In-person, Confirmed) +1. Deepa Krishnaswamy, Brigham and Women's Hospital, USA, (In-person, Confirmed) +1. Martin Matilla, ImFusion GmbH, Germany, (In-person, Confirmed) +1. Mohamed Alalli Bilal, University Cheikh Anta diop of Dakar (Ecole superieure polytechnique), Mauritania, (Online) +1. Leonard Nürnberg, Harvard AIM / Maastricht University, Netherlands, (In-person, Confirmed) +1. Sam Horvath, Kitware, USA, (In-person, Confirmed) +1. Marie NDIAYE, Université Assane Seck de Ziguinchor, Senegal, (In-person) +1. Fabian Hörst, Institute for Artificial Intelligence in Medicine (IKIM), University Hospital Essen, Germany, (In-person, Confirmed) +1. Jens Kleesiek, Institute for Artificial Intelligence in Medicine (IKIM), University Hospital Essen, Germany, (In-person, Confirmed) +1. Joost van Griethuysen, The Netherlands Cancer Institute, Netherlands, (In-person, Confirmed) +1. Daniel Caballero, Minimally Invasive Surgery Center Jesús Usón, Cáceres, Spain, (Online) +1. Juan A. Sánchez-Margallo, Jesús Usón Minimally Invasive Surgery Centre, Spain, (Online) +1. Lucia Salazar Carrasco, Minimally Invasive Surgery Center Jesús Usón, Cáceres, Spain, (Online) +1. Marco Nolden, German Cancer Research Center (DKFZ), Heidelberg, Germany, (In-person, Confirmed) +1. Christopher Bridge, Massachusetts General Hospital, USA, (In-person, Confirmed) +1. 
Pablo Polosecki, IBM Research, USA, (In-person, Confirmed) +1. Carl-Fredrik Westin, Harvard Medical School, USA, (In-person, Confirmed) +1. Idafen Santana, EBATINCA, Spain, (In-person, Confirmed) +1. Rudolf Bumm, Kantonsspital Graubünden, Switzerland, (In-person, Confirmed) +1. WBUZAR MUBARAK OMER OSMQN, Al fashir University, Sudan, (Online) +1. Igor Octaviano, Radical Imaging / OHIF, Brazil, (Online) +1. Chi Zhang, Texas A&M University School of Dentistry, USA, (Online) +1. Moon Kim, Institute for Artificial Intelligence in Medicine (IKIM), University Hospital Essen, Germany, (In-person) +1. Ciro Benito Raggio , Karlsruhe Institute of Technology, Germany, (In-person) +1. Domenico Riggio, Karlsruhe Institute of Technology, Germany, (In-person) +1. Takayoshi Suzuki, Hokkaido University, Japan, (Online) +1. Dong Jin Sung, Mass General Brigham (MGB), USA, (Online) +1. Amaia Iribar Zabala, Fundación Vicomtech, Spain, (In-person) +1. Rafeal Benito Herce, Funcación Vicomtech, Spain, (In-person) +1. Frida Hauler, No institution, UK, (In-person) +1. Maximilian Fischer, German Cancer Research Center (DKFZ) Heidelberg, Germany, (Undecided) +1. Diego Larriera Kiriakidis, Técnicas Competitivas SA, Spain, (In-person) +1. sara Fernandez Vidal, ICM, France, (Online) +1. Adela Moravova, University of Innsbruck, Austria, (Online) +1. Alireza Sedghi, open health imaging foundation - OHIF, Canada, (Online) +1. Bahnsen, Fin Hendrik, Institute for AI in Medicine (IKIM), Essen, Germany, (In-person) +1. Odile Elias, Deutsches Krebsforschungszentrum Heidelberg, Germany, (In-person) +1. Andres Diaz-Pinto, NVIDIA, UK, (Online) +1. Gabor Fichtinger, Queen's University, Canada, (In-person) +1. Fatimetou Mohamed-Saleck, University of Nouakchott and University of Las Palmas Gran Canaria, Mauritania, (In-person) +1. Patrick Remerscheid, BWH, Switzerland, (Online) +1. Lukas Heine, Institute for AI in medicine, University Medicine Essen, Germany, (In-person) +1. 
Vamsi Thiriveedhi, Brigham and Women's Hospital , USA, (In-person) +1. Rafael Nebot Medina, Instituto Tecnológico de Canarias, SA, Spain, (Online) +1. Pablo Cabrales Miró-Granada, Grupo de Física Nuclear, Dpto EMFTEL & IPARCOS, Facultad de Ciencias Físicas, Universidad Complutense de Madrid, Spain, (In-person) +1. Mamadou Samba CAMARA, Cheikh Anta Diop University of Dakar, Senegal, (Online) +1. Mohamed Abdellahi Sidi Mohamed Blal, Université de Nouakchott , Mauritania, (Online) +1. João Pedro Alves Januário, University os São Paulo, Brazil, (Online) +1. Professor Klaus Maier-Hein, German Cancer Research Center (DKFZ), Germany, (In-person) +1. Francesca Spadea, Karlsruhe Institute of Technology, Italy, (In-person) +1. Ashis Ravindran, DKFZ, Heidelberg, Germany, (Online) +1. Imre János Barabás, MD, Semmelweis University, Hungary, (In-person) +1. Mikael Brudfors , NVIDIA, UK, (In-person) +1. Badiaa AIT AHMED, Instituto de Astrofísica de Canarias, Spain, (Online) +1. Maximilian Fischer, German Cancer Research Center (DKFZ) Heidelberg, Germany, (In-person) +1. José Andrés Avellaneda González, Universidad Complutense de Madrid, Spain, (Online) +1. Simon Oxenford, Charité Berlin, Germany, (Online) +1. Nayra Pumar Carreras, EBATINCA, Spain, (In-person) +1. Moustapha Mohamed Saleck, Faculté des Sciences et Techniques (FST) , Université de Nouakchott, Mauritania, (Online) +1. Baksic Paul, INRIA, France, (In-person) +1. Valeria Gómez Valdes , Universidad Autónoma del Estado de México , Mexico, (Online) +1. Jean-Christophe Fillion-Robin, Kitware, USA, (In-person) +1. Leroux, University of Michigan, USA, (Online) +1. Balint Kovacs, Division of Medical Image Computing - German Cancer Research Center (DKFZ), Germany, (In-person) +1. Daniela Schacherer, Fraunhofer MEVIS, Germany, (In-person) +1. Nikolaos Makris, Massachusetts General Hospital, USA, (In-person) +1. Poliana Hartung Toppa, Massachusetts General Hospital, USA, (In-person) +1. 
Kayley Haggerty, Massachusetts General Hospital, USA, (In-person) +1. Gabriella d'Albenzio, The Intervention Centre, Norway, (In-person) +1. Mohamed bamba, Faculté de médecine de Nouakchott , Mauritania, (Online) +1. Petros Koutsouvelis, Maastricht University, Netherlands, (In-person) +1. Mohamed El Moctar, FMPOS, Mauritania, (Online) +1. Hossein Rahmani, Maastricht University, Netherlands, (In-person) +1. Vianney Muñoz-Jiménez, Universiad Autónoma del Estado de México, Mexico, (Online) +1. Victor Manuel Montaño Serrano, Universidad Autónoma del Estado de México, Mexico, (Online) +1. André Homeyer, Fraunhofer MEVIS, Germany, (Online) +1. Monserrat Ríos Hernández, Universidad Autónoma del Estado de México, Mexico, (Online) +1. Ole Vegard Solberg, SINTEF, Norway, (Online) +1. Adriana H. Vilchis González, Facultad de Ingeniería UAEMex, Mexico, (Online) +1. Juan Carlos Avila Vilchis , Facultad de Ingeniería UAEMex, Mexico, (Online) +1. Enrique Hernandez Laredo, Universidad Autónoma del Estado de México, Mexico, (In-person) +1. Ofer Pasternak, Harvard Medical School, USA, (In-person) +1. Tae Young Park, KIST, South Korea, (Online) +1. Aída García Limas, UAEMéx, Mexico, (Online) +1. Mikhail Milchenko, Washington University in Saint Louis, USA, (Online) +1. Fatimetou Hademine , Faculty of science and technology university of Nouakchott , Mauritania, (Online) +1. Aichetou N’DIAYE, Université de Nouakchott, Mauritania, (Online) +1. Mohamed Boullah Mohamed , University of Nouakchott , Mauritania, (Online) +1. Abigail Mercado Ponciano, Universidad Autónoma del Estado de México , Mexico, (Online) +1. Paula, Instituto Tecnológico de Canarias, Spain, (Online) +1. David Garcia Mato, Apolo AI, Spain, (Online) +1. Mikulas Bankovic, DKFZ, Germany, (Online) +1. Khaled Younis, MedAiConsult, USA, (Online) +1. Jess Tate, University of Utah, USA, (Online) +1. Juan Carlos Prieto, University of North Carolina, USA, (Online) +1. Stephen Schaumann, DKFZ Heidelberg, Germany, (Online) +1. 
Kevin Guan, New York University, USA, (Online) +1. Umang Pandey, Universidad Carlos III de Madrid, Spain, Spain, (Online) +1. Felicia Alfano, Universidad Politécnica de Madrid, Spain, (Online) +1. Mo Al Sa’d, Imperial College London, UK, (Online) +1. Mohamedou Ahmed Mahmoud , University of Nouakchott , Mauritania, (Online) +1. El Hacen Mohamed Soueilem , Nouakchott University , Mauritania, (Online) +1. Gordon Harris, MGH / OHIF, USA, (Online) +1. Joel Zagoya, UAEMEX , Mexico, (Online) +1. Birgitt Peeters, KU Leuven, Belgium, (Online) +1. Jose Tadeo Borjas Gómez, Instituto de Investigación del Hospital Universitario La Paz, Spain, (Online) +1. Andinet Enquobahrie, Kitware, USA, (Online) +1. Mauro I. Dominguez, Independent, Argentina, (Online) +1. Sankhesh Jhaveri, Kitware Inc., USA, (Online) +1. García Matias Xitlaly , Universidad del Valle de México , Mexico, (Online) +1. Mark Pearson, CNI Molecular Imaging, Australia, (Online) +1. Maria Prosszer, GE Healthcare Hungary, Hungary, (Online) +1. Andor Kenyeres, GE Healthcare, Hungary, (Online) +1. Ágnes Kocsis, GE Healthcare Magyarország Kft., Hungary, (Online) +1. Krisztian Koos, GE Healthcare, Hungary, (Online) +1. Eszter Kiss, GE Healthcare, Hungary, (Online) +1. Hans Meine, Fraunhofer MEVIS, Germany, (Online) +1. Sal Choueib, Ebatinca, Canada, (Online) +1. Leonardo Campos, none, Brazil, (Online) +1. Christian Herz, Children's Hospital of Philadelphia, USA, (Online) +1. Subhra Sundar Goswami, Universidad Politecnica de Madrid, Spain, (Online) +1. Matt McCormick, Kitware, USA, (Online) +1. Fatou Bintou NDIAYE, Cheikh Anta Diop University of Dakar, Senegal, (Online) +1. Mouhamed DIOP, Cheikh Anta Diop University of Dakar, Senegal, (Online) +1. Mario Francisco Ramírez Pizaña, UVM, Mexico, (Online) +1. Valeria Joselin Villanueva Reynoso , UVM , Mexico, (Online) +1. Jesús Ángel Estévez Vázquez , UVM, Mexico, (Online) +1. Eric Nicolas Cruz, Universidad del Valle de Mexico, Mexico, (Online) +1. 
Edna Fonseca Díaz , Universidad del Valle de México , Mexico, (Online) +1. Erick Emmanuel Callejo Rubio , Universiaas del Valle de Mexico , Mexico, (Undecided) +1. Edna Fonseca Díaz , Universidad del Valle de México , Mexico, (Online) +1. Edna Fonseca Díaz , Universidad del Valle de México , Mexico, (Undecided) +1. Christian Jahir Flores González, Universidad del Valle de México, Mexico, (Online) +1. María Fernanda Carmona Warnke, Universidad del Valle de México, Mexico, (Online) +1. Gerardo Damián Villanueva Suárez, Universidad del Valle de México, Mexico, (Online) +1. Maidelyn Isabella , UVM campus Toluca , Mexico, (Undecided) +1. ANA GABRIELA SÁNCHEZ LARA , UVM , Mexico, (Online) +1. Sandra Fabiola Olmos Domínguez , Universidad del Valle de México , Mexico, (Online) +1. Melissa Padilla Sánchez , Universidad del valle de México , Mexico, (Online) +1. Renata Valderrama D'oleire, Universidad del Valle de México, Mexico, (Online) +1. Renata Valderrama D'oleire, Universidad del Valle de México, Mexico, (Online) +1. Diana Karla Vilchis Jimenez , Universidad del Valle de México , Mexico, (Online) +1. Aranza Daniela Garcia Rojas, Universidad del Valle de México, Mexico, (Undecided) +1. Karol Rafael, Universidad del Valle de México , Mexico, (Online) +1. Karla Paulina Sánchez , Uvm Toluca México , Mexico, (Online) +1. Shreeraj Jadhav, Kitware Inc, USA, (Online) +1. Rubi Gonzalez , Universidad del Valle de México, Mexico, (Online) +1. Subhra Sundar Goswami, Universidad Politecnica de Madrid, Spain, (Online) +1. 
Martín García , UVM, Mexico, (Online) + + + +## Statistics + +Participation statistics + +## Organizers + +### Local organizing committee + +- Juan Ruiz-Alzola, PhD, Professor of Imaging Technologies, director of the Grupo de Tecnología Médica y Audiovisual (GTMA), [Instituto Universitario de Investigaciones Biomédicas y Sanitarias (IUIBS)](https://www.iuibs.ulpgc.es/), [Universidad de Las Palmas de Gran Canaria (ULPGC)](https://www.ulpgc.es/) +- Idafen Santana-Pérez, PhD, Information Systems and Technology Consulting Director, [EBATINCA, S.L.](https://ebatinca.com/equipo) +- Csaba Pintér, PhD, CTO, [EBATINCA, S.L.](https://ebatinca.com/equipo) +- Javier Pascau, PhD, Professor of Bioengineering, [Universidad Carlos III de Madrid](https://igt.uc3m.es/jpascau) + +### Global Project Week organizing committee + +- [@tkapur](https://github.com/tkapur) ([Tina Kapur, PhD](http://www.spl.harvard.edu/pages/People/tkapur)), +- [@rkikinis](https://github.com/rkikinis) ([Ron Kikinis, MD](http://www.spl.harvard.edu/pages/People/kikinis)), +- [@drouin-simon](https://github.com/drouin-simon) ([Simon Drouin, PhD](https://drouin-simon.github.io/ETS-web//)) +- [@rafaelpalomar](https://github.com/rafaelpalomar) ([Rafael Palomar, PhD](https://www.ntnu.edu/employees/rafaelp)) +- [@piiq](https://github.com/piiq) ([Theodore Aptekarev](https://discourse.slicer.org/u/pll_llq)) +- [@sjh26](https://github.com/sjh26) ([Sam Horvath, PhD](https://www.kitware.com/samantha-horvath/)) + +## History + +Please read about our experience in running these events since 2005: [Increasing the Impact of Medical Image Computing Using +Community-Based Open-Access Hackathons: the NA-MIC and 3D Slicer Experience](http://perk.cs.queensu.ca/sites/perkd7.cs.queensu.ca/files/Kapur2016.pdf). 
diff --git a/PW40_2024_GranCanaria/statistics.svg b/PW40_2024_GranCanaria/statistics.svg new file mode 100644 index 000000000..3c44e968c --- /dev/null +++ b/PW40_2024_GranCanaria/statistics.svg @@ -0,0 +1,16641 @@ + + + + + + + + 2024-02-01T19:54:08.440955 + image/svg+xml + + + Matplotlib v3.8.2, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/PW41_2024_MIT/ContributingProjectPages.md b/PW41_2024_MIT/ContributingProjectPages.md new file mode 100644 index 000000000..c57511d90 --- /dev/null +++ b/PW41_2024_MIT/ContributingProjectPages.md @@ -0,0 +1,85 @@ +--- +--- +{% comment %}Extract event name (e.g "PW39_2023_Montreal"). 
{% endcomment %} +{%- assign event_name = page.path | split: '/' | first -%} + +# Contributing Project Pages + +## Creating new project pages + +With the [Project Week GitHub Issue page](https://github.com/NA-MIC/ProjectWeek/issues/new/choose), you have three options to create your Project Page: + +1. [Create a Proposal](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=drouin-simon%2Cpiiq%2Crafaelpalomar%2Csjh26%2Ctkapur&labels=proposal%2Cevent%3A{{ event_name }}&projects=&template=proposal.yml&title=Proposal%3A+) issue: If you have an idea for a project page but are not quite ready to create it yet, you can create a “Proposal” issue. You will still need to create a project page later. + +2. [Create a Project](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=drouin-simon%2Cpiiq%2Crafaelpalomar%2Csjh26%2Ctkapur&labels=project%2Cevent%3A{{ event_name }}&projects=&template=project.yml&title=Project%3A+) issue: If you are ready to create your page, you can simply create a “Project” issue. This issue will allow you to fill out a convenient form to provide the necessary details. The Project Week website team will then review the issue and trigger the page creation pull request. + +3. [Create the project page yourself using the template](Projects/README.md): If you prefer to create the Project Page yourself, you can still do so by using the provided template and submitting a pull request. + +## Project Creation Tips + +- Get your project pages created early! The day before is best to make sure everything you need for you presentation is available. The ProjectWeek site will be closed to edits for the ***10 minutes before*** both the opening and closing presentation session to ensure site generation. After this 10 minute period edits will be re-enabled. 
+ +- If you are [creating the project page yourself using the template](Projects/README.md), **don't reuse a project page template from a previous year.** We have made significant updates to the template to support auto-generation of project pages, so previous years' templates will not function properly. + + - When naming the file, **please ensure there are no spaces/special characters in the folder or file name** + - Make sure to fill out / update all of the information at the top of the README file (title, category, location, etc) + +- Remember to fill out the title for your project when using the [project creation issue](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=drouin-simon%2Cpiiq%2Crafaelpalomar%2Csjh26%2Ctkapur&labels=project%2Cevent%3A{{ event_name }}&projects=&template=project.yml&title=Project%3A+) + +- Check the formatting on the Key Investigators list when creating a [project issue](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=drouin-simon%2Cpiiq%2Crafaelpalomar%2Csjh26%2Ctkapur&labels=project%2Cevent%3A{{ event_name }}&projects=&template=project.yml&title=Project%3A+) (this is critical for page generation): + + `- Firstname Lastname (Affiliation, Country)` + +## Updating existing project pages + +Here are the steps using the GitHub web interface: + +1. Navigate to your project's `README.md` on the GitHub website. For instance, if you want to update a project called **YourProjectName**, visit the URL like the following: + + ``` + https://github.com/NA-MIC/ProjectWeek/blob/master/{{ event_name }}/Projects/YourProjectName/README.md + ``` + +2. Click the edit button, as shown in this screenshot: ![Screenshot 2023-06-12 10 43 35](https://github.com/NA-MIC/ProjectWeek/assets/25040869/ab01a7bf-c1e4-4c23-9aca-e2c6421ca530) + +3. You can now edit the page, add images by dragging and dropping, and more. + +4. 
Once done, click "Commit Changes", and follow the instructions to create a fork and a pull request to add your changes to the webpage. See this screenshot for reference: ![Screenshot 2023-06-12 10 50 50](https://github.com/NA-MIC/ProjectWeek/assets/25040869/180e81bb-d4f9-4f65-8569-a93192b2828e) + +## Videos in project pages + +Here are some steps to make sure all of your awesome videos render correctly: + +1. Videos added by drag and drop will render correctly when viewed through GitHub, but need some extra tweaks to work in the final generated website. + + + In your `README.md`, if you have a video link that looks like this: + + ``` + https://github.com/NA-MIC/ProjectWeek/assets/66890913/8f257f29-fa9c-4319-8c49-4138003eba27 + ``` + + Update it to: + + ```html + + ``` + +2. Links to externally hosted videos (such as YouTube) will need an iframe. + + Replace: + + ``` + https://youtu.be/ZWxE5QcGvE8 + ``` + + with + + ````html + + ```` diff --git a/PW41_2024_MIT/Projects/3D Slicer Internationalization/README.md b/PW41_2024_MIT/Projects/3D Slicer Internationalization/README.md new file mode 100644 index 000000000..7b58ebda9 --- /dev/null +++ b/PW41_2024_MIT/Projects/3D Slicer Internationalization/README.md @@ -0,0 +1,77 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: 3D Slicer Internationalization +category: Infrastructure +presenter_location: In-person and Online + +key_investigators: +- name: Sonia Pujol + affiliation: Brigham and Women's Hospital, Harvard Medical School + +- name: Mamadou Samba Camara + affiliation: Cheikh Anta Diop University + country: Senegal + +- name: Mouhamed DIOP + affiliation: Cheikh Anta Diop University + country: Senegal + +- name: Fatou Bintou Ndiaye + affiliation: Cheikh Anta Diop University + country: Senegal + +- name: Mohamed Alalli Bilal + affiliation: Nouakchott Al-Asriya University + country: Mauritania + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +- 
name: Steve Pieper + affiliation: Isomics + country: USA + +--- + +# Project Description + + +The project aims to develop a novel software infrastructure to enable the localization of 3D Slicer into multiple languages. + +## Objective + + + +1. Objective A. Validation of manual translations +1. Objective B. Outreach to external Weblate contributors +1. Objective C. Identification of international Slicer community members interested in new Slicer activities in their languages +1. Objective D. Translation to French of SlicerIGT tutorials + +## Approach and Plan + + + +### Slicer Internationalization sessions with members of the Slicer community +* Tuesday, June 25, 1:30-2:30 pm Slicer Internationalization Session +* Wednesday, June 26, 1:30-2:30 pm Slicer for Latin America Session +* Thursday, June 27, 1:30-2:30 pm Joint Slicer Internationalization Session and Slicer for Latin America Session + +International PW41 participants, please complete the [3D Slicer Internationalization form](https://forms.gle/h7b92rDzbUzFqL7d9) to participate in the sessions. + + + +## Progress and Next Steps + + + SlicerInternationalizationPW41 + +1. 32 participants from five countries participated in the PW41 Slicer internationalization sessions. +SlicerInternationalization_PW41_invitedTalk + +2. New Collaborations with Ecole Militaire de Santé and Université Gaston Berger, Senegal +3. 
Future plans for a 3D Slicer Localization extension diff --git a/PW41_2024_MIT/Projects/3DCuboidSegmentationAggregation/README.md b/PW41_2024_MIT/Projects/3DCuboidSegmentationAggregation/README.md new file mode 100644 index 000000000..35d5cfe09 --- /dev/null +++ b/PW41_2024_MIT/Projects/3DCuboidSegmentationAggregation/README.md @@ -0,0 +1,88 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: 3D Cuboid Segmentation Aggregation +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Mike Jin + affiliation: Brigham and Women's Hospital; Centaur Labs + country: USA + +- name: Tamas Ungi + affiliation: Queen's University + country: Canada + +- name: Amene Asgari + affiliation: Brigham and Women's Hospital + country: USA + +- name: Tina Kapur + affiliation: Brigham and Women's Hospital + country: USA + +--- + +# Project Description + + + + +Combining multiple segmentations from skilled annotators can improve accuracy when creating ground truth image segmentations. +The Centaur annotation platform has new support for collecting and displaying annotations on DICOM studies using OHIF viewer. +We are currently in the middle of a 1-month task of sourcing and creating segmentations for radiology findings from hundreds +of annotators on 30,000 brain and abdominal CTs. + + + +## Objective + + + + +We would like to extend an existing 2D clustering algorithm for combining multiple 2D segmentations to work +for 3D cuboid segmentations. + + + +## Approach and Plan + + + + +1. Create 3D representation objects for cuboid annotations submitted through OHIF viewer +2. Implement clustering algorithm using a 3D instead of 2D distance metric + + + +## Progress and Next Steps + + + + +1. Describe specific steps you **have actually done**. 
+ + + + +# Illustrations + + + + +https://www.dropbox.com/scl/fi/0clde5ufr468jccyijk0n/Screenshot-2024-06-24-at-12.35.17-PM.png?rlkey=hefwskhabqlhlkmd9v32c8ky8&dl=0 + + + +# Background and References + + + + +_No response_ diff --git a/PW41_2024_MIT/Projects/3DSlicerForLatinAmerica/README.md b/PW41_2024_MIT/Projects/3DSlicerForLatinAmerica/README.md new file mode 100644 index 000000000..7d581ad51 --- /dev/null +++ b/PW41_2024_MIT/Projects/3DSlicerForLatinAmerica/README.md @@ -0,0 +1,132 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: 3DSlicerForLatinAmerica +category: Infrastructure +presenter_location: Online + +key_investigators: +- name: Sonia Pujol + affiliation: Brigham and Women's Hospital, Harvard Medical School + country: USA + +- name: Luiz Murta + affiliation: Universidade de São Paulo + country: Brazil + +- name: Douglas Samuel Gonçalves + affiliation: Universidade de São Paulo + country: Brazil + +- name: Lucas Sanchez Silva + affiliation: Universidade de São Paulo + country: Brazil + +- name: Paulo Eduardo de Barros Veiga + affiliation: Universidade de São Paulo + country: Brazil + +- name: Adriana Herlinda Vilchis González + affiliation: Universida Autónoma del Estado de México + country: Mexico + +- name: Enrique Hernández Laredo + affiliation: Universida Autónoma del Estado de México + country: Mexico + +- name: Victor Manuel Montaño Serrano + affiliation: Universida Autónoma del Estado de México + country: Mexico + +- name: Monserrat Ríos-Hernández + affiliation: Universida Autónoma del Estado de México + country: Mexico + +- name: Juan Carlos Avila Vilchis + affiliation: Universida Autónoma del Estado de México + country: Mexico + +- name: Vianney Muñoz Jiménez + affiliation: Universida Autónoma del Estado de México + country: Mexico + +- name: Mariana Alvarez-Carvajal + affiliation: Universida Autónoma del Estado de México + country: Mexico + +- name: Valeria Gómez Valdes + affiliation: Universida Autónoma del Estado de 
México + country: Mexico + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +- name: Steve Pieper + affiliation: Isomics Inc. + country: USA + +--- + +# Project Description + +The goal of this project is twofold: first, to leverage 3D Slicer’s internationalization infrastructure to localize the software into Spanish and Portuguese, and second, to develop a novel software infrastructure for localizing tutorials. + + +## Objective + + + +1. Internationalize the Tutorial Maker module +2. Translate the step-by-step Tutorial Maker guide to Portuguese and Spanish. +3. Upgrade the Slicer tutorials in Portuguese with the Portuguese version of Slicer. +4. Solve functional issues in the Tutorial Maker repository, such as changing the infrastructure to a more human-readable one. +5. Translate MonaiLabel extension to Portuguese and Spanish. + +## Approach and Plan + + + +1. Verify if the Qt widgets are all marked as translatable and enclose the strings with the tr class. Also, verify the necessary configuration to add these strings in Weblate. +2. The Slicer for Latin America translation team will work on translating the user manual and tutorials to guarantee that the Tutorial Maker module can be used by English, Portuguese, and Spanish users. +3. The Slicer for Latin America engineering team will work with the Slicer developer community to solve the issues opened in the repository. +4. The Slicer for Latin America translation team will work on translating the MonaiLabel extension to Spanish and Portuguese using Slicer Weblate + +## Progress and Next Steps + + + +1. An incomplete ts file is committed to the SlicerLanguageTranslations repository—[Link to the Pull Request](https://github.com/Slicer/SlicerLanguageTranslations/pull/543/commits). Still need to mark some errors as translatable. +2. Slicer for Latin America session + + SlicerLA_PW41 +3. 
Tutorial Maker Demo +Screenshot 2024-06-27 at 11 41 50 PM + +Progress in resolving functional issues in the Tutorial Maker repository, such as changing the infrastructure to a more human-readable one: During the sessions a Demo of the Tutorial Maker module was shown, in which a stable version of the module was shown, where problems were solved, such as larger quantities in the drawing directions, elimination of undesirable effects of flashing on the screen when generating images, and the incorporation of the function of generating the tutorial in PDF format, previously limited to HTML and Md. + +4. This week has started the translation of the step-by-step Tutorial Maker guide into Spanish for Latin America. The goal is that this new module allows the creation of Tutorials for every user of the software. The progress of this translation until now is 80%. + +5. Tutorial Maker creates three files with the same information (images, annotations, descriptions): markdown, html and pdf + + SlicerLA_PW41 + +PDF example: + +pdf + +6. Translation of the tutorials to Portuguese with the interface in Portuguese. +![image](https://github.com/NA-MIC/ProjectWeek/assets/28208639/1158351c-46b0-48a9-9728-755e47cab2ee) + +7. 
+ +# Background and References + + +### GitHub Repository + +[TutorialMaker](https://github.com/SlicerLatinAmerica/TutorialMaker) diff --git a/PW41_2024_MIT/Projects/3DTeethLandmarkDetection/README.md b/PW41_2024_MIT/Projects/3DTeethLandmarkDetection/README.md new file mode 100644 index 000000000..4ee60ca7c --- /dev/null +++ b/PW41_2024_MIT/Projects/3DTeethLandmarkDetection/README.md @@ -0,0 +1,157 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: 3D Teeth Landmark Detection +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Juan Prieto + affiliation: University of North Carolina + country: USA + +- name: Lucie Dole + affiliation: University of North Carolina + country: USA + +- name: Florian Davaux + affiliation: University of North Carolina + country: USA + +- name: Jeanne Claret + affiliation: University of Michigan + country: USA + +- name: Gaëlle Leroux + affiliation: University of Michigan + country: USA + +- name: Lucia Cevidanes + affiliation: University of Michigan + country: USA + +--- + +# Project Description + + + + +## Background: + +Accurate landmark identification is crucial in various dental procedures. Manual identification is time-consuming and prone to errors. An AI-driven model will automate this process, offering a reliable, scalable solution. + +## Goals: + +1. Develop an Accurate Model: Use advanced machine learning algorithms for high-precision landmark identification. +2. Training and Validation: Train using the 3DTeethLand-MICCAI2024 dataset and validate with diverse samples. +3. Integration: Seamlessly integrate with other dental software. +4. User Interface: Create an intuitive interface in 3D Slicer for this new extension + + +## Methodology: + +Data Collection: Use the 3DTeethLand-MICCAI2024 dataset, containing annotated 3D dental models. 
+Model Development: Employ deep learning techniques, such as convolutional neural networks (CNNs), to develop the model. + +## Validation and Testing: + +Perform extensive testing with validation datasets to ensure robustness and accuracy. +Software Integration: Collaborate with dental software developers for smooth integration. +User Training: Provide training and support for effective model utilization. + + + +## Objective + + + + +Develop and validate an advanced AI model to accurately identify anatomical landmarks in dental models, enhancing precision in dental treatments, diagnostics, and educational applications. + + + +## Approach and Plan + + + + +## Day 1 + +### Data Download & Preprocessing and Exploration + +Download the 3DTeethLand-MICCAI2024 dataset and explore the data structure. Visualize landmarks, surface models and setup the data for training. + +## Day 2: Model Development, Training, and Testing + +### ShapeAXI models for shape analysis + +Use point cloud (PC) and multi-view approaches for shape analysis. + +### Initial Model Training: +Begin training the model using the preprocessed dataset. + +## Day 3: Model Validation and Fine-Tuning + +### Model Validation and model refinement + +- Hyperparameter Tuning +- Architecture changes + +## Day 4: Integration, UI Development + +### UI Design and Implementation: + +Create a basic user interface in 3D Slicer. +Documentation and Presentation Preparation. + +## Key Deliverables by End of Week: + +- Preprocessed and augmented dataset. +- CNN model implemented and trained in PyTorch. +- Validated and tested model with performance metrics. +- APIs/plugins for integration with dental software. +- Functional UI in 3D Slicer. +- Project documentation and presentation materials. + + + +## Progress and Next Steps + + + + +1. Download the data. 
+ + + + +# Illustrations + + + + +![image](https://github.com/NA-MIC/ProjectWeek/assets/7086191/4732bb2b-b24f-4833-91c0-ca1b8f519b07) + + + + +# Background and References + + + + +Baquero, Baptiste, Maxime Gillot, Lucia Cevidanes, Najla Al Turkestani, Marcela Gurgel, Mathieu Leclercq, Jonas Bianchi et al. "Automatic Landmark Identification on IntraOralScans." In Workshop on Clinical Image-Based Procedures, pp. 32-42. Cham: Springer Nature Switzerland, 2022. + + +# Results of the approach + +![image](https://github.com/NA-MIC/ProjectWeek/assets/7086191/02a65a32-940c-4284-b3d9-cb68d5789aff) +![image](https://github.com/NA-MIC/ProjectWeek/assets/7086191/9bcde2d2-3209-4ba5-9d24-60bb97f91a27) + +image diff --git a/PW41_2024_MIT/Projects/AiBasedUltrasoundImagingSimulator/README.md b/PW41_2024_MIT/Projects/AiBasedUltrasoundImagingSimulator/README.md new file mode 100644 index 000000000..1b351ddc3 --- /dev/null +++ b/PW41_2024_MIT/Projects/AiBasedUltrasoundImagingSimulator/README.md @@ -0,0 +1,95 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: AI-based ultrasound imaging simulator +category: IGT and Training +presenter_location: In-person + +key_investigators: + +- name: Xihan Ma + affiliation: WPI + +- name: Junichi Tokuda + affiliation: BWH + +- name: Simon Leonard + affiliation: JHU + +- name: Laura Connolly + affiliation: Queen's + +- name: Haichong + affiliation: Kai) Zhang (WPI + +--- + +# Project Description + + + + +We will discuss strategies to integrate AI-based ultrasound imaging simulators in multiple platforms for medical robotics and IGT, including the Gazebo dynamic simulator and PLUS. + +Demo video is available at [GitHub](https://github.com/MXHsj/rus_sim_visuals/) + +![2024-03-18_rus_sim1 0_beta](https://github.com/MXHsj/ProjectWeek/assets/31639301/7be20c1a-608b-4207-8e63-44429c663118) + +## Objective + + + +1. 
**Model** Improve the model - make the simulated ultrasound image more realistic +2. **Architecture** Explore the fast way to package the model into applications. + + +## Approach and Plan + +1. Model improvement + - The current model doesn't take account of tissue attenuation properties. Explore the use of CT segmentation data (or total segmentator). + - To use the neural network to accelerate the computation speed. +2. Architecture + - Create an independent library for CT-ultrasound conversion. This library takes a 2D resampled CT data that is aligned to the (virtual) ultrasound probe, and generate a corresponding simulated ultrasound image. + - Integration with existing platforms, including Gazebo, Slicer, PLUS (to be discussed with the community) + - + + + +## Progress and Next Steps + + + + +- Progress + 1. Built pipeline to generate ```sound speed map``` and ```density map``` from CT total segmentation. + + ![fig1](https://github.com/MXHsj/ProjectWeek/assets/31639301/378084c3-90fe-48ca-bdb5-bb0299c05800) + + 2. Built pipeline using k-wave to simulate B-mode ultrasound in 3-dimensional, i.e., the ultrasound beam thickness is taken into account. Tissue speed of sound and density are taken into account in the simulation. + +- Next steps + 1. Debug simulation settings, including transducer properties, beam transmit/receive pattern, etc. + 2. Generate sufficient CT(segmented)-to-simulated ultrasound pairs for neural network training. 
The network will learn the mapping from CT to ultrasound and achieve real-time processing speed + + + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +_No response_ diff --git a/PW41_2024_MIT/Projects/AnIgtMastersCourseBasedOnSlicer/README.md b/PW41_2024_MIT/Projects/AnIgtMastersCourseBasedOnSlicer/README.md new file mode 100644 index 000000000..dd2738037 --- /dev/null +++ b/PW41_2024_MIT/Projects/AnIgtMastersCourseBasedOnSlicer/README.md @@ -0,0 +1,70 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: An IGT Masters course based on Slicer +category: IGT and Training +presenter_location: In-person + +key_investigators: + +- name: Simon Drouin + affiliation: ETS + country: Canada + +- name: Rafael Palomar + affiliation: Oslo University Hospital / NTNU + country: Norway + +--- + +# Project Description + +The goal of this project is to improve the lecture and demo material for the Master's level at ETS Montreal entitled "Surgical Technologies" (see course website [here](https://www.etsmtl.ca/etudes/cours/gts880a), in French). + +The objective of the "Surgical Technologies" course is to familiarize students with the technologies that enable the planning and execution of minimally invasive surgical operations. These technologies combine various medical imaging acquisition techniques, precise image processing, 3D tracking of surgical tools, and several mechanisms and algorithms that allow for the alignment and visualization of this information, enabling surgeons to perform the most precise and minimally invasive operations possible for their patients. 
+ +By the end of the course, students are expected to: +- Understand the operating principles and limitations of the various devices involved in surgical guidance systems; +- Understand the main algorithms used during the planning and guidance of minimally invasive operations and their limitations; +- Analyze the context and constraints of a new clinical application and propose a design based on the software and hardware components studied; +- Design and implement a simple prototype guidance system for a specific clinical application. + +The practical work in the course is all based on the use of 3D Slicer. + +## Objective + +1. Identify existing tutorials that can help students of the course learn about the Slicer features they can take advantage of in their term project +2. List existing surgical technologies that are not already covered in the class and determine whether a prototype can easily be built based on existing Slicer modules. +3. List publicly available datasets that can be used to build IGT prototypes for different types of procedures. + +## Approach and Plan + +1. Find people who have an interest in a similar course and discuss material they already have to train people, even in other contexts +2. List existing tutorials and new material on this page +3. Identify a format that would enable the sharing of course material under a creative commons license (pick a licence) +4. List existing sources of publicly available medical images and their surgical application + +## Progress and Next Steps + +1. Tamas: Shared very nice slides on coordinate transformations and how to go from one space to another that complement existing course material +2. Sara Vidal Fernandez: Provides DBS data for use in project files +3. Nazim: Indicated the Zenodo database as an interesting source of publicly available patient scans that can be used in student projects +4. Andrey Fedorov: Showed how to find relevant medical images from the IDC database + 5. 
Discussed the limitations of the web interface for searching + 6. Pointed to tutorials on how to use the Python interface for advanced search: [IDC getting started](https://learn.canceridc.dev/getting-started-with-idc) + +# Illustrations + + + + + +# Background and References + + + + +_No response_ diff --git a/PW41_2024_MIT/Projects/AutomaticClassificationOfMrScanSequenceType/README.md b/PW41_2024_MIT/Projects/AutomaticClassificationOfMrScanSequenceType/README.md new file mode 100644 index 000000000..6a6e18081 --- /dev/null +++ b/PW41_2024_MIT/Projects/AutomaticClassificationOfMrScanSequenceType/README.md @@ -0,0 +1,114 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: 'Automatic classification of MR scan sequence type ' +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Deepa Krishnaswamy + affiliation: BWH + country: USA + +- name: Vamsi Krishna Thiriveedhi + affiliation: BWH + country: USA + +- name: Pedro Moreira + affiliation: BWH + country: USA + +- name: Cosmin Ciausu + affiliation: BWH + country: USA + +- name: Megha Kalia + affiliation: BWH + country: USA + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +--- + +# Project Description + + + + +Data curation is a necessary step before using many AI or ML models, but it can be difficult and time-consuming to do manually. For instance in prostate cancer, most tools use multiple types of MR sequences as input to develop models and perform tasks such as segmentation. + +In this project, we will develop methods for automatic classification of MR sequences. We had some great discussions and headway [last project week](https://projectweek.na-mic.org/PW40_2024_GranCanaria/Projects/DicomSeriesClassificationAndVisualizationOfParameters/), and are continuing this work. 
+ +We also made some progress since last project week and developed a few methods for classification of T2 axial, diffusion weighted (DWI), apparent diffusion coefficient (ADC) images, and dynamic contrast enhanced (DCE) images. We used combinations of image data and DICOM metadata as input, and developed a random forest classifier, and also two CNN-based classifiers -- see our [paper here](https://openreview.net/forum?id=1GEz81GU3g) and [code here](https://github.com/deepakri201/DICOMScanClassification). + +This project week, we'd like to talk to more people, address limitations of our work, and hopefully work on developing a more robust method for classification of scans. + + + +## Objective + + + + +1. We would like to discuss the limitations of our previous work, and brainstorm new ideas for automatic classification of the MR series type. +2. We would like to create an easy colab notebook for people to try out the methods +4. We would like to think about developing a more robust method + + + +## Approach and Plan + + + + +1. We will talk to people to discuss limitations of our method. For instance, what types of metadata should we use for the classification? Should we have a class for unknown scan type? Should we do a hierarchical classification method? How can we make the model agnostic to the area scanned? + + + + +## Progress and Next Steps + + + + +1. [Colab notebook](https://github.com/deepakri201/DICOMScanClassification_pw41/blob/main/DICOMScanClassification_user_demo.ipynb) - we download data from IDC, and run inference using the three pretrained models. +2. [Check out our HuggingFace space demo!](https://huggingface.co/spaces/deepakri201/DICOMScanClassificationDemo) - we download data from IDC, and run inference using the three pretrained models. We display the classification results and the image used for classification. Later, we want to allow the user to upload their own images. 
+ + + + +# Illustrations + + + +HuggingFace space demo: + +Here the user can select a specific collection --> patient --> study --> series to perform the classification. Then you run inference using the three models we developed. +DICOMClassification_demo1 + +Then the results of the classification are displayed, along with the image chosen for the classification. The user can also download the output colab notebook. +DICOMClassification_demo2 + +Video of the HuggingFace space demo: + + + +_No response_ + +# Background and References + + + + +[Progress from previous project week](https://projectweek.na-mic.org/PW40_2024_GranCanaria/Projects/DicomSeriesClassificationAndVisualizationOfParameters/) + +[Current work](https://openreview.net/forum?id=1GEz81GU3g) + +[Current code](https://github.com/deepakri201/DICOMScanClassification) diff --git a/PW41_2024_MIT/Projects/BamfNnunetMriBreastModel/README.md b/PW41_2024_MIT/Projects/BamfNnunetMriBreastModel/README.md new file mode 100644 index 000000000..d70dc3eed --- /dev/null +++ b/PW41_2024_MIT/Projects/BamfNnunetMriBreastModel/README.md @@ -0,0 +1,91 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: BAMF nnUNet MRI Breast Model +category: Segmentation / Classification / Landmarking +presenter_location: Online + +key_investigators: + +- name: Rahul Soni + affiliation: BAMF Health + country: Singapore + +- name: Jithendra Kumar + affiliation: BAMF Health + country: Singapore + +- name: Jeff Van Oss + affiliation: BAMF Health + country: US + +- name: Gowtham Murugesan + affiliation: BAMF Health + country: US + +--- + +# Project Description + + + + +We trained an nnUNet model to segment Breast, Fibroglandular Tissue, and Structural Tumor from MRI scans. 
We prepared the training set from multiple source such as [Breast-MRI-NACT-Pilot](https://wiki.cancerimagingarchive.net/pages/viewpage.action?pageId=22513764), [Duke-Breast-Cancer-MRI](https://wiki.cancerimagingarchive.net/pages/viewpage.action?pageId=70226903), and [ISPY1-Tumor-SEG-Radiomics](https://wiki.cancerimagingarchive.net/pages/viewpage.action?pageId=101942541) + + + +## Objective + + + + +1. `Model Container`: We would like to bring our model resources into a deployable container that can be used by research community to use our models for Breast segmention of MRI Scans +2. `Ease of Use`: Anyone should be able to pull the container (without access issues), mount the input scans and get segmentations +3. `Flexibility`: Target users should be able to get outputs in different formats like `dicom`, `nifti`, `nrrd` etc +4. `Scalability`: Support for single point inference as well as batched inference + + + +## Approach and Plan + + + + +We already have trained model weights and a python module that provides an interface for segmentation using the aforesaid model. Next steps for us would be to: +- Publish models weights in Zenodo +- Wrap around segmentation module using Mhub core APIs +- Build and test the container +- Create documentation + + + +## Progress and Next Steps + + + + +1. Trained the Breast and tumor segmentation model +2. Wrote a segmentation / inference module using trained model weights +3. 
Added support for `nifti` and `nrrd` outputs + + + +# Illustrations + + + + +Coming soon + + + +# Background and References + + + + +Coming soon diff --git a/PW41_2024_MIT/Projects/BrainTumorSegmentationWithMissingData/README.md b/PW41_2024_MIT/Projects/BrainTumorSegmentationWithMissingData/README.md new file mode 100644 index 000000000..f7473a3b2 --- /dev/null +++ b/PW41_2024_MIT/Projects/BrainTumorSegmentationWithMissingData/README.md @@ -0,0 +1,102 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Brain Tumor segmentation with Missing Data +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Reuben Dorent + affiliation: BWH + country: USA + +- name: Tina Kapur + affiliation: BWH + country: USA + +- name: Sarah Frisken + affiliation: BWH + country: USA + +--- + +# Project Description + + + + +This project aims to create a Slicer extension that can automatically segment brain tumors in brain multi-parametric MRI, even in the presence of missing data. + +This project will focus on two use cases where: +- all MR sequences (T1, contrast-enhanced T1, T2, FLAIR) are available +- only pre-contrast T1 and contrast-enhanced T1 + +The algorithm will not only segment the scans but also perform the required pre-processing steps (co-registration and skull-stripping). + + + + +## Objective + + + + +1. Develop a Slicer module that can automatically perform brain tumor segmentation +3. Create a module that has the flexibility to handle two potential sets of input data +4. Integrate pre-processing steps for end-to-end inference +5. Validate the module with a subset of BraTS and clinical data + + + + +## Approach and Plan + + + + +1. Train two combinations of nnUnet using the BraTS dataset. +2. Integrate the pre-trained nnUnet frameworks into Slicer using the TotalSegmentator Slicer plugin as a template +3. Leverage Slicer tools to perform the BraTS preprocessing steps +4. 
Collect clinical data for validation + + + + +## Progress and Next Steps + + + + +- nnUnet have been trained for two combinations of input (ceT1 + T1 and ceT1 + T1 + T2 + FLAIR). +- Preprocessing has been implemented using base Slicer extensions. +- The extension has been tested on Windows (GPU and CPU) and MacOS (CPU). +- Next steps: clean the GitHub [repository](https://github.com/ReubenDo/SlicerTumorSegmentator). + + + + + + +# Illustrations + + + + +![image](https://github.com/NA-MIC/ProjectWeek/assets/17268715/24b9168d-832d-49e9-a6d9-fbe3d08a8870) + +image + + + + +# Background and References + + + + +_No response_ diff --git a/PW41_2024_MIT/Projects/ComputerAssistedVesselSegmentationFor2DAngiography/README.md b/PW41_2024_MIT/Projects/ComputerAssistedVesselSegmentationFor2DAngiography/README.md new file mode 100644 index 000000000..d72fe8801 --- /dev/null +++ b/PW41_2024_MIT/Projects/ComputerAssistedVesselSegmentationFor2DAngiography/README.md @@ -0,0 +1,85 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Computer assisted vessel segmentation for 2D angiography +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Sarah Frisken + affiliation: Brigham and Women's Hospital + country: USA + +--- + +# Project Description + + + + +I have written a C++ library for computer-assisted vessel segmentation in DSA images. I have a stand-alone application that demos this system. The application is also in C++ and uses Qt and OpenGL. I hope to provide this to the Slicer community as a Slicer module. + + + +## Objective + + + + +1. Get feedback from people at project week about whether they would find this useful. +2. Learn how to build a loadable C++ Slicer module. +3. Find out if Slicer can support the necessary OpenGL functionality. If not investigate what would be needed to support it. +4. 
Reach goal: build a loadable module with the bare-bones functionality of the system. + + + + +## Approach and Plan + + + + +1. Demo the system to anyone who is interested. +2. Read the Slicer pages on building a loadable module. Seek help from experienced Slicer developers. +3. Talk to Slicer OpenGL gurus about OpenGL issues. +4. Stub out a loadable Slicer module and try to get it to run. Add OpenGL functionality for specialized rendering if possible. + + + + + + +## Progress and Next Steps + + + + +1. Describe specific steps you **have actually done**. + + + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +_No response_ diff --git a/PW41_2024_MIT/Projects/CtImageBasedPredictionModelInHeadAndNeckCancerContributionToMhub/README.md b/PW41_2024_MIT/Projects/CtImageBasedPredictionModelInHeadAndNeckCancerContributionToMhub/README.md new file mode 100644 index 000000000..ccaf4e77b --- /dev/null +++ b/PW41_2024_MIT/Projects/CtImageBasedPredictionModelInHeadAndNeckCancerContributionToMhub/README.md @@ -0,0 +1,90 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: 'CT image based prediction model in head and neck cancer: contribution to MHub' +category: DICOM +presenter_location: Online + +key_investigators: + +- name: Kate Akhmad + affiliation: ??? + country: ??? + +--- + +# Project Description + + + + +Head and neck cancers account for [nearly 4% of all cancers in the United States](https://www.cancer.gov/types/head-and-neck/head-neck-fact-sheet#how-can-people-who-have-had-head-and-neck-cancers-reduce-their-risk-of-developing-a-second-primary-new-cancer). Although there have been improvements in treatment and understanding of the disease, survival hasn't significantly improved in the last decades for the head neck cancer population in general. The CNN uses tumor delineations from the pre-treatment CT heard and neck scans to predict distant metastasis, loco-regional failure, and overall survival. 
+ +The model is peer-reviewed and was [published as open source](https://github.com/MaastrichtU-CDS/hn_cnn/tree/main?tab=readme-ov-file#description) . While the model is open-sourced, it still requires some additional settings for its implementation. To make it easily available, it's interesting to add this model to a standardized I/O framework such as the MHub platform. + +Model characteristics: +- Model input: DICOM files of CT head and neck +- Preprocessing steps: slice selection and cropping around the tumour region, transformation into PNG format. +- Model output: prediction of loco-regional failure, overall survival and distant metastasis. +- Metrics Table [3](https://www.nature.com/articles/s41598-023-45486-5#Tab3): +The performance of our network varied for different outcomes: +-- the 2-year distant metastasis prediction had the highest AUC, around 0.90, across the training, validation, and testing sets; +-- 4-year overall survival AUC 0.78; +-- 2-year loco-regional failure prediction AUC. + + + + +## Objective + + + + +Objective A. To make the model easily available through a standardized I/O framework in MHub. +Objective B. To estimate reproducibility and quality of the model on external datasets (if applicable). +Objective C. To be acquainted with the contribution pipeline in MHub. + + + +## Approach and Plan + + + + +1. To go through the contribution pipeline of the MHub platform +2. To make it available through a standardized I/O framework. +3. To test the executability of the model in the framework + + + +## Progress and Next Steps + + + + +1. Describe specific steps you **have actually done**. 
+ + + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +- The model was developed by [CDS group at Maastricht University](https://github.com/MaastrichtU-CDS/hn_cnn/tree/main?tab=readme-ov-file#description ) +- [Citation](https://doi.org/10.1038/s41598-023-45486-5) +- License: MIT license diff --git a/PW41_2024_MIT/Projects/Dcm2Parquet/README.md b/PW41_2024_MIT/Projects/Dcm2Parquet/README.md new file mode 100644 index 000000000..4244b1f7d --- /dev/null +++ b/PW41_2024_MIT/Projects/Dcm2Parquet/README.md @@ -0,0 +1,66 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: dcm2parquet +category: DICOM +presenter_location: In-person + +key_investigators: + +- name: Vamsi Thiriveedhi + affiliation: BWH + country: USA + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +- name: Steve Pieper + affiliation: Isomics, Inc. + country: USA +--- + +# Project Description + +As of now, we are not aware of a tool other than Google Cloud's HealthCare API that can extract DICOM Header for a dataset and enable querying the metadata via SQL. We aim to explore if it is possible to do this 'in-house' so that those researchers who can not upload their data to Google DICOM stores or do not have access to Google Cloud can benefit. To that end, we found duckdb, a fast in-process analytical database/client to be able to query highly complex nested data. Using duckdb, and pydicom, our goal is extract DICOM header in a way that is similar to Bigquery export feature in Google Cloud's HealthCare API. + +## Objective + +1. Convert DICOM header to parquet preserving the nesting +2. Figure out a way to dynamically update schema and data manipulations necessary +3. Make the tool available on Hugging Face by integrating with idc-index, to seamlessly experiment with existing data in IDC + + +## Approach and Plan + +1. Create a function to extract metadata at the series level first, assuming schema is consistent with in a series. 
+2. Identify which columns and fields in the nested hierarchy have inconsistent schema in a dataset, and choose the most exhaustive datatype. For example b/w a string and array of strings, string datatype will be updated to array. Fill the missing columns, fields with nulls + + +## Progress and Next Steps + +1. Able to extract metadata at series level without any problems. The [app](https://huggingface.co/spaces/vkt1414/dcm2parquet) reflects the progress made up to this point +2. Next, inspired by how BigQuery displays the schema, we aim to replicate that. After, we will compare the common columns between Image series and determine if any data manipulation is necessary. + ![image](https://github.com/NA-MIC/ProjectWeek/assets/115020590/b18deb90-5934-436b-a04d-0a047b8e017c) +3. Able to replicate how BigQuery displays the schema locally now. +4. Found several blogs on how others were thinking about schema evolution. + - https://kontext.tech/article/381/schema-merging-evolution-with-parquet-in-spark-and-hive + - https://blog.devgenius.io/data-processing-with-spark-schema-evolution-4d6032e3737c + - https://spark.apache.org/docs/latest/sql-data-sources-parquet.html + - https://medium.com/@shenoy.shashwath/performance-optimization-in-apache-spark-with-parquet-file-format-994273742c4f + - https://github.com/mplacko/ParquetSchemaMerging + - https://medium.com/analytics-vidhya/building-a-notebook-based-etl-framework-with-spark-and-delta-lake-b0eee85a8527 + + +# Illustrations +We hosted the app on Hugging Face space at https://huggingface.co/spaces/vkt1414/dcm2parquet + +![image](https://github.com/NA-MIC/ProjectWeek/assets/115020590/30a0d0b3-13f7-4ad4-8ee1-0b8eb9a4c98a) + + +# Background and References + + +- [https://medium.com/expedia-group-tech/practical-schema-evolution-with-avro-c07af8ba1725](https://medium.com/expedia-group-tech/practical-schema-evolution-with-avro-c07af8ba1725) diff --git 
a/PW41_2024_MIT/Projects/EnablingPytorch3DOnWindowsAndOptimizingMinicondaForSlicerExtensions/README.md b/PW41_2024_MIT/Projects/EnablingPytorch3DOnWindowsAndOptimizingMinicondaForSlicerExtensions/README.md new file mode 100644 index 000000000..b01e97698 --- /dev/null +++ b/PW41_2024_MIT/Projects/EnablingPytorch3DOnWindowsAndOptimizingMinicondaForSlicerExtensions/README.md @@ -0,0 +1,131 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Enabling PyTorch3D on Windows and Optimizing Miniconda for Slicer Extensions +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Leroux Gaelle + affiliation: University of Michigan + country: USA + +- name: Claret Jeanne + affiliation: University of Michigan + country: USA + +- name: Cevidanes Lucia + affiliation: University of Michigan + country: USA + +- name: Hutin Nathan + affiliation: CPE Lyon + country: France + +- name: Allemand David + affiliation: Kitware + country: USA + +- name: Prieto Juan Carlos + affiliation: University of North Carolina + country: USA + +--- + +# Project Description + + + + +This project focuses on enhancing compatibility and usability in two key areas. Firstly, we aim to enable the use of PyTorch3D on the Windows platform. By leveraging the Windows Subsystem for Linux (WSL2) and a virtual Miniconda environment, we intend to bypass the traditional limitations and provide Windows users with full access to PyTorch3D’s capabilities. Secondly, the project seeks to improve the integration of Miniconda with Slicer extensions. Our goal is to simplify the process of creating and managing virtual environments for Slicer extensions, thereby making the procedure more intuitive. This will not only ease the use of various analytical tools and libraries but also streamline the setup process within WSL, especially for tools incompatible with Windows. This approach aims to bridge the gap in functionality and user experience across different platforms. 
+ + + +## Objective + + + + +Our project aims to achieve three primary objectives: + +Operationalizing PyTorch3D on Windows: The first goal is to make PyTorch3D, typically unsupported on the Windows platform, fully functional. We plan to employ the Windows Subsystem for Linux (WSL2) combined with a virtual Miniconda environment to overcome this limitation. This strategy is designed to provide Windows users with complete access to the extensive functionalities of PyTorch3D. + +Improving Miniconda Integration for Slicer Extensions: Our second objective is to enhance the use of Miniconda as a virtual environment manager specifically for Slicer extensions. We aim to streamline the process of creating and managing new virtual environments that are utilized by Slicer extensions, making the procedure more intuitive and user-friendly. This advancement will facilitate the use of specialized libraries required for a variety of analytical tools, which are currently not integrable directly into Slicer. Additionally, this approach will assist in the setup of Miniconda3 and the creation of new environments within WSL, particularly for tools that are not available on Windows. + +Updating Modules Previously Unavailable on Windows Due to PyTorch3D: The third goal is to leverage the advancements made in the second objective to update several modules that were previously inaccessible on Windows due to PyTorch3D's limitations. Specifically, we aim to enhance: +- SlicerDentalModelSeg: A module dedicated to the segmentation of teeth. +- SlicerAutomatedDentalTools: Including two key components: + - ALI_IOS: Automated Landmarks Identification for Intra Oral Scan. + - AREG_IOS: Automated Registration of Intra Oral Scan. + + + + +## Approach and Plan + + + + +- Investigate PyTorch3D and Windows compatibility, pinpointing causes of incompatibility. +- Catalog PyTorch3D dependencies for WSL2 and outline requisite system configurations. 
+- Operationalize PyTorch3D on Windows using WSL2 and Miniconda. +- Develop an Automated Installer for WSL2 Setup on Windows. +- Create a Module to Streamline Miniconda for Slicer Extensions +- Update Modules Previously Unavailable on Windows Due to PyTorch3D: + - SlicerDentalModelSeg: Adapt and validate the module for Windows + - SlicerAutomatedDentalTools: + - ALI_IOS: Ensure compatibility and validate automated landmark identification. + - AREG_IOS: Adapt and validate automated registration of intraoral scans. +- Conduct comprehensive testing and validation of all updated modules on Windows. + + + + +## Progress and Next Steps + + + + +Progress : +- Completed Initial Research on PyTorch3D and WSL2 Compatibility +- Successfully ran PyTorch3D on WSL2 +- Created an installer for WSL2 +- Developed SlicerConda, a module to manage Miniconda. +- Updated SlicerDentalModelSeg. +- Updated SlicerAutomatedDentalTools: ALI_IOS. + +Next Step: +- Update FlexReg. + + + +# Illustrations + + + +#### SlicerConda Icon +SlicerConda Icon + +#### SlicerConda User Interface +SlicerConda User Interface + +### AREG Icon +AREG Icon + +### ALI Icon +ALI Icon + + + + +# Background and References + + + + +_No response_ diff --git a/PW41_2024_MIT/Projects/EnhancingTheSlicermorphHiResScreenCaptureModuleFor3DSlicer/README.md b/PW41_2024_MIT/Projects/EnhancingTheSlicermorphHiResScreenCaptureModuleFor3DSlicer/README.md new file mode 100644 index 000000000..2e3b1fbce --- /dev/null +++ b/PW41_2024_MIT/Projects/EnhancingTheSlicermorphHiResScreenCaptureModuleFor3DSlicer/README.md @@ -0,0 +1,97 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Enhancing the SlicerMorph Hi-Res Screen Capture Module for 3D Slicer +category: Infrastructure +presenter_location: Online + +key_investigators: + +- name: Oshane Thomas + affiliation: Seattle Children's Research Institute + country: USA + +- name: Murat Maga + affiliation: Seattle Children's Research Institute + country: USA + +--- + +# 
Project Description + + + + +The HiRes Screen Capture module in the SlicerMorph extension for 3D Slicer is designed to capture high-resolution screenshots of 3D views. It allows users to specify filename, output folder, and resolution, capturing all visible objects in the selected render window. This functionality is crucial for producing detailed visual documentation of 3D renderings used in research and presentations. + +The goal of this project is to prepare the HiRes Screen Capture module for the next update of 3D Slicer by addressing current limitations and enhancing its functionality. Specifically, the project aims to: + +- Fix issues causing spontaneous crashes on macOS. +- Correct image resolution errors related to custom display scaling. +- Provide accurate estimates of the scaled output image based on user specifications. +- Ensure stable operation across all major operating systems (Windows, macOS, Linux). + +High-resolution screenshots are essential for researchers and professionals who need to present detailed and accurate visual data. Enhancing this module will improve the reliability and usability of 3D Slicer, making it a more robust tool for the scientific community. + + + +## Objective + + + + +1. To enhance the HiRes Screen Capture module for 3D Slicer by fixing macOS crash issues, correcting image resolution with custom display scaling, providing accurate scaled output estimates, and ensuring stable operation across Windows, macOS, and Linux. + + + +## Approach and Plan + + + + +1. Identify and Fix Crashes on macOS: + 1. Conduct thorough testing to reproduce and identify crash scenarios. + 1. Implement solutions to handle these issues and ensure stable performance on macOS. +2. Correct Image Resolution with Custom Display Scaling: + 1. Investigate how custom display scaling affects screenshot resolution. + 1. Adjust the module to correctly interpret and apply user-defined scaling settings. +3. Provide Accurate Scaled Output Estimates: + 1. 
Develop principled approach to accurately predict the output image size based on user specifications. + 1. Update the user interface to display these estimates before capturing the screenshot. +4. Ensure Cross-Platform Stability: + 1. Perform extensive testing on Windows, macOS, and Linux. + 1. Resolve any platform-specific issues to ensure consistent and reliable operation. + + + +## Progress and Next Steps + + + + +Thus far, the first version of the HiRes Screen Capture module has been integrated into the latest preview version of SlicerMorph. Issues with undisplayed markup and annotations have been resolved. The module is operational except for the aforementioned issues. Users can specify magnification and receive an estimated output resolution before exporting images, although this estimate may be inaccurate if custom display scaling is applied. + + + +# Illustrations + + + + +![image](https://github.com/NA-MIC/ProjectWeek/assets/18602669/1d4fc9b0-357e-4f42-a08b-2cbb1a7cb075) + + + + +# Background and References + + + + +The current version of HiRes Screen Capture can be found at: + +[https://github.com/SlicerMorph/SlicerMorph/tree/master/HiResScreenCapture](https://github.com/SlicerMorph/SlicerMorph/tree/master/HiResScreenCapture) diff --git a/PW41_2024_MIT/Projects/EpistimAs3DSlicerExtension/README.md b/PW41_2024_MIT/Projects/EpistimAs3DSlicerExtension/README.md new file mode 100644 index 000000000..1422679dd --- /dev/null +++ b/PW41_2024_MIT/Projects/EpistimAs3DSlicerExtension/README.md @@ -0,0 +1,180 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: epiSTIM as 3d slicer extension +category: --- +layout: pw41-project + +permalink: /:path/ + +project_title: epiSTIM as 3d slicer extension +category: Segmentation / Classification / Landmarking +presenter_location: Online + +key_investigators: + +- name: Carole-Anne COS + affiliation: ICM + country: France + +- name: Martin GREGORIO + affiliation: ICM 
+ country: France + +- name: Sara FERNANDEZ VIDAL + affiliation: ICM + country: France + +--- + +# Project Description + + + + +We are proposing a new extension to 3D Slicer designed to help clinicians and researchers to manage neuroimaging data needed for SEEG procedures. This is a 3D Slicer extension that is based on former code from the epiSTIM project by Sara Fernandez Vidal. + +**SEEG Introduction** : Intracerebral electrodes are used in the context of stereo-electroencephalography (SEEG) recordings in patients with pharmacoresistant epilepsy. SEEG is an invasive method used to determine which anatomical structures of the brain generate epileptic seizures. SEEG is usually used when non-invasive explorations are inconclusive. A precise localization and 3D visualization of brain anatomy is crucial to precisely plan intracerebral electrode trajectories and then localize the origin and spreading of the seizures. + + + + +## Objective + + + + +Bundle all our previous sparse code into a self-contained Slicer extension that works with Docker. + + + + +## Approach and Plan + + + +Our previous work focused on converting existing code (epiSTIM) into user-friendly Slicer modules that loosely work with each other. We used Docker to bundle parts of the processing to improve portability and version handling. Docker images were used to run Brainvisa, Freesurfer and Fastsurfer, sMRIprep, and more. + +**Plan** +- Clean up the current code and create a general repo of all our individual modules. +- Research the process of bundling our modules into an extension. +- Build the extension. +- Determine a platform for the application to run on: personal computers of clinicians or a server. + + + + +## Progress and Next Steps + + + + +Several key modules have already been written for the SEEG pipeline. Some are to be finished before the week so that we can concentrate on the extension. 
+ + + + +# Illustrations + + + + +![SEEGetEpiSTIM EN](https://github.com/NA-MIC/ProjectWeek/assets/45314202/08031a72-d90d-45e1-a7a1-76a281b43e6f) + + + + +# Background and References + + + + +_No response_ + +presenter_location: Online + +key_investigators: + +- name: Carole-Anne COS + affiliation: ICM + country: France + +- name: Martin GREGORIO + affiliation: ICM + country: France + +- name: Sara FERNANDEZ VIDAL + affiliation: ICM + country: France + +--- + +# Project Description + + + + +We are proposing a new extension to 3D Slicer designed to help clinicians and researchers to manage neuroimaging data needed for SEEG procedures. This is a 3d slicer extension that is based on former code from the epiSTIM project by Sara Fernandez Vidal. + +**SEEG Introduction** : Intracerebral electrodes are used in the context of stereo-electroencephalography (SEEG) recordings in patients with pharmacoresistant epilepsy. SEEG is an invasive method used to determine which anatomical structures of the brain generate epileptic seizures. SEEG is usually used when non-invasive explorations are inconclusive. A precise localization and 3D visualization of brain anatomy is crucial to precisely plan intracerebral electrode trajectories and then localize the origine and spreading of the seizures. + + + + +## Objective + + + + +Bundle all our previous sparse code into a self-contained slicer extension that works with docker. + + + + +## Approach and Plan + + + +Our previous work focused on converting existing code (epiSTIM) into user friendly slicer modules that loosely work with each other. We used the software docker to bundle parts of the processing to improve portability and version handling. Docker images were used to run Brainvisa, Freesurfer and Fastsurfer, sMRIprep, and more. + +**Plan** +- Clean up the current code and create a general repo of all our individual modules. +- Research the process of bundling our modules into an extension. +- Build the extension. 
+- Determine a platform for the application to run on : Personal computers of clinicians or a server. + + + + +## Progress and Next Steps + + + + +Several key modules have already been written for the SEEG pipeline. Some are to be finished before the week so that we can concentrate on the extension. + + + + +# Illustrations + + + + +![SEEGetEpiSTIM EN](https://github.com/NA-MIC/ProjectWeek/assets/45314202/08031a72-d90d-45e1-a7a1-76a281b43e6f) + + + + +# Background and References + + + + +_No response_ diff --git a/PW41_2024_MIT/Projects/EvaluatingUncertaintyVisualizationThroughAGameIn3DSlicer/README.md b/PW41_2024_MIT/Projects/EvaluatingUncertaintyVisualizationThroughAGameIn3DSlicer/README.md new file mode 100644 index 000000000..4dca2e25b --- /dev/null +++ b/PW41_2024_MIT/Projects/EvaluatingUncertaintyVisualizationThroughAGameIn3DSlicer/README.md @@ -0,0 +1,98 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Evaluating Uncertainty Visualization through a game in 3D slicer +category: IGT and Training +presenter_location: In-person + +key_investigators: + +- name: Mahsa Geshvadi + affiliation: ' Brigham and Women''s Hospital' + country: USA + +- name: Sarah Frisken + affiliation: ' Brigham and Women''s Hospital' + country: USA + +--- + +# Project Description + + + + +Uncertainty is present in all medical images and originates from different sources. Uncertainty is difficult to interpret due to its probabilistic nature, and communicating it is equally difficult. In this project, we developed an uncertainty visualization module on 3D Slicer, enabling users to explore different uncertainty visualization techniques. The key challenge now is evaluating these techniques for real-world applicability. To address this, we implemented a game for quantitative evaluation of uncertainty visualization. 
In the game, users perform tasks requiring decision-making under uncertainty, and we measure their performance with and without visualization using scores. Specifically, the game simulates decision-making during tumor resection surgery, where MRI images have uncertainty due to brain shifts. Users must decide whether to carve out the tumor at specific locations, reflecting the real surgical decision-making process. + + + +## Objective + + + + +1. Objective A: Uncertainty visualization helps make better decisions under uncertainty. +2. Objective B: Exploring and evaluating the Uncertainty Visualization module with a game helps improve the understanding of uncertainty. + + + +## Approach and Plan + + + + +1. Training level: helps participants become familiar with uncertainty in the game and explore different visualization techniques. Players can see the ground truth segmentation and their scores, allowing them to observe the impact of their decisions directly. + + + + + + + + +3. The challenge phase builds on skills learned during training, where players perform tasks without seeing the ground truth segmentation or their scores and cannot undo actions. It consists of two steps: first, making decisions without uncertainty visualizations on a different case, and second, repeating the task with uncertainty visualizations on the same case, without any prior feedback or guidance. + + + + + + + + +## Progress and Next Steps + + + + +1. Explore uncertainty visualization evaluation through a game in more scenarios. +2. Make the game more complex by adding new levels. 
+ + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +- [https://github.com/mahsageshvadi/UncertaintyVisualization](https://github.com/mahsageshvadi/UncertaintyVisualization) diff --git a/PW41_2024_MIT/Projects/EvaluationOfAiMethodsForMriSegmentationOnIdcData/README.md b/PW41_2024_MIT/Projects/EvaluationOfAiMethodsForMriSegmentationOnIdcData/README.md new file mode 100644 index 000000000..9f753d12e --- /dev/null +++ b/PW41_2024_MIT/Projects/EvaluationOfAiMethodsForMriSegmentationOnIdcData/README.md @@ -0,0 +1,118 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Evaluation of AI methods for MRI segmentation on IDC data +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Cosmin Ciausu + affiliation: Brigham and Women's Hospital + country: US + +- name: Deepa Krishnaswamy + affiliation: Brigham and Women's Hospital + country: US + +- name: Megha Kalia + affiliation: Brigham and Women's Hospital + country: US + +- name: Andrey Fedorov + affiliation: Brigham and Women's Hospital + country: US + +--- + +# Project Description + + + + +We previously studied the application of a contrast-agnostic approach for MRI/CT abdomen organs segmentation, based on the generation of synthetic data. This synthetic data was then further used as a training set for a fully-supervised U-Net network. +Since this study was performed, other methods aiming to segment MR abdominal organs have been published. Our goal is to evaluate these new methods on IDC MR abdominal-focused data and see how it compares to our method. + + + + +## Objective + + + + +1. Evaluate performance of MR abdominal organs methods on IDC data. +2. Get feedback on our own method. + + + +## Approach and Plan + + + + +1. Select a subset of IDC MR abdominal-focused IDC data. +2. Create evaluation notebooks for newly published methods on this subset. +3. Compare to our method. 
+ + + +## Progress and Next Steps + + + + +1. [GitHub repo](https://github.com/deepakri201/mr_seg) for colab notebooks for evaluation of MR segmentation methods +2. Look into methods like STAPLE for consensus of segmentations - WIP +3. Perform a comparison of the methods to ground truth - WIP + + + + +# Illustrations + + + + + +Comparison of MR segmentation methods on a subject from AMOS dataset: +- Top left = ground truth expert segmentations +- Top right = our approach +- Bottom left = TotalSegmentator +- Bottom middle = MRSegmentator +- Bottom right = our approach + + +Comparison of MR segmentation methods on a subject IDC TCGA-LIHC subject: +- 3D = our approach +- Left = our approach +- Middle = TotalSegmentator +- Right = MRSegmentator +tcga_lihc_3DSlicer + +Comparison of MR segmentations on a subject from TotalSegmentator: +(ground truth in bold) +- Top row = our approach +- Middle row = TotalSegmentator +- Bottom row = MRSegmentator +totalsegmentator_3DSlicer + +**Dice distributions between AI segmentations and expert annotations on AMOS22 MR training split.** +![image](https://github.com/NA-MIC/ProjectWeek/assets/72577931/9b2cc0dc-7a74-465c-b08e-e876e16db8e9) + +# Background and References + + + + +- Our method + - [Towards Automatic Abdominal MRI Organ Segmentation: Leveraging Synthesized Data Generated From CT Labels](https://arxiv.org/abs/2403.15609) +- New published methods + - [MRSegmentator: Robust Multi-Modality Segmentation of 40 Classes in MRI and CT Sequences](https://arxiv.org/pdf/2405.06463) + - [TotalSegmentator MRI: Sequence-Independent Segmentation of 59 Anatomical Structures in MR images ](https://arxiv.org/abs/2405.19492) + - [MRISegmentator-Abdomen: A Fully Automated Multi-Organ and Structure Segmentation Tool for T1-weighted Abdominal MRI](https://arxiv.org/abs/2405.05944) + - [TotalVibeSegmentator: Full Torso Segmentation for the NAKO and UK Biobank in Volumetric Interpolated Breath-hold Examination Body 
Images](https://arxiv.org/abs/2406.00125) diff --git a/PW41_2024_MIT/Projects/EvaluationOfSubcorticalSegmentationResultsOfUnestModelAndCreatingASlicerModule/README.md b/PW41_2024_MIT/Projects/EvaluationOfSubcorticalSegmentationResultsOfUnestModelAndCreatingASlicerModule/README.md new file mode 100644 index 000000000..77c347957 --- /dev/null +++ b/PW41_2024_MIT/Projects/EvaluationOfSubcorticalSegmentationResultsOfUnestModelAndCreatingASlicerModule/README.md @@ -0,0 +1,78 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Evaluation of subcortical segmentation results of Unest Model and creating a slicer + module +category: Segmentation / Classification / Landmarking +presenter_location: Online + +key_investigators: + +- name: Ghazal Danaee + affiliation: ÉTS + country: Canada + +- name: Sylvain Bouix + affiliation: ÉTS + country: Canada + +- name: Jarrett Rushmore + affiliation: Boston University + country: USA + +--- + +# Project Description + + + + +We will inspect the results of subcortical segmentations of Unest model visually and will try to improve these results. Also we will try to build a module in slicer . + + + +## Objective + + + + +_No response_ + + + +## Approach and Plan + + + + +_No response_ + + + +## Progress and Next Steps + +I debugged the initial error for the extension. Currently, I am working on the inference section of the unest model and the next step would be to implement the ensemble section of it. 
+ + +_No response_ + + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +_No response_ diff --git a/PW41_2024_MIT/Projects/FiniteElementModelPreprocessingModuleForComputingVertebralFailure/README.md b/PW41_2024_MIT/Projects/FiniteElementModelPreprocessingModuleForComputingVertebralFailure/README.md new file mode 100644 index 000000000..18e610549 --- /dev/null +++ b/PW41_2024_MIT/Projects/FiniteElementModelPreprocessingModuleForComputingVertebralFailure/README.md @@ -0,0 +1,140 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Finite element model preprocessing module for computing vertebral failure +category: Quantification and Computation +presenter_location: In-person + +key_investigators: + +- name: Ron Alkalay + affiliation: Beth Israel Deaconess Medical Center + country: US + +- name: Zahra Soltani + affiliation: Beth Israel Deaconess Medical Center + country: US + +- name: Steve Pieper + affiliation: Isomics + country: US + +- name: Csaba Pinter + affiliation: Ebatinka + country: Spain + +- name: Gnaneswar Chundi, + affiliation: Rutgers University + country: US + +--- + +# Project Description + + + + +Finite element simulations of the spine allow insight into the effect of bone metastatic lesions, affecting up to 70% of cancer patients at advanced stages of the disease, on the mechanism of pathologic vertebral fractures that cannot be measured in human subjects noninvasively. However, establishing personalized FE models from clinical imaging is complex and time-consuming, historically requiring manual vertebrae segmentation, creation and optimization of the model's mesh parameters, interrogation of the image data for the application of material models, and applying these material models at the element level. 
These operations often require expensive commercial applications with little control over the method used and, critically, the ability to extend their capabilities or incorporate new capabilities without significant financial expense. + +In collaboration with MIT, our group established a damage-based FE framework to investigate the effect of bone metastatic lesions on the failure and, more recently, the post-failure mechanical response of the human spines in cancer patients. Based on our ongoing collaboration with members of the 3D Slicer community, our group has developed DL models for segmenting human thoracic and lumbar vertebrae in cancer patients and developed scripts for meshing and applying bone modules based on the CT data. We propose to integrate our DL segmentation models, the Gmsh open-source meshing application, work on optimizing our material allocation algorithm, and create a parser for input file for the FE simulations (ABAQUS, MIT-Summit) within an extension framework to enable the complete pipeline (CT-data- FE input model). + +Having such an open-source model in 3d Slicer will significantly contribute to the scientific and clinical community for cancer patient research and to studying the effect of vertebral fractures on morbidity not only in cancer patients but also in the elderly populations and surgical outcomes. + + + +## Objective + + + + +1. Create an open-source Slicer extension to integrate DL spine segmentation, Gmsh, and our material allocation scripts, allowing the preparation of an input file for FE simulations. +2. Discuss the possible integration of tools for the DL segmentation of the intervertebral disc and the derivation of disc tissue material models from MR data (DTI) and, finally, assembly of vertebrae-disc models (boundary conditions from our muscle models and joint interactions). Simulations within 3Dslicer😊 + + + +## Approach and Plan + + + + +1. Discuss the current analysis and management scripts pipeline. 
Integration of the DL-based masks for generating data for the pipeline. Visualization and optimization options of mesh quality and applied bone modulus. +a. The key gap is how best to optimize the mesh-based material allocation, which requires the computation of bone properties at the location of each mesh element. +2. What issues must be solved for this integration within the extension mechanism? Build an integration plan emphasizing a framework for modularity and code expansion. +3. Discuss methods of results presentation. + + + +## Progress and Next Steps + + + + +We follow the following steps: +1. The element size on the boundary is set manually: in 3DSlicer we use "Surface Toolbox" under "Surface Models" to increase the size of boundary elements in the .vtk file (change the Number of points in Uniform remesh) and save the .vtk file. +2. We create and save a basic geo file to generate volume out of the .vtk file as below: +``` +Merge "Model.vtk"; +//+ +Surface Loop(1) = {1}; +//+ +Volume(1) = {1}; +``` +3. We generate optimized 3D mesh using Gmsh by running the following command +`gmsh $geo_file -3 -optimize -format msh2` +4. For calculating average BMD, we use the concept of shape functions to find which voxels belong to an element, and then we average among the values greater than 0. To do this, first, in 3D Slicer, we find the coordinates of voxels and the HU values. Then, for each element, a voxel will belong to the element if the summation of 4 shape functions at the voxel's location equals 1. For linear tetrahedrons, the shape function of each vortex is the volume of the tetrahedron made with three other vertices and the point. The below figure, for example, shows these volumes for an arbitrary point inside the element. + + ![Picture1](https://github.com/NA-MIC/ProjectWeek/assets/49168951/47d36e51-a130-4d2a-b25c-3283d44979da) + - We are eager to use any other approach for better numerical efficiency. 
+ - In this approach, each voxel is assessed based on the coordinates of its center. Is there any possibility that one can calculate partial inclusions of voxels in the element? This approach will give us the capability of going to the smaller resolutions. + + + + + + +# Illustrations + + + + + +![Graphical abstract](https://github.com/NA-MIC/ProjectWeek/assets/49168951/e841d76c-012c-4c38-b3fa-cd3cbc4421c2) + +Figure 2: Graphical summary of the intended pipeline.. + + + +# Background and References +- [CT-based finite element simulating spatial bone damage accumulation predicts metastatic human vertebrae strength and stiffness](https://www.frontiersin.org/articles/10.3389/fbioe.2024.1424553/abstract) + +# Results + +**Extension Creation** +Screenshot 2024-06-28 at 9 39 14 AM + +**GMSH integration** +Screenshot 2024-06-28 at 10 11 55 AM + +**Visualization of Mesh in Slicer** +Screenshot 2024-06-28 at 10 12 45 AM + +**Future Steps** +- Add more advanced GMSH options for user + - Size fields (finer control over mesh size) + - Sampling Rate + - Rate of change + - etc. +- Add functionality to interrogate CT volume for HU data to assign modulus values to each element. +- Add QA functionality to evaluate and visualize mesh quality and fix bad elements. 
+- Add implementation into existing SegmentMesher + +Screenshot 2024-06-28 at 10 13 54 AM + + + + + diff --git a/PW41_2024_MIT/Projects/GridBasedSemiLandmarking/GridInitial.png b/PW41_2024_MIT/Projects/GridBasedSemiLandmarking/GridInitial.png new file mode 100644 index 000000000..bb488f8fd Binary files /dev/null and b/PW41_2024_MIT/Projects/GridBasedSemiLandmarking/GridInitial.png differ diff --git a/PW41_2024_MIT/Projects/GridBasedSemiLandmarking/README.md b/PW41_2024_MIT/Projects/GridBasedSemiLandmarking/README.md new file mode 100644 index 000000000..93a38bf98 --- /dev/null +++ b/PW41_2024_MIT/Projects/GridBasedSemiLandmarking/README.md @@ -0,0 +1,65 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Grid-based Semi-Landmarking via Surface Markups +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: +- name: Sara Rolfe + affiliation: Seattle Children's Research Institute + country: US + +- name: Murat Maga + affiliation: Seattle Children's Research Institute + country: US +--- + +# Project Description + + + +## Objective + +A grid-based semi-landmarking functionality comes up often as a request in context of Slicer/SlicerMorph users. Currently this only exists in proprietary software packages. We aim to provide the following functionality: +1. Objective A. Create a grid of equidistant landmark points can be projected onto a model surface +2. Objective B. As individual landmark points are adjusted manually, the grid is updated and resampled. +3. Objective C. A module will be created to support the user interactions + +## Approach and Plan + +The [Surface Landmark Extension](https://github.com/SlicerHeart/SlicerSurfaceMarkup/tree/master) developed by the Slicer Heart group provides the grid point structure that will be needed for this project. The remaining steps will be to: +1. Implement and test projection method to snap grid points to a model +2. 
Implement methods to update grid points when a single point is moved manually +3. Create a module to handle user interactions + +## Progress and Next Steps + + + +1. Developed projection method to snap Surface Markups to a model +2. Implemented and compared methods for update/resampling of Surface Markups +3. Developed a module to make user interactions more convenient +4. Pushed [test version of module](https://github.com/SlicerMorph/SlicerMorph/tree/PlaceLandmarkGrid) to SlicerMorph repo. + +# Illustrations + + + +![Initial Grid](https://github.com/NA-MIC/ProjectWeek/assets/43060230/5294ce21-2d71-4c87-b2b5-e403847609ed) + + + +# Background and References + + diff --git a/PW41_2024_MIT/Projects/ImplementSupportOfExternalDICOMServersInKaapana/README.md b/PW41_2024_MIT/Projects/ImplementSupportOfExternalDICOMServersInKaapana/README.md new file mode 100644 index 000000000..e2ea2142d --- /dev/null +++ b/PW41_2024_MIT/Projects/ImplementSupportOfExternalDICOMServersInKaapana/README.md @@ -0,0 +1,69 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Implement support of external DICOM servers in Kaapana +category: Cloud / Web +presenter_location: In-person + +key_investigators: + +- name: Vamsi Thiriveedhi + affiliation: Brigham and Women's Hospital + country: Boston +- name: Mikulas Bankovic + affiliation: DKFZ + country: Germany +- name: Andrey Fedorov + affiliation: Brigham and Women's Hospital + country: Boston + +--- + +# Project Description + + + +The goal of this project is to enhance the capabilities of Kaapana, an open-source platform, by enabling it to work seamlessly with DICOM data stored in Google Cloud's HealthCare API DICOM stores. Currently, Kaapana relies on an internal dcm4chee DICOM, which means all of the data needs to be managed on the same VM as Kaapana itself, and in the situation where data already exists in an external DICOM store, it needs to be replicated to the Kaapana dcm4chee. 
We would like to make it possible to reuse existing Google Healthcare DICOM store without copying its content. Building upon the [previous](https://projectweek.na-mic.org/PW40_2024_GranCanaria/Projects/RunKaapanaOnGkeAndImproveConnectivityOfKaapanaToIdc/) project week's progress, we aim to further improve the connectivity and compatibility of Kaapana with Google's Healthcare API. + + + +## Objective + + + + +1. Build on the progress done by Miki from Kaapana team on the PR in progress +2. Review GCP authentication's procedure currently implemented + + + + +## Approach and Plan + + + + +1. Evaluate the current state of the feature by building docker images from the outstanding PR +2. Implement GCP authentication by asking the user to upload a service account key file with necessary permissions + + + + +## Progress and Next Steps + +1. Currently authentication is handled by supplying a service account key as a string +2. Miki mentioned that the [PR](https://codebase.helmholtz.cloud/kaapana/kaapana/-/tree/feature/external_dicomweb_gcloud) is now ready for testing. +3. Vamsi created and tested an airflow workflow to send dicom files from local storage to GCP Dicom store with GCP's dicomweb package + - Adapting to kaapana architecture is pending +4. 
Vamsi is going to build and deploy from Miki's git branch to test + +# Illustrations +Created a form for collecting the info about the GCP DICOM store +![image](https://github.com/NA-MIC/ProjectWeek/assets/115020590/c32c112e-3a8c-4d9d-a106-170beeab9bb3) + + +# Background and References + +[https://projectweek.na-mic.org/PW40_2024_GranCanaria/Projects/RunKaapanaOnGkeAndImproveConnectivityOfKaapanaToIdc/](https://projectweek.na-mic.org/PW40_2024_GranCanaria/Projects/RunKaapanaOnGkeAndImproveConnectivityOfKaapanaToIdc/) diff --git a/PW41_2024_MIT/Projects/IntegrateCpuFriendlyAutoSegmentationAndCtUtilityModelsIntoMhub/README.md b/PW41_2024_MIT/Projects/IntegrateCpuFriendlyAutoSegmentationAndCtUtilityModelsIntoMhub/README.md new file mode 100644 index 000000000..4d9bae927 --- /dev/null +++ b/PW41_2024_MIT/Projects/IntegrateCpuFriendlyAutoSegmentationAndCtUtilityModelsIntoMhub/README.md @@ -0,0 +1,132 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Integrate CPU friendly auto segmentation and CT utility models into mhub +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Suraj Pai + affiliation: Brigham and Women's Hospital + country: Boston + +- name: Leonard Nürnberg + affiliation: Brigham and Women's Hospital + country: Boston + +- name: Andriy Fedorov + affiliation: Brigham and Women's Hospital + country: Boston + +- name: Hugo Aerts + affiliation: Brigham and Women's Hospital + country: Boston + +--- + +# Project Description + + + + +This project will aim to integrate two categories of models into [mhub.ai](mhub.ai) + +**1. CPU friendly (whole-body) auto-segmentation models** + +**2. CT utility model for image QA** + + + +## Objective + + + + +1. Working example of a CPU-friendly (whole-body) auto-segmentation model through the `mhub.ai` platform +2. 
Working example of a QA pipeline using CT utility tools through `mhub.ai` platform + + + + +## Approach and Plan + + + + +**CPU-friendly auto-seg** +Several auto-segmentation models have been integrated into slicer recently through [https://github.com/lassoan/SlicerMONAIAuto3DSeg/releases/tag/ModelsTestResults](https://github.com/lassoan/SlicerMONAIAuto3DSeg/releases/tag/ModelsTestResults) + +While the quick version of these models run fast on CPU, the slower versions take a couple of mins. It would be interesting to explore if CPU related optimizations would work to increase the speed and reduce memory of the full resolution versions while making the quick versions even faster. + +Some initial thoughts on optimization techniques could include, +1. Converting models to OpenVINO format for optimized inference on CPU ([https://docs.openvino.ai/2024/home.html](https://docs.openvino.ai/2024/home.html), [https://docs.openvino.ai/2024/omz_demos_3d_segmentation_demo_python.html](https://docs.openvino.ai/2024/omz_demos_3d_segmentation_demo_python.html)). This could provide faster inference and make models more lightweight offering a better user experience as well. + +2. For a majority of these auto-seg models, sliding window inferer implementation results in major differences in memory (with higher batch-size) and inference time (with larger overlap ratios). Is there an optimal configuration to save memory and increase speed? +3. Another ticket item is that the memory consumption largely increases when predicting more output classes in the softmax, is there a way to efficiently address this issue as well. Perhaps a more restrictive implementation of the sliding window inferer with an accuracy-efficiency trade-off? +4. 
Distilling models to smaller ones that run faster (might be something that takes longer than PW): [https://github.com/VaticanCameos99/knowledge-distillation-for-unet](https://github.com/VaticanCameos99/knowledge-distillation-for-unet) + + +**CT utility models** +Implementing CT image inspection utility models, namely, body part regression - [https://github.com/MIC-DKFZ/BodyPartRegression](https://github.com/MIC-DKFZ/BodyPartRegression). This model allows determining the body part examined and if there are anomalies in certain slices in the processed image (nifti). + + +Integrating this into Mhub would allow users to perform this QA by providing DICOM inputs directly + + + + +## Progress and Next Steps + +### CPU friendly auto-seg +Tested the abdominal-organs-v2.0.0 segmentation as the reported times on CPU were approx ~6 mins. + +![New Note](https://github.com/NA-MIC/ProjectWeek/assets/10467804/0d9be486-dbf4-45fe-965c-15ad2b6053ae) + + + +#### Default CPU version: +Screenshot 2024-06-27 at 19 40 24 + + +#### OpenVINO compiled model (most significant gain) + Reducing overlap ratio: +Screenshot 2024-06-27 at 18 54 49 + +PS: OpenVINO models are FP16 compressed and are half the size. + +### Body part regression +![BPREG](https://github.com/NA-MIC/ProjectWeek/assets/10467804/11ec7e93-c747-46e3-a86e-049aba5c82d3) + + +### Next Steps + +- Current conversion to OpenVINO is manually done for the abdominal model. This can be automated across models in a script and pushed to Github for download. +- CPU benchmarked on is AMD, benchmark on Intel and ARM. +- Implement Mhub models for CPU friendly autoseg. and interface with Andras' SlicerMONAIAuto3DSeg extension (work on some slicer extension specifics). +- Complete contribution workflow for Bpreg - model is ready to go! 
+ +# Illustrations + + + + + +BPREG: + +![image](https://github.com/NA-MIC/ProjectWeek/assets/10467804/db57f0d3-6e36-4bb1-85a8-93089f158f68) + + + + +# Background and References + + + + +1. [https://github.com/lassoan/SlicerMONAIAuto3DSeg](https://github.com/lassoan/SlicerMONAIAuto3DSeg) +2. [https://docs.openvino.ai/2024/home.html](https://docs.openvino.ai/2024/home.html) +3. [https://github.com/MIC-DKFZ/BodyPartRegression](https://github.com/MIC-DKFZ/BodyPartRegression) +4. [https://mhub.ai](https://mhub.ai) diff --git a/PW41_2024_MIT/Projects/IntegrateMrsegmentatorAndDeepspaModelsIntoMhubAi/README.md b/PW41_2024_MIT/Projects/IntegrateMrsegmentatorAndDeepspaModelsIntoMhubAi/README.md new file mode 100644 index 000000000..7736fb80e --- /dev/null +++ b/PW41_2024_MIT/Projects/IntegrateMrsegmentatorAndDeepspaModelsIntoMhubAi/README.md @@ -0,0 +1,109 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Integrate MRSegmentator and DeepSpA models into mhub.ai +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Felix Dorfner + affiliation: Charité Universitätsmedizin Berlin + country: Germany + +- name: Hartmut Häntze + affiliation: Charité Universitätsmedizin Berlin + country: Germany + +- name: Keno Bressem + affiliation: Technical University of Munich + country: Germany + +--- + +# Project Description + + + + +This project will aim to integrate two models into mhub.ai. + +**1. MRSegmentator:** +- Is a segmentation model that can accurately segment 40 organs and structures in human MRI scans of the abdominal, pelvic and thorax regions. The model works on different sequence types, including T1- and T2-weighted, Dixon sequences and even CT images. +- Paper: [https://arxiv.org/abs/2405.06463](https://arxiv.org/abs/2405.06463) + +**2. 
DeepSpA:** +- Is classification model that incorporates anatomical awareness to detect radiographic sacroiliitis. Detecting radiographic sacroiliitis plays an essential role in diagnosing and classifying axial Spondyloarthritis (axSpA). +- Paper: [https://arxiv.org/abs/2405.07369](https://arxiv.org/abs/2405.07369) + + + + + +## Objective + + + + +1. Objective: Working implementation of MRSegmentator into the mhub.ai platform. +2. Objective: Working implementation of DeepSpA into the mhub.ai platform + + + + + +## Approach and Plan + + + + + + + +## Progress and Next Steps + +### During Project Week +- MRSegmentator is wrapped in the MhubAI framework and ready to be tested + - Segmentations are registered as DICOM-SEG and can easily compared to other segmentation models + +- DeepSpA is wrapped in the MhubAI framework and ready to be tested + - Model produces both visual and classification outputs, both are organized and saved by mhub + + +### After Project Week +- Complete the testing process and publish both models on MHub.ai + + + + +# Illustrations + + + + + + + +Illustrations of both models can be seen on their respective GitHub pages, linked below: + + + +# Background and References + + + + +**MRSegmentator:** +- Code: [https://github.com/hhaentze/MRSegmentator](https://github.com/hhaentze/MRSegmentator) +- Paper: [https://arxiv.org/abs/2405.06463](https://arxiv.org/abs/2405.06463) + +**DeepSpA:** +- Code: [https://github.com/FJDorfner/Anatomy-Aware-Classification-axSpA](https://github.com/FJDorfner/Anatomy-Aware-Classification-axSpA) +- Paper: [https://arxiv.org/abs/2405.07369](https://arxiv.org/abs/2405.07369) diff --git a/PW41_2024_MIT/Projects/IntegratingItmtModelIntoMhubAiPlatform/README.md b/PW41_2024_MIT/Projects/IntegratingItmtModelIntoMhubAiPlatform/README.md new file mode 100644 index 000000000..6cedc3279 --- /dev/null +++ b/PW41_2024_MIT/Projects/IntegratingItmtModelIntoMhubAiPlatform/README.md 
@@ -0,0 +1,80 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Integrating ITMT Model into MHub.ai Platform +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Reza Mojahed-Yazdi + affiliation: Brigham and Women's Hospital + country: Boston, MA + +--- + +# Project Description + + + + +The MHub.ai is a platform for deploying deep learning models in medical imaging in a user-friendly and standardized environment. This project focuses on integrating the ITMT model (Automated Deep Learning TM-segmentation to accurately measure temporalis muscle thickness) into the MHub platform. + + + +## Objective + + + + +The objective of this project is to successfully integrate and test the ITMT model into the MHub platform. Additionally, we aim to get feedback on our integration method to improve its efficiency and effectiveness. + + + +![image](https://github.com/NA-MIC/ProjectWeek/assets/43614153/c24ffdcd-a183-4bd1-964d-382dfb6df0d3) + + + +## Approach and Plan + + + + +- Develop integration scripts to deploy ITMT model with MHub. +- Validate the accuracy of the ITMT model within the MHub environment. + + + + + + +## Progress and Next Steps + + + +During the week: + The integration script was prepared during the week. 
+ +After the project week: + Debug the code from the original Repo and finalize the implementation on MHUB.AI + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +- [https://github.com/AIM-KannLab/itmt](https://github.com/AIM-KannLab/itmt) diff --git a/PW41_2024_MIT/Projects/IntegrationOfSlicerAndRos/README.md b/PW41_2024_MIT/Projects/IntegrationOfSlicerAndRos/README.md new file mode 100644 index 000000000..5ccfb7041 --- /dev/null +++ b/PW41_2024_MIT/Projects/IntegrationOfSlicerAndRos/README.md @@ -0,0 +1,113 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Integration of Slicer and ROS +category: IGT and Training +presenter_location: In-person + +key_investigators: + +- name: Junichi Tokuda + affiliation: BWH + +- name: Anton Deguet + affiliation: JHU + +- name: Steve Pieper + affiliation: Isomics + +- name: Mariana Bernardes + affiliation: BWH + +- name: Laura Connolly + affiliation: Queen's University + +- name: Simon Leonard + affiliation: JHU + +--- + +# Project Description + +SlicerROS2 is an extension that enables direct communication between the Robot Operating System 2 (ROS2) and 3D Slicer. The ROS is a set of software libraries and tools for building robot applications. ROS2 has been developed and distributed using an open-source model and widely used in the robotics community. The goal of SlicerROS2 is to facilitate the integration of 3D Slicer and ROS to build systems for image-guided robot-assisted intervention. + +SlicerROS2 provides UI and API to communicate with other ROS nodes through Data Distribution Service (DDS), the publish-subscribe data transport middleware used in ROS2, allowing 3D Slicer to synchronize its scene graph (MRML) with ROS's [tf](https://wiki.ros.org/tf2). It also has an interface to load a visual model of the robot onto the Slicer scene from robot description data in the URDF format published on the ROS system. 
+ +## Objective + +The objectives of this project are as follows: +1. **Improve existing implementation based on feedback from Slicer experts.** We will discuss use-cases with current and potential users in the community. +2. **Explore options for binary distribution.** Since the module must be built against 3D Slicer with system SSL and specific versions of ROS2, it is not possible to be built as part of the nightly build process. + + +## Approach and Plan + +Questions discuss during the week: +- Questions of future use + - Limited to robots or anything with a ROS2 interface (e.g. haptic devices, IMU, optical and magnetic trackers…) + - Other software packages: [Gazebo](https://gazebosim.org/home), [AMBF](https://github.com/WPI-AIM/ambf), US/CT/MRI simulators… + +- Detailed questions + - Is multithreading possible? Any other way to trigger a periodic computing task (now using QtTimer) + - vtkObjects all have a name and timestamp. Is the name used in Slicer? Can we have a timestamp that is not a counter? + - Improving unit testing, we have some but implementation seems clunky + - For binary distributions, is there a way to host build and/or tgz files? I can host build on JHU computers. + - Is there an existing way to document code in modules, e.g. doxygen + - Small issues: QLatinString in 2 places breaks compiling on older Qt and likely useless, some missing const + + +## Progress and Next Steps + + + +1. Coding + 1. Fixed a few issues based on Slicer experts in the room + 1. We use a Qt timer to "spin" the ROS node. The timer was instantiated in the qLogicWidget and didn't spin until the user would select the ROS2 module. We moved it to qLogic so it's always running. + 1. Fixed CMake project name conflicts between ROS and Slicer macros so the build is in `slicer_ros2_module` (snake_case for TROS) but the module name is `ROS2` + 1. Fixed issue with tests turning off errors and warnings for everything + 1. Code generation + 1. 
Finished CMake macros to call code generator as well as code generated by CMake itself + 1. Fixed code generation so vtk Python wrappers work (using simpler types `int` vs `int32_t`) + 1. Some simplification in code generator + 1. Usability + 1. Allow to create subscribers and publishers using short names, i.e. `String` vs `vtkMRMLROS2PublisherStringNode` + 1. Added method to list all existing publishers and subscribers + 1. When user tries to create a publisher or subscriber with invalid name, display list of option (good for typos) + 1. Publishers and subscribers now have method `GetBlankMessage` so it's easier to create a payload in Python + 1. Create a self-contained ROS2 package for US simulator in Gazebo that builds a minimal version of PlusLib/IGSIO/vtkAddon that contains launch models and launch files to teleoperate a UR-5 mounted with a US probe. +1. Discussions + 1. Possible feature requests for Slicer + 1. Saved history in Python interpreter (like iPython) + 1. Time stamps in MRML nodes or vtk Object, as in `std::chrono`. Very useful for realtime applications or anything intro-operative. Most ROS payloads have timestamps. This could be either set automatically (in `SetModified`) or controlled by user to preserve time of data collection. + 1. Distribution(s), pros and cons from user perspective + 1. Continue as-is, i.e. source code and users have to compile the module. Pros: can add new messages. Cons: have to compile Slicer from scratch, extensions are not available to download from kitware servers. + 1. Binary distribution with ROS2 core libraries added to Slicer super build. Pros: ready to use, might even provide ROS2 support from Windows, MacOS, any linux instead of Ubuntu only. Cons: harder to add custom messages without a compiler + 1. Binary (bis). If we figure out how to "super build" ROS2 core libraries (`rclcpp`), why not provide the Python wrappers too (`rclpy`) as a pip build. At that point, could port existing features from C++ to Python. 
A ROS2 pip build could be used outside Slicer. + 1. Simulate and evaluate US/Robot hand-eye calibration by inserting probe in the simulated image and then compare results to ground-truth from the simulation. Adding reference 3D markers to make the simulation more relevant to clinical applications. +1. Next steps + 1. Investigate simple options to make source based distribution easier: provide Slicer and Slicer-SuperBuild, document how to compile and add extensions + 1. For any binary distribution, how hard would it be to compile all ROS2 dependencies from source for "super-building" or "piping" them + +# Illustrations + +Here we can see the robot model loaded with SlicerROS2 into the Slicer Scene. +In this example the we have a transperineal needle robot guide inserting a biopsy needle to sample the prostate of a pig model. +The scene also shows the intraoperative MRI images of the pig pelvis and segmentations of main anatomical structures. + +![smart_template_pig3](https://github.com/NA-MIC/ProjectWeek/assets/17165529/5d2a6f69-d882-415e-bb00-68329dff6750) + + +_No response_ + + + +# Background and References + + + + +_No response_ diff --git a/PW41_2024_MIT/Projects/ManualEditingOfFreesurferCorticalSurfaces/README.md b/PW41_2024_MIT/Projects/ManualEditingOfFreesurferCorticalSurfaces/README.md new file mode 100644 index 000000000..42ae3bd67 --- /dev/null +++ b/PW41_2024_MIT/Projects/ManualEditingOfFreesurferCorticalSurfaces/README.md @@ -0,0 +1,84 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Manual editing of FreeSurfer cortical surfaces +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Benoît Verreman + affiliation: ÉTS + country: Canada + +- name: Jarrett Rushmore + affiliation: Boston University + country: USA + +- name: Sylvain Bouix + affiliation: ÉTS + country: Canada + +--- + +# Project Description + + + + +We will continue to develop scripts to adapt 
freesurfer procedures so that manual corrections can be made accurately to white and pial surface reconstruction. + + + +## Objective + + + + +1. Improve code so it works better cross-platform +2. Identify remaining areas where corrections are difficult to make and improve +3. Use the modified surface in the downstream FreeSurfer pipelines + + + + +## Approach and Plan + + + + +_No response_ + + + + +## Progress and Next Steps + + +1. Adapted the bash script for both Linux and MacOS +2. Identified remaining areas where corrections are difficult to make: Hippocampus/Amygdala and temporal lobe +3. Added the last part of FreeSurfer recon-all pipeline : statistics, overlays, etc. + + + +# Illustrations + + + +![2024-06-28_NAMICS_result_188347_white_pial_surfaces_sagittal_plane](https://github.com/NA-MIC/ProjectWeek/assets/131895621/5720cb03-898a-47f6-a0a2-8995ae79a7cb) + + +![2024-06-28_NAMICS_result_188347_white_pial_surfaces_coronal_plane](https://github.com/NA-MIC/ProjectWeek/assets/131895621/da2d25d2-1a39-4016-a805-261dfee1dce9) + + + +# Background and References + + + + +_No response_ diff --git a/PW41_2024_MIT/Projects/MhubContributorsWorkshop/README.md b/PW41_2024_MIT/Projects/MhubContributorsWorkshop/README.md new file mode 100644 index 000000000..53201fa2c --- /dev/null +++ b/PW41_2024_MIT/Projects/MhubContributorsWorkshop/README.md @@ -0,0 +1,121 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: MHub Contributors +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Leonard Nürnberg + affiliation: Brigham and Women's Hospital + country: Boston + +- name: Andriy Fedorov + affiliation: Brigham and Women's Hospital + country: Boston + +- name: Hugo Aerts + affiliation: Brigham and Women's Hospital + country: Boston + +--- + +# Project Description + + + + +MHub.ai is a platform for deep-learning models in medical imaging. We aim to make AI in medical imaging as simple as possible. 
Therefore, all MHub models need zero set-ups, can be run with a single command, have a standardized IO interface, run directly on DICOM data, are fully customizable to run on other data types and file structures, are tested and reproducible, and run entirely offline. MHub also provides a toolbox to support developers with data conversion, organization, and standardization tasks. + +We want to demonstrate **WHY** bundling models in the MHub standard, make them as simple as possible to use, and provide a valuable resource to the community. + +Furthermore, we're thrilled to show **HOW** any model or algorithm can be wrapped into an MHub container. We plan to show the process, explain the tools we use, answer questions, and provide assistance and guidance to those who want to use or contribute to an MHub model. + + + +## Objective + + + + +1. Present the MHub.ai platform and model repository with more than 20 models (and counting). +2. Demonstrate the benefits of containerized and standardized models and how you can build on them for reproducible research. +3. Show how to implement any model in MHub in three steps and provide them to the community. +4. Support participants in implementing (their) models into MHub. +5. Gather feedback, improve our documentation, and explore what topics, formats, details, and intensity are best for the educational materials. + + + +## Approach and Plan + + + + +We plan to hold a workshop or break-out session where we demonstrate every step of the contribution process for MHub models in a walk-through style tutorial. We will give detailed examples, discuss best practices, and provide hands-on guidance to all who are planning to implement models into MHub. + + + + + + + +## Progress and Next Steps + + + +We currently host 27 segmentation, prediction and feature extraction models with 10+ more models under active development. 
+ +Bildschirmfoto 2024-06-28 um 10 10 43 + +To help our users contribute models to our platform, we provide detailed documentation and step-by-step tutorials: + +1. **MHub.ai Documentation** +We have [detailed documentation](https://github.com/MHubAI/documentation/tree/main) on [how to run a model](https://github.com/MHubAI/documentation/blob/main/documentation/mhub/run_mhub.md) in MHub and documentation on the individual [tools provided within the MHub-IO framework](https://github.com/MHubAI/documentation/blob/main/documentation/mhubio/mhubio_modules.md). + +2. **MHub.ai Model Deployment** +We created a tutorial that guides through the implementation of a model into the universal MHub format. + - [T3 - Create the Thresholder Model for MHub](https://github.com/MHubAI/documentation/blob/main/tutorials/run_totalsegmentator_on_idc_collection/mhub_tutorial_001.md) + +3. **MHub.ai Contribution Process** +MHub has a clearly defined contribution process. +The requirements and the process are explained in our [documentation](https://github.com/MHubAI/documentation/blob/main/documentation/mhub_contribution/contributing_a_model.md). + +4. **MHub.ai Tutorials** +We wrote two more tutorials demonstrating how to run and customize MHub models based on public data from IDC and how to visualize and compare results in 3D Slicer. 
+ - [T1 - Run TotalSegmentator on IDC Collection](https://github.com/MHubAI/documentation/blob/main/tutorials/run_totalsegmentator_on_idc_collection/mhub_tutorial_001.md) + - [T2 - Run Custom MHub Lung Segmentation Workflow on Chest CT in Nifti Format +](https://github.com/MHubAI/documentation/blob/main/tutorials/run_lungmask_on_chestct_in_nifti_format/mhub_tutorial_002.md) + + + +# Illustrations + + + + +![Mhub Contribution Flowchart](https://raw.githubusercontent.com/MHubAI/documentation/main/documentation/figures/submission_sequence_diagram.png) + + + +# Background and References + + + + + +You can learn more about the MHub platform, repository, and framework at the following links. +- [MHub.ai website](https://mhub.ai) +- [Model-Repository](https://mhub.ai/models) +- [Github Organization](https://github.com/MHubAI/) + +--- + +To dive deeper, you can find the developer documentation, tutorials, and the implementation of all models currently in our repository under these links. 
+- [Models repository](https://github.com/MHubAI/models) +- [Documentation](https://github.com/MHubAI/documentation) +- [Tutorials](https://github.com/MHubAI/documentation/tree/main/tutorials) diff --git a/PW41_2024_MIT/Projects/MorphodepotCollaborativeSegmentationProjects/README.md b/PW41_2024_MIT/Projects/MorphodepotCollaborativeSegmentationProjects/README.md new file mode 100644 index 000000000..eb719cc23 --- /dev/null +++ b/PW41_2024_MIT/Projects/MorphodepotCollaborativeSegmentationProjects/README.md @@ -0,0 +1,84 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: 'MorphoDepot: Collaborative segmentation projects ' +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Steve Pieper + affiliation: 'Isomics, Inc.' + country: USA + +- name: Murat Maga + affiliation: Seattle Children's + country: USA + +--- + +# Project Description + + + + +We are developing tools for segmentation of biological specimens (e.g. 3D microCT of fish or snakes). The idea is that a Lab Director will define a project, such as what scan to segment, what anatomical structures to segment, the terminologies to use, etc. Students or lab members would be assigned to segment subsets of the data. We want to leverage existing data management tools, such as github for organizing issues and contributions, and jetstream2 for hosting data and computation. + +We are interested in facilitating collaborative segmentation, including dividing a whole project into tasks, managing allocation of tasks to segmenters, managing/merging contributions, etc. + +If you are interested in similar topics, please join our project! + + + +## Objective + + + + +1. Networking: we would like to know how this fits with anyone else's projects and possibly collaborate +2. Exploration: test the use of github for managing segmentations and zarr with s3 back end for large data +3. 
Refine: based on what we learn, iterate on the concepts and implementation options + + + +## Approach and Plan + + + + +1. Try [zarr on ceph in jetstream2](https://www.zonca.dev/posts/2022-04-04-zarr_jetstream2) and [zarr in Slicer](https://gist.github.com/pieper/0e7edcf70c844925ea104e07aedbe92a). +2. Try storing seg.nrrd files in github repos +3. Try skeleton Slicer module + + + + +## Progress and Next Steps + +1. Talked with several other project week participants about common interests in collaborative segmentation but did not learn of any existing work that we can build on directly. A more common use case is large number of similar cases divided among annotators rather than annotators all working on a single large scan. +2. Confirmed that all packages are working correctly in Slicer + 1. git: confirmed that it's possible to create a git repo and commit seg.nrrd files from Slicer python + 2. github: confirmed that operations like creating pull requests can be done from Slicer python + 3. ome_zarr: can save any volume in zarr format (with pyramid), can also load, but slowly + 4. s5cmd: can be used to transfer data to/from s3-compatible buckets on Jetstream2 (ceph object store running in openstack) + 5. tensorstore: can load zarr from s3-compatible buckets very quickly +3. Tested performance on a Jetstream VM: + 1. Load volume (2110 x 677 x 666, 1.8 GB) from JS2 object store in about 5 seconds + 2. 
Same code works on non-JS2 machines, but more slowly + + + +# Illustrations + + + + +_No response_ + + + +# Background and References + +Experiments: [https://github.com/pieper/SlicerMorphoDepot/tree/main/Experiments](https://github.com/pieper/SlicerMorphoDepot/tree/main/Experiments) diff --git a/PW41_2024_MIT/Projects/MultimodalRegistration/README.md b/PW41_2024_MIT/Projects/MultimodalRegistration/README.md new file mode 100644 index 000000000..2c4f1f161 --- /dev/null +++ b/PW41_2024_MIT/Projects/MultimodalRegistration/README.md @@ -0,0 +1,150 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Multimodal Registration MR2CBCT +category: Quantification and Computation +presenter_location: In-person + +key_investigators: + +- name: Leroux Gaelle + affiliation: University of Michigan + country: USA + +- name: Claret Jeanne + affiliation: University of Michigan + country: USA + +- name: Cevidanes Lucia + affiliation: University of Michigan + country: USA + +- name: Allemang David + affiliation: Kitware + country: USA + +- name: Prieto Juan Carlos + affiliation: University of North Carolina + country: USA + +--- + +# Project Description + + + +This project aims to develop a novel Slicer tool that combines machine learning with image processing techniques to automatically register MRI to Cone Beam CT (CBCT) images, enabling enhanced visualization and analysis of the TMJ complex. By integrating MRI soft tissue information with CBCT bony details, this automated technique provides clinicians with a more comprehensive patient-specific 3D model of the TMJ to improve diagnostic accuracy and treatment planning. +Temporomandibular joint (TMJ) disorders affect a significant portion of the population and can cause chronic pain and disability. 
Accurate diagnosis is crucial for effective treatment planning, but can be challenging due to the complex anatomy and limited visibility of soft tissue structures on Cone Beam CT (CBCT) scans. MRI provides superior soft tissue contrast including the articular disc, but requires separate acquisition and manual registration with CBCT for detailed bone degeneration assessments. + + + +## Objective + + + + +The “Multimodal Registration MR2CBCT Project” aims to develop a sequence of image analysis preprocessing steps prior to accurately aligning and overlaying CBCT and MRI multimodal images, using Elastix registration tools. + + + +## Approach and Plan + + + +1. **Dataset Collection:** + - Compile a comprehensive dataset consisting of MRI and CBCT files. + - Perform manual approximation to align MRI and CBCT images initially. + - Perform manual segmentation of the MRI. + +2. **Image Registration Strategy:** + - The primary goal is to achieve precise registration between MRI and CBCT images. To accomplish this, we are exploring two main approaches: + +#### First Approach: + - **Image Transformation Model:** + - Develop and train a model to transform MRI images into CBCT-like images. + - **CBCT Registration:** + - Utilize existing tools to register the transformed CBCT images with actual CBCT images. + +#### Second Approach: + - **Automatic Segmentation:** + - Conduct automated segmentation of CBCT images as an initial step. + - **Automated MRI Segmentation:** + - Train a model to automate the segmentation process of MRI images. + - **Elastix-Based Registration:** + - Use Elastix to do the registration between MRI and CBCT images based on the segmentation. + - Invert MRI to facilitate the registration process with Elastix. + - Normalize MRI and CBCT +3. **Validate:** + - Validate the best method accuracy through rigorous testing against established benchmarks. 
+ - Create the Slicer module interface + - Write documentation and examples + + + + +## Progress and Next Steps + + + +### Previous work + +**Dataset Collection:** + - We compiled a comprehensive dataset consisting of MRI and CBCT files. + - Performed manual approximation to initially align MRI and CBCT images. + +**Image Registration Strategy:** +#### Second Approach: + - **Automated Segmentation:** + - Conducted automated segmentation of CBCT images as an initial step. + - **Image Preprocessing:** + - Invert the gray scale level of the MRI + - Normalize the MRI and the CBCT + - **Elastix-Based Registration:** + - Working to use Elastix to do the registration between MRI and CBCT images using the manual segmentation. The MRI has been inverted to facilitate the registration process with Elastix. + +### Progress this week +- Harmonization of Multimodal MRI and Cone Beam CT (CBCT) / MONAI normalization +- Registration evaluation metrics: loss function (SSIM, NNC, NMI) + +### Next Steps +**Image Registration Strategies:** +#### Alternative Approach: + - **CBCT Registration:** + - Develop and train a model to transform MRI images into CBCT-like images. + - After finalizing the transformation model, utilize existing tools to register the transformed CBCT images with actual CBCT images. + +#### Main Approach: + - **Automate MRI Segmentation:** + - Train a model to automate the MRI segmentation process. + - **Automate MRI approximation to CBCT:** + - Automatic approximation of the MRI on a CBCT. + +**Validation of the Multimodal Registration:** + - Validate the Multimodal Registration result through rigorous testing. 
+ - Create the Slicer module interface + - Write documentation and examples + + + + +# Illustrations +#### Overlay of the CBCT automated segmentation on a MRI +![Manual Segmentation of the Cranial Base on an MRI](https://github.com/NA-MIC/ProjectWeek/assets/91245687/a4a73f38-5e28-4d32-a5ad-816cad73b118) + +#### Invertion of an MRI +![Invertion of an MRI](https://github.com/NA-MIC/ProjectWeek/assets/91245687/7b8e4f61-90cf-45e8-99de-7e461fc1365b) + +#### Approximation of an MRI on a CBCT +![Approximation of an MRI on a CBCT](https://github.com/NA-MIC/ProjectWeek/assets/91245687/5c4211ce-e930-4bf0-b9d8-a1193a29ea0a) + + +# Background and References + + + + +_No response_ diff --git a/PW41_2024_MIT/Projects/NciImagingDataCommonsUserSupportAndPlatformDevelopment/README.md b/PW41_2024_MIT/Projects/NciImagingDataCommonsUserSupportAndPlatformDevelopment/README.md new file mode 100644 index 000000000..952e4d13a --- /dev/null +++ b/PW41_2024_MIT/Projects/NciImagingDataCommonsUserSupportAndPlatformDevelopment/README.md @@ -0,0 +1,114 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: NCI Imaging Data Commons - user support and platform development +category: Cloud / Web +presenter_location: In-person + +key_investigators: + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +- name: Deepa Krishnaswamy + affiliation: BWH + country: USA + +- name: Vamsi Thiriveedhi + affiliation: BWH + country: USA + +- name: Cosmin Ciausu + affiliation: BWH + country: USA + +- name: Leonard Nuerenberg + affiliation: AIM Lab + country: USA + +- name: Suraj Pai + affiliation: AIM Lab + country: USA + +- name: Steve Pieper + affiliation: Isomics Inc + country: USA + +- name: Ron Kikinis + affiliation: BWH + country: USA + +- name: Michael Onken + affiliation: OpenConnections GmbH + country: Germany + +- name: Daniela Schacherer + affiliation: Fraunhofer MEVIS + country: Germany + +- name: Andras Lasso + affiliation: Queen's 
University + country: Canada + +--- + +# Project Description + + + + +[NCI Imaging Data Commons](https://portal.imaging.datacommons.cancer.gov/explore/) is a cloud-based environment containing publicly available cancer imaging data co-located with analysis and exploration tools and resources. + +IDC provides a growing amount of publicly available cancer imaging data (>65TB at the moment, radiology and digital pathology, including images, annotations, analysis results and clinical data) curated in the cloud to support highly efficient access and to simplify analysis. + + + +## Objective + + +1. Raise awareness about IDC, help users, collect feedback to help prioritize future development. +2. Identify robust AI models that can be applied to IDC data to enrich IDC with annotations. +3. Work on various issues related to the development of IDC platform and related software tools. + +## Approach and Plan + + + +1. Interact with current and prospective users to answer questions and collect feedback. +2. Support any project that has a need for public datasets available for testing, cloud-based notebook implementations of the analysis, scaling up analysis to large cohorts within IDC. +3. Work on priority aspects of the project: maintenance and improvement of SlicerIDCBrowser and idc-index, improvements of the documentation and other learning materials +4. Improve/simplify access to the [NLST/TotalSegmentator analysis results](https://discourse.canceridc.dev/t/new-in-idc-v18-totalsegmentator-segmentations-and-radiomics-features-for-nlst-cts/582). +5. Work on maintenance of dcmqi priority issues: https://github.com/QIICR/dcmqi/issues/489, python wrapper API +6. MRTotalsegmenator SCT codes - Andras +7. DCMTK upgrade in Slicer - JC + + +## Progress and Next Steps + +1. Update [MHub+IDC tutorial](https://github.com/MHubAI/examples/blob/main/notebooks/PW41_tutorial.ipynb) in how it accesses IDC. +1. 
Prepared initial version of the query to extract processing steps for slide microscopy (SM) images using DICOM metadata ([https://github.com/ImagingDataCommons/idc-index-data/pull/30](https://github.com/ImagingDataCommons/idc-index-data/pull/30)). When completed, this will allow selecting SM images by embedding method, staining (H&E), and fixative without using BigQuery, and with queries of significantly lower complexity as compared to querying full index. +2. Implemented new feature in the dcmqi converter that allows including into DICOM SEG references to the segmented images when geometry of the segmentation is different from the image (e.g., when segmentation was done on the slices orthogonal to the segmented image) ([https://github.com/QIICR/dcmqi/issues/489](https://github.com/QIICR/dcmqi/issues/489)). Lacking this feature, ReMIND collection encoded images that are disconnected from the segmented MR images. +3. Mapped model-specific segmentation labels for [OMAS](https://docs.google.com/spreadsheets/d/1pBicNskjMDJBnD3w4yAQroj8SGSAhDfA_TUK24dLEyc/edit?gid=1390863317#gid=1390863317) and [TotalSegmentator](https://docs.google.com/spreadsheets/d/1oEzXCmraoLgbbb5lNxWiHuYDza86aXxKqSUmUetwI7M/edit?gid=780795691#gid=780795691) to SNOMED-CT (related PRs [https://github.com/wasserth/TotalSegmentator/pull/324](https://github.com/wasserth/TotalSegmentator/pull/324) and [https://github.com/wasserth/TotalSegmentator/pull/325](https://github.com/wasserth/TotalSegmentator/pull/325)). Those interested to map labels from their model can follow instructions in [https://qiicr.gitbook.io/dcmqi-guide/opening/coding_schemes/searching_codes_outside_dicom](https://qiicr.gitbook.io/dcmqi-guide/opening/coding_schemes/searching_codes_outside_dicom) and of course contact Andrey and/or ask questions on the [IDC forum](https://discourse.canceridc.dev/). +4. 
Presented IDC updates at the Thu breakout session (see notes and references in [this document](https://docs.google.com/document/d/11IG53uKYePUlQFCUX6nFw4HqQDyt2jcLkvqjnHGNPCI/edit)). +5. Reviewed beta (aka pita) release of the [pydcmqi](https://github.com/LennyN95/pydcmqi) python wrapper of dcmqi prepared by Leo. pydcmqi aims to simplify pythonic access to dcmqi functionality. + +# Illustrations + + + + +![Summary of IDC content as of data release v18](https://learn.canceridc.dev/~gitbook/image?url=https%3A%2F%2F1103581492-files.gitbook.io%2F%7E%2Ffiles%2Fv0%2Fb%2Fgitbook-x-prod.appspot.com%2Fo%2Fspaces%252F-MCTG4fXybYgGMalZnmf-2668963341%252Fuploads%252FBPUPVLBlGOSoK0iQxXbl%252Fidc_v18_summary.jpg%3Falt%3Dmedia%26token%3D332a4ac5-5850-4e23-9340-d50607ec3dfd&width=768&dpr=2&quality=100&sign=a98506d4008137a946a692376342be1a161a5301dca7439f7ee2d94db9fa95f1) + + + +# Background and References + + + + +* Fedorov, A., Longabaugh, W. J. R., Pot, D., Clunie, D. A., Pieper, S. D., Gibbs, D. L., Bridge, C., Herrmann, M. D., Homeyer, A., Lewis, R., Aerts, H. J. W., Krishnaswamy, D., Thiriveedhi, V. K., Ciausu, C., Schacherer, D. P., Bontempi, D., Pihl, T., Wagner, U., Farahani, K., Kim, E. & Kikinis, R. National Cancer Institute Imaging Data Commons: Toward Transparency, Reproducibility, and Scalability in Imaging Artificial Intelligence. RadioGraphics (2023). [https://doi.org/10.1148/rg.230180](https://doi.org/10.1148/rg.230180) +* Thiriveedhi, V. K., Krishnaswamy, D., Clunie, D., Pieper, S., Kikinis, R. & Fedorov, A. Cloud-based large-scale curation of medical imaging data using AI segmentation. Research Square (2024). 
[https://doi.org/10.21203/rs.3.rs-4351526/v1](https://doi.org/10.21203/rs.3.rs-4351526/v1) diff --git a/PW41_2024_MIT/Projects/NewRadiologyAndPathologyDeepLearningModelsIntoMhubAi/README.md b/PW41_2024_MIT/Projects/NewRadiologyAndPathologyDeepLearningModelsIntoMhubAi/README.md new file mode 100644 index 000000000..fcf203a69 --- /dev/null +++ b/PW41_2024_MIT/Projects/NewRadiologyAndPathologyDeepLearningModelsIntoMhubAi/README.md @@ -0,0 +1,89 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: New Radiology and Pathology Deep Learning Models into MHub.ai +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Curtis Lisle + affiliation: KnowledgeVis + country: USA + +- name: Leonard Nürnberg + affiliation: Brigham and Women's Hospital + country: Boston + +--- + +# Project Description + + + + +The MHub.ai project at Harvard has developed methods to execute machine learning models on medical images in an easy to use and standardized way. There is already a Slicer plugin for running MHub.ai format models. For this project, we propose to add two models of different types to the MHub library. + + + +## Objective + + + + +1. Objective A. Test a MONAI-based deep learning model in MHub and validate the instructions for new developers to follow. + +2. Objective B. Evaluate how well the MHub approach works for supporting pathology models in addition to radiology models. + + + +## Approach and Plan + + + + +Step 1. Port one of the pre-trained MONAIAutoSeg3D radiology models developed at Queens (by Andros Lasso et al.) for execution using the MHub framework as a docker container. Test the MHub I/O converters to read a DICOM image and reformat as needed from the input. Write out a DICOM segmentation object as the result. + +Step 2. Start converting a published pathology DNN model (Rhabdomyosarcoma segmentation) for the MHub framework. 
This will evaluate how well the MHub approach works for supporting pathology models in addition to radiology models. For example, can the same base Docker image work for pathology? + + + +## Progress and Next Steps + + + + +1. We selected two of the MONAIAutoSeg3D models from the Slicer Extension and wrapped them using the MHub.ai framework as an exercise to learn the MHub approach. As part of this process, we wrote a converter to produce the class descriptions used by MHub to describe model outputs from the original model descriptions. This approach could be used to convert other models later. + +2. We started adapting a trained Rhabdomyosarcoma pathology model for MHub. The first part of the MHub pipeline works in our prototype but we aren't processing the model outputs correctly yet. + +3. We completed a prototype implementation of the RMS model inside MHub.ai. This demonstrated that the MHub approach can be used for pathology as well as radiology models. Some cleanup is needed yet, but this was a lot of progress this week. + + +# Illustrations + +Below is a Slicer screenshot showing a segmentation created by an MHub.ai model. For this example, we took the low-res MONAIAutoSeg3D thoracic segmentation model from Andras' Slicer Extension and ported it to execute inside an MHub.ai workflow. Other pre-trained AutoSeg models could also be ported with minimal effort. This model uses the SegResNet DNN from the MONAI project: + + +MONAIAutoSeg-in-MHub-result-thoracic + +Here is a rendering of a Fractional DICOM segmentation superimposed over the source image. The segmentation was created by a trained model executing inside the MHub.ai environment. This model was ported during the project week. 
+ +![fractional_mhub_1](https://github.com/NA-MIC/ProjectWeek/assets/2152950/acd54257-d668-4026-b113-2c89f7f5b1b4) + + + +# Background and References + + + + +MONAI AutoSeg3D: [https://github.com/Project-MONAI/tutorials/tree/main/auto3dseg](https://github.com/Project-MONAI/tutorials/tree/main/auto3dseg) + +Slicer Extension: [https://github.com/lassoan/SlicerMONAIAuto3DSeg](https://github.com/lassoan/SlicerMONAIAuto3DSeg) + +pathology model: [https://github.com/knowledgevis/rms-infer-code-standalone](https://github.com/knowledgevis/rms-infer-code-standalone) diff --git a/PW41_2024_MIT/Projects/NodeFocus/README.md b/PW41_2024_MIT/Projects/NodeFocus/README.md new file mode 100644 index 000000000..756cdfbf5 --- /dev/null +++ b/PW41_2024_MIT/Projects/NodeFocus/README.md @@ -0,0 +1,82 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Node focus and highlighting +category: Infrastructure +presenter_location: In-person + +key_investigators: +- name: Kyle Sunderland + affiliation: Queen's University + country: Canada +- name: Andras Lasso + affiliation: Queen's University + country: Canada +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware + country: USA +--- + +# Project Description + +3D software applications often provide feedback mechanisms for selecting objects and showing users which objects they have selected, or are interacting with. This allows some visualizations to be hidden when the object is not in focus. + +During PW39, we worked on the initial visualization and implementation, however it has since become clear that we will need to account for additional use-cases such as VR and muli-user interaction. + +## Objective + +Analyze the existing prototype implementation and discuss ways that the design could be improved to account for the expanded use-cases. 
+ +This new implementation should be able to handle the existing use-cases: +- See the nodes that they are hovering over or interacting with in the various subject hierarchy trees or node selectors. +- Select nodes by clicking on them in one of the views. + +As well as the new use-cases: +- Multi-controller highlighting in VR. +- Allow highlighting by multiple users. + +If you would like to offer suggestions or feedback on the current prototype, then come see me in-person. + +## Approach and Plan + + + +1. Analyze and discuss how the current design can be improved to work with the additional use-cases. +2. Continue development of the node focus infrastructure. + +## Progress and Next Steps + +- Prototype implementation can be found here: [focus_pw_41](https://github.com/Sunderlandkyl/Slicer/tree/focus_pw_41). +- Developed new design, vtkMRMLSelectionNode will be converted to a vtkMRMLDisplayableNode, and vtkMRMLSelectionDisplayNode will be created to control visualization options. +- Initial implementation of the new displayable manager has now been implemented. Developers can add new non-singleton vtkMRMLSelection(Display)Node to control the selection visualization. Each selection node can have their own focus and display properties. 
+ +### Next steps + +- Test alternative displayable manager implementation +- Integrate changes in Slicer core +- Improve outline rendering performance +- Add and test focus visualization in VR + +# Illustrations + +Example showing multiple hard focus using multiple selection nodes: + +![image](https://github.com/NA-MIC/ProjectWeek/assets/9222709/1dddf50c-eea4-4e95-af1b-1ee95ae25564) + +Example showing segmentations: + +![Atlas node focus](https://github.com/NA-MIC/ProjectWeek/assets/9222709/cd0fd740-2aee-4010-b73d-dc8a53f8e58e) + +Example showing markups: + +![Markups node focus](https://github.com/NA-MIC/ProjectWeek/assets/9222709/2ecbef2b-e7a2-4317-9e9d-1191f5a75d4f) + +Example showing models using a combobox: + +![Combobox model node focus](https://github.com/NA-MIC/ProjectWeek/assets/9222709/7450c678-f8eb-482b-97c2-e0b95d4e05bc) + +# Background and References + +- [Development branch](https://github.com/Sunderlandkyl/Slicer/tree/focus_pw_41) diff --git a/PW41_2024_MIT/Projects/NousNavVideoTutorial/README.md b/PW41_2024_MIT/Projects/NousNavVideoTutorial/README.md new file mode 100644 index 000000000..e9f94c7e8 --- /dev/null +++ b/PW41_2024_MIT/Projects/NousNavVideoTutorial/README.md @@ -0,0 +1,63 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: NousNav Video Tutorial +category: IGT and Training +presenter_location: in person + +key_investigators: +- name: Sonia Pujol + affiliation: Brigham and Women's Hospital, Harvard Medical School + country: USA + +- name: Colin Galvin + affiliation: Brigham and Women's Hospital, Harvard Medical School + country: USA +--- + +# Project Description + +NousNav is an ongoing project led by Dr. Alex Golby at Brigham and Women's Hospital to build and disseminate a low-cost neuronavigation system for brain surgery. +The video tutorial aims to provide end-users with step-by-step guidance on how to set up and use NousNav. 
+ +## Objective + + + +1. Objective A. Review and curate the current version of NousNav video tutorial +1. Objective B. Identify the section of the tutorial that needs to be upgraded to the latest version of NousNav + +## Approach and Plan + + + +1. Review the current raw demonstration clips +1. Review the draft voice-over +1. Organize the raw demonstrations clips into sections that facilitate easy access + +## Progress and Next Steps + + + +1. Reviewed the current raw operating room demonstration clips and associated screen captures. +1. Reviewed the drafted scripts for voice-over, making adjustments to account for updates in the software package and hardware. +1. Recorded updated surgical planning tutorials showcasing each step of the planning workflow. +1. Organized the raw demonstration clips and scripts to facilitate easy access. +1. Incorperated similar language from the user guide into the tutorial scripts to ease learning curve of terminology. + +# Illustrations + + + +# Background and References + + +Screenshot 2024-06-27 at 5 43 45 PM +Screenshot 2024-06-27 at 5 44 31 PM diff --git a/PW41_2024_MIT/Projects/OmasCtOpenModelForAnatomySegmentationInComputerTomography/README.md b/PW41_2024_MIT/Projects/OmasCtOpenModelForAnatomySegmentationInComputerTomography/README.md new file mode 100644 index 000000000..b2102c3d6 --- /dev/null +++ b/PW41_2024_MIT/Projects/OmasCtOpenModelForAnatomySegmentationInComputerTomography/README.md @@ -0,0 +1,113 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: 'OMAS CT: Open Model for Anatomy Segmentation in Computer Tomography' +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Tamaz Amiranashvili + affiliation: University of Zurich + country: Switzerland + +- name: Murong Xu + affiliation: University of Zurich + country: Switzerland + +- name: Bjoern Menze + affiliation: University of Zurich + country: Switzerland + +--- + +# Project 
Description + + + + +We have developed a state-of-the-art automated segmentation model capable of identifying ~170 anatomical structures in volumetric CT scans. This model has been trained on a combined dataset of more than 22,000 diverse, partially-annotated CT scans, setting a new benchmark in medical imaging. Our goal is to integrate this model into a 3D Slicer extension, making it widely available to the community. + + + +## Objective + + + + +1. Model development: Train models on partially-annotated datasets for whole-body CT segmentation covering approximately 170 structures. +2. Open source the trained models: Open-source the trained models and the associated codebase on 3D Slicer and other platforms, making them easily accessible and utilizable for clinical and research purposes, among others. +3. Release the data: Release the expansive dataset and corresponding annotations on the Imaging Data Commons (IDC), facilitating further research on medical image analysis. + + + + + +## Approach and Plan + + + + +1. Data Management: Collection and curation of CT scans. +2. Model Training and Evaluation: Systematic training and assessment of models. +3. Data Release: Consolidation and release of the dataset and corresponding annotations in appropriate formats (e.g., DICOM) on IDC. +4. Model Release: Publication of final model weights. +5. Software Integration: Development and integration of a module for 3D Slicer, optimized for both CPU and GPU usage to accommodate varying user hardware. +6. Documentation: Creation of detailed user guidelines to facilitate the easy application of the models. + + + + + +## Progress and Next Steps + + + + +Current Achievements: + +1. Prototypes of the trained models and an operational inference pipeline have already been developed. + +In progress / next steps: + +1. Benchmarking on public medical image segmentation challenges, followed by evaluation and analysis of results. +2. Preparing the dataset and labels for public release. +3. 
Developing the 3D Slicer plugin for integration. + + + +# Illustrations + + + + +![10000005_snapshot](https://github.com/NA-MIC/ProjectWeek/assets/254898/dfbe0cbf-0341-4dfc-991d-bdcf2c621c2d) + + + + + + + + + + + + + + + +# Background and References + + + + +TBD diff --git a/PW41_2024_MIT/Projects/PlanningModuleForLeftVentricleAssistDeviceImplantation/README.md b/PW41_2024_MIT/Projects/PlanningModuleForLeftVentricleAssistDeviceImplantation/README.md new file mode 100644 index 000000000..b68e33eaa --- /dev/null +++ b/PW41_2024_MIT/Projects/PlanningModuleForLeftVentricleAssistDeviceImplantation/README.md @@ -0,0 +1,100 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Planning Module for Left Ventricle Assist Device Implantation +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Imre J. Barabas + affiliation: Semmelweis Univ. + country: Hungary + +- name: Attila Tanacs + affiliation: University of Szeged + country: Hungary + +- name: Matthew A. Jolley + affiliation: Children’s Hospital of Philadelphia + country: USA + +- name: Kyle Sunderland + affiliation: Queen's Univ. + country: Canada + +- name: Tamas Ungi + affiliation: Queen's Univ. + country: Canada + +- name: Andras Lasso + affiliation: Queen's Univ. + country: Canada + +--- + +# Project Description + + + + +The Planning Module for Left Ventricle Assist Device (LVAD) Implantation is aimed at developing a state-of-the-art tool to improve the precision and efficacy of LVAD placement surgeries. This module integrates advanced AI-based automatic 3D segmentation, mechanical simulations of heart muscle deformation, and an algorithm for automatic LVAD positioning, drawing from extensive clinical experience. 
The project seeks to enhance patient safety and surgical outcomes by providing surgeons with precise, patient-specific models and simulations, ultimately bridging engineering advancements with critical cardiac surgical applications. + + + +## Objective + + + + +This week, I will draft the algorithm for automatic LVAD positioning, incorporating clinical insights and surgical best practices. I will also begin iterative testing and refinement to ensure precise and effective device placement. + + + +## Approach and Plan + + + + +1. Describe specific steps of **what you plan to do** to achieve the above described objectives. + + + + +## Progress and Next Steps + + + + +1. Describe specific steps you **have actually done**. + + +MONAI label based detailed heart segmentation. +GUI based LVAD positioning module developement. + + + +# Illustrations +Képernyőfotó 2024-06-28 - 9 35 18 + +Képernyőfotó 2024-06-28 - 9 40 31 + +Képernyőfotó 2024-06-28 - 9 46 40 + + +_No response_ + + + +# Background and References + + + + +- Barabás IJ, Hartyánszky I, Kocher A, Merkely B. A 3D printed exoskeleton facilitates HeartMate III inflow cannula position. Interact Cardiovasc Thorac Surg. 2019 Oct 1;29(4):644-646. doi: 10.1093/icvts/ivz146. PMID: 31230073. +- Barabás JI, Palkovics D, Bognár V, Sax B, Heltai K, Panajotu A, Merkely B, Hartyánszky I. A 3D technológia szerepe a műszívterápiában [The role of 3D technology in the support of mechanical circulation therapy.]. Orv Hetil. 2023 Jul 2;164(26):1026-1033. Hungarian. doi: 10.1556/650.2023.32804. PMID: 37393547. +- Barabás JI, Merkely B, Hartyánszky I, Palkovics D. Computer-aided Design and Manufacturing of a Patented, Left Ventricle Assist Device Positioning Tool – 3D Navigated Surgical Treatment of End-Stage Heart Failure. Acta Polytechnica Hungarica. 2023 Jan 20(8):9-25. 
DOI: 10.12700/APH.20.8.2023.8.2 diff --git a/PW41_2024_MIT/Projects/PrismVolumeRenderer/README.md b/PW41_2024_MIT/Projects/PrismVolumeRenderer/README.md new file mode 100644 index 000000000..8bfb54f08 --- /dev/null +++ b/PW41_2024_MIT/Projects/PrismVolumeRenderer/README.md @@ -0,0 +1,59 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: PRISM Volume renderer +category: VR/AR and Rendering +presenter_location: In-person + +key_investigators: +- name: Simon Drouin + affiliation: ÉTS Montreal + country: Canada + +- name: Aurélie Rasolomanana + affiliation: ÉTS Montreal + country: Canada + +- name: Kylian Pasquereau + affiliation: ÉTS Montreal + country: Canada + +--- + +# Project Description + +The PRISM extension aims at providing a list of advanced interactive volume rendering effects not available through the standard Volume Rendering module in Slicer. Users are able to experiment with the available effects either by rendering their own volume or by using the sample volumes provided for each of the effects. + +From a developer's point of view, it is possible to add new volume rendering effects by adding a Python script that defines the parameters of the effect and implements modifications to the standard volume rendering shader. The GUI for the effect is automatically generated from the list of parameters. + +## Objective + +1. Fix pending bugs that appeared in the latest version of Slicer +2. Make it possible to load a sample volume for an effect without having previously enabled another loaded volume +3. Develop a new shader or modify the outline shader to be able to produce glass-like rendering of tissue boundaries in a volume + +## Approach and Plan + +1. The glass effect could be obtained by modifying the outline shader to modulate the transparency of the volume by the specular term of the Phong model in regions where the volume gradient's amplitude is high. +2. 
It may be necessary to apply a gaussian filter to the volume before computing the gradient if the signal is too noisy to produce an interesting specular highlight. + +## Progress and Next Steps +1. Fixed minor bugs and inconsistencies in the interface update mechanism +1. Rearranged the interface to be more user-friendly +1. Updated the module to use the parameterNodeWrapper mechanism and simplify the interface update +1. Commits of the week: + 1. [rearrange interface and fix bugs](https://github.com/ETS-vis-interactive/SlicerPRISMRendering/commit/1167ad4e5105587c3e48b4f901ef43d3835ddbe6) + 1. [prepare code for parameterNodeWrapper](https://github.com/ETS-vis-interactive/SlicerPRISMRendering/commit/33a08784d190a4fe750a12e9bd43918214a2ab53) +1. Implemented a first version of the glass volume rendering effect (see illustration below) +1. Discussed an architecture for an experimental volume rendering module that would enable the implementation of more complex effects. 
+ +# Illustrations +Current version of the glass effect +![Glass effect on brain vessels](glass-effect-on-brain-vessels.png) + + +# Background and References + +1.[PRISM at PW39 in Montreal](https://projectweek.na-mic.org/PW39_2023_Montreal/Projects/PrismVolumeRendererRefactoringAndBugFixing/) diff --git a/PW41_2024_MIT/Projects/PrismVolumeRenderer/glass-effect-on-brain-vessels.png b/PW41_2024_MIT/Projects/PrismVolumeRenderer/glass-effect-on-brain-vessels.png new file mode 100644 index 000000000..1ada04f2a Binary files /dev/null and b/PW41_2024_MIT/Projects/PrismVolumeRenderer/glass-effect-on-brain-vessels.png differ diff --git a/PW41_2024_MIT/Projects/QualityControlModelForBrainSurfaces/README.md b/PW41_2024_MIT/Projects/QualityControlModelForBrainSurfaces/README.md new file mode 100644 index 000000000..deeb3cc33 --- /dev/null +++ b/PW41_2024_MIT/Projects/QualityControlModelForBrainSurfaces/README.md @@ -0,0 +1,112 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Quality Control Model for Brain Surfaces +category: Quantification and Computation +presenter_location: In-person + +key_investigators: + +- name: Florian Davaux + affiliation: University of North Carolina + country: USA + +- name: Juan Carlos Prieto + affiliation: University of North Carolina + country: USA + +- name: Lucie Dole + affiliation: University of North Carolina + country: USA + +- name: Martin Styner + affiliation: University of North Carolina + country: USA + +--- + +# Project Description + + + + +**ShapeAXI** is a shape analysis package that regroups many AI networks which use analysis via transformer networks or 2D convolutional neural networks. +This package is available on Pypi and has been developed using Python and MONAI framework. +The objective of ShapeAXI is to provide different architectures that can be used by anyone using his own data. 
+ +One of these networks, called **SaxiRing**, has been used on the Adolescent Brain Cognitive Development (ABCD) data as a quality control (QC) model. One of the outputs of this architecture is a visual explanation from the regions of an input image that are most influential for the model's decision. + +The project would be to create the extension of this QC model and the visualization on 3D Slicer. + + + +## Objective + + + + +1. Build and deploy the extension on 3D Slicer for the QC model and the visualization (GRAD-CAM) +2. The end result would be to have a new 3D Slicer extension ready to be used for anyone who wants to use the QC model on their own data + + + +## Approach and Plan + + + + +1. Create the extension into 3D-Slicer +2. Implement the Extension Logic (organise the code, develop the Logic Module, develop the User Interface (UI)) +3. Integrate the QC model +4. Integrate the GRAD-CAM +5. Distribute the extension + + + +## Progress and Next Steps + + + + +1. We are able to load the model (on Linux) +2. We are able to run the prediction over a directory of subjects (on Linux) + +## Video Demo + + + + + +Next steps: +1. Make sure that all preliminary steps have no issue +2. Start creating the extension +3. 
Thinking about the best UI to improve the accessibility + + + +# Illustrations + + + + +**QC Model Results** + +![QC_DATA_1_TO_1_test_prediction_norm_confusion](https://github.com/NA-MIC/ProjectWeek/assets/91245912/fba985f2-eaa3-4afc-b156-223ff5a90561) + +**Example of GRAD-CAM in 3D-Slicer** + +Screenshot 2024-06-14 at 10 12 44 + + + +# Background and References + + + + +- [ShapeAXI](https://github.com/FlorianDAVAUX/ShapeAXI) diff --git a/PW41_2024_MIT/Projects/QualityControlModelForBrainSurfaces/image.png b/PW41_2024_MIT/Projects/QualityControlModelForBrainSurfaces/image.png new file mode 100644 index 000000000..af011f241 Binary files /dev/null and b/PW41_2024_MIT/Projects/QualityControlModelForBrainSurfaces/image.png differ diff --git a/PW41_2024_MIT/Projects/QuickalignRefinement/README.md b/PW41_2024_MIT/Projects/QuickalignRefinement/README.md new file mode 100644 index 000000000..7d55bb6c6 --- /dev/null +++ b/PW41_2024_MIT/Projects/QuickalignRefinement/README.md @@ -0,0 +1,85 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: QuickAlign Refinement +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Sara Rolfe + affiliation: SCRI + country: USA + +- name: Murat Maga + affiliation: SCRI + country: USA + +--- + +# Project Description + + + + +QuickAlign is a SlicerMorph module used to temporarily fix alignment in two 3D viewers. The user aligns the view in each viewer and then applies the linking. If the nodes are Markups, then joint editing of the point lists can be enabled. We plan to make usability improvements. + + + +## Objective + + + + +1. Address of issue object size/zoom +2. Assess implementations for transforming multiple nodes in scene +3. Discuss whether this may be a useful feature outside SlicerMorph + + + + +## Approach and Plan + + + + +1. Develop plan for correcting for object size/zoom +3. 
Discuss alternate implementations using camera transforms + + + +## Progress and Next Steps + + + + +1. Implemented and tested alternative methods based on (a) camera event updates from camera instead of 3D view linking and (b) transforming the camera instead of the linked nodes. +2. Identified a more robust solution where camera positions are tracked and updated on interaction events applying a transform. This is likely the best solution if the feature will be integrated in the core and will support alignment of views with multiple nodes. Refer to Endoscopy and ViewPoint examples for camera update. +3. Identified a short-term solution for the SlicerMorph extension where an approximate zoom factor is calculated from the initial camera parameters and object position, then applied as a scaling factor to the aligned object node. The scale factor could be provided as a slider in the QuickAlign module so the zoom could be adjusted both jointly (in the scene) and independently (via the scaling factor). +4. [Zoom-factor solution](https://github.com/SlicerMorph/SlicerMorph/tree/QuickAlignRefinement) pushed to SlicerMorph repo for testing + + +# Illustrations + + + + +quickAlignedSkulls +unalignedSkulls + + + + + + +# Background and References + + + +[Endoscopy example](https://github.com/Slicer/Slicer/blob/main/Modules/Scripted/Endoscopy/Endoscopy.py#L1080) + +[ViewPoint example](https://github.com/SlicerIGT/SlicerIGT/blob/master/Viewpoint/Viewpoint.py) diff --git a/PW41_2024_MIT/Projects/README.md b/PW41_2024_MIT/Projects/README.md new file mode 100644 index 000000000..2cde287d5 --- /dev/null +++ b/PW41_2024_MIT/Projects/README.md @@ -0,0 +1,18 @@ +# How to create a new project + +- Post questions about the project idea and team on the [Project Week forum][forum], our communication mechanism as of PW28. 
+- When you are ready, add a new entry in the list of **Projects** by creating a new `README.md` file in subfolder in `Projects` folder, and copying contents of [project description template][project-description-template] file into it. Step-by-step instructions for this are: + +1. Open [project description template][project-description-template] and copy its full content to the clipboard + * If the link does not work (https issues) please try [here](https://github.com/NA-MIC/ProjectWeek/blob/master/PW41_2024_MIT/Projects/Template/README.md) +3. Go back to [Projects](https://github.com/NA-MIC/ProjectWeek/tree/master/PW41_2024_MIT/Projects) folder on GitHub +4. Click on "Create new file" button +5. Type `YourProjectName/README.md` + - When naming the file, **please ensure there are no spaces/special characters in the folder or file name** +6. Paste the previously copied content of project template page into your new `README.md` +7. Update at least your project's __title, category, key investigators, location, and project description sections__ +8. 
Create a [pull request](https://help.github.com/articles/creating-a-pull-request/) with the new page + + +[forum]: https://discourse.slicer.org/c/community/project-week +[project-description-template]: https://raw.githubusercontent.com/NA-MIC/ProjectWeek/master/PW41_2024_MIT/Projects/Template/README.md diff --git a/PW41_2024_MIT/Projects/RiskPredictionDeepLearningModelsIntoMhubAi/README.md b/PW41_2024_MIT/Projects/RiskPredictionDeepLearningModelsIntoMhubAi/README.md new file mode 100644 index 000000000..5a8baccad --- /dev/null +++ b/PW41_2024_MIT/Projects/RiskPredictionDeepLearningModelsIntoMhubAi/README.md @@ -0,0 +1,93 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Risk Prediction Deep Learning Models into MHub.ai +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Ahmed Adly + affiliation: Harvard Medical School + country: USA + +- name: Leonard Nuernberg + affiliation: Harvard Medical School + country: USA + +- name: Hugo Aerts + affiliation: Harvard Medical School + country: USA + +--- + +# Project Description + + + + +[Mhub.ai](https://mhub.ai/) is a framework to enhance reproducible research by standardizing models into Mhub containers that could be flexible and effortless to use. +Therefore I aim to add two often used risk prediction models (Sybil and CVD-risk-estimator), to make it easy for the community to run such models through Mhub using a standardized way (by one simple command). + + + +## Objective + + + + +1. Getting more familiar with [Mhub.ai](https://mhub.ai/) framework, to keep pushing high quality models there for reproducible science. +2. Publishing risk prediction Models on [Mhub.ai](https://mhub.ai/) . + + + +## Approach and Plan + + + + +1. Attending MHub workshop held at PW, so that I grasp best practices. +2. Start with a basic hands on -> [Mhub.ai](https://mhub.ai/) converter from DICOM to NRRD. +3. 
Wrap the risk prediction models (Sybil / CVD-risk-estimator) for [Mhub.ai](https://mhub.ai/) Framework. +4. Run the models on data using [Mhub.ai](https://mhub.ai/) and Github, to compare the simplicity of the approach, efficiency (time and effort) and output. + + + +## Progress and Next Steps + + + +## Before PW + +1. Getting more familiar with [Mhub.ai](https://mhub.ai/) infrastructure and documentation. +2. Going through [Mhub.ai](https://mhub.ai/) tutorials. + + +## After PW + +1. Sybil - Cancer risk prediction Model - is wrapped in MHUB.ai Framerwork and pushed to [Mhub.ai](https://mhub.ai/). +2. CVD-Risk-Estimator - CVD risk model - is still ongoing, however at last stage. +3. Got More comfortable with [Mhub.ai](https://mhub.ai/) framework, and looking forward to add more models. + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +- [Mhub](https://mhub.ai/) +- [Sybil-Cancer Prediction Model](https://github.com/reginabarzilaygroup/Sybil/tree/main) +- [CVD-Risk-Estimator](https://github.com/DIAL-RPI/CVD-Risk-Estimator/tree/master?tab=readme-ov-file) diff --git a/PW41_2024_MIT/Projects/SOFAUnityHapticModel/README.md b/PW41_2024_MIT/Projects/SOFAUnityHapticModel/README.md new file mode 100644 index 000000000..5bfdf43ad --- /dev/null +++ b/PW41_2024_MIT/Projects/SOFAUnityHapticModel/README.md @@ -0,0 +1,72 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Development of a haptic model for ultrasound-guided catheter insertion +category: IGT and Training +presenter_location: Online + +key_investigators: +- name: Naomi Catwell + affiliation: ÉTS + +- name: Simon Drouin + affiliation: ÉTS +--- + +# Project Description + +Integration of SOFA Framework into Unity in order to develop a haptics-enabled training simulator in virtual reality for intravenous catheter insertion using ultrasound guidance. 
+ +The objective is to develop a realistic haptic model for both the catheter insertion and ultrasound manipulation using Unity and SOFA framework. + +## Objectives + + + +1. Become familiar with SOFA Framework for haptics +2. Integrate SOFA Framework into Unity using InfinyTech's plugin solution +3. Integrate usage of Haply's Inverse3 haptic robot through SOFA in Unity + +## Approach and Plan + + + +1. Complete the SOFA Training introduction course +2. Follow InfinyTech3D integration guide for Unity integration of Inverse3 +3. Ask the slicer-sofa project community or the SOFA Framework community for input if needed + +## Progress and Next Steps + + + +Progress +1. Completed SOFA training course for getting started in SOFA development +2. Equipped existing haptics project with InfinyTech3D's SOFA-Unity asset +3. Implemented test scenes in SOFA + +Next steps +1. Fix/understand reason for segfault error in collision test in SOFA +2. Fix error on SOFA scene loading into Unity +3. Integrate usage of Inverse3 haptic robot through SOFA in Unity and test out the haptics + +# Illustrations + + +![image](https://github.com/NA-MIC/ProjectWeek/assets/35537740/f146afd7-081a-4c3c-b5dc-665243482a46) +![image](https://github.com/NA-MIC/ProjectWeek/assets/35537740/bb533c6c-ba67-4968-812c-96990d7f5540) + + +# Background and References + + + +- [https://www.sofa-framework.org/community/doc/](https://www.sofa-framework.org/community/doc/) +- [https://www.sofa-framework.org/community/doc/getting-started/video-tutorials/introduction-course/](https://www.sofa-framework.org/community/doc/getting-started/video-tutorials/introduction-course/) +- [https://infinytech3d.com/sapapi-unity3d/](https://infinytech3d.com/sapapi-unity3d/) diff --git a/PW41_2024_MIT/Projects/SegmentationVerificationModuleForFinalizingMultiLabelAiSegmentations/README.md b/PW41_2024_MIT/Projects/SegmentationVerificationModuleForFinalizingMultiLabelAiSegmentations/README.md new file mode 
100644 index 000000000..cf8148f5b --- /dev/null +++ b/PW41_2024_MIT/Projects/SegmentationVerificationModuleForFinalizingMultiLabelAiSegmentations/README.md @@ -0,0 +1,87 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Segmentation verification module for finalizing multi-label AI segmentations +category: Segmentation / Classification / Landmarking +presenter_location: Online + +key_investigators: + +- name: Csaba Pintér + affiliation: EBATINCA + country: Spain + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +--- + +# Project Description + + + + +Creating multi-label segmentation models is a challenge, because the deep learning model will try to segment all the structures that it was trained on, even if some are missing. This issue is present for vertebra segmentation, but most predominantly teeth segmentation. The Dent.AI 3D Guide dental implant planning software contains a tool for fixing the most typical errors in a user friendly way. It would be great if this tool was published as an open-source Slicer module. + + + +## Objective + + + + +1. Create an open-source Slicer extension from the Segmentation Verification widget from the Dent.AI 3D Guide custom app + + + +## Approach and Plan + + + + +1. Discuss the current Segmentation Verification widget. Is the current design satisfactory for a more generic use? Is there any other issue that needs to be handled? +2. Take the existing Segmentation Verification widget from the custom app and create a Slicer module from it +3. Create an extension containing this module + + + + +## Progress and Next Steps + + + + +1. Discussion with the people interested (Andrey) +2. Create extension [SegmentationVerification](https://github.com/cpinter/SlicerSegmentationVerification) +3. Implementation is done +4. 
Next steps + - Integrate [Slicer PR](https://github.com/Slicer/Slicer/pull/7829), which is needed to use this new module + + +# Illustrations + + + +Video: +https://github.com/NA-MIC/ProjectWeek/assets/1325980/77379558-be9d-4c17-a1a9-a38f78384d4b + +GIF in case video does not show up: +![2024-06-28 16-20-54](https://github.com/NA-MIC/ProjectWeek/assets/1325980/8e631c07-9f4e-4975-95d4-9beced1bce49) + +![image](https://github.com/NA-MIC/ProjectWeek/assets/1325980/f341b4ab-08a2-4c9d-86b7-554ad7f85fd8) + + + + +# Background and References + + + + +- [DeepEdit paper by Andrés](https://scholar.google.com/citations?view_op=view_citation&hl=en&user=LbnADQ0AAAAJ&citation_for_view=LbnADQ0AAAAJ:ns9cj8rnVeAC) +- [Dent.AI 3D Guide software](https://www.youtube.com/watch?v=zs-0mZQLB48&ab_channel=DentAIMedicalImaging) diff --git a/PW41_2024_MIT/Projects/SelfSupervisedDepthEstimationSurgery/README.md b/PW41_2024_MIT/Projects/SelfSupervisedDepthEstimationSurgery/README.md new file mode 100644 index 000000000..d4db662f4 --- /dev/null +++ b/PW41_2024_MIT/Projects/SelfSupervisedDepthEstimationSurgery/README.md @@ -0,0 +1,68 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Methods for self-supervised depth estimation and motion estimation in colonoscopy under deformation +category: Quantification and Computation +presenter_location: In-person + +key_investigators: +- name: Megha Kalia + affiliation: Brigham and Women’s Hospital, Harvard Medical School + country: USA + +--- + +# Project Description + +Estimating depth and localizing the endoscope in a surgical environment is critical in many tasks such as intra-operative registration, augmented reality, surgical automation, among many others. Monocular self-supervised depth and pose estimation methods can estimate depth and camera pose without requiring labels. 
However, how these methods perform in the presence of deformation while the endoscope moves through the lumen is not known. Therefore, through this project we want to evaluate the effect of adding two modules on depth and pose estimation accuracy. These modules are TransUNet and an Optical Flow module. Optical Flow can capture the image intensity changes in the scene because of deformation. TransUNet can potentially capture the temporal correlations between the image frames to give better pose and depth predictions. For the project, open-source datasets and GitHub code will be utilized. + + +## Objective + + +1. Objective A. To build, run, and train the flowNet on the colonoscopy dataset +1. Objective B. To integrate the flowNet module in the Monodepth2 framework +1. Objective C. To integrate and evaluate TransUNet blocks in the Monodepth2 framework. + +## Approach and Plan + + +1. Run the Monodepth2 on the colonoscopy dataset. +1. Train the optical flow network on the colonoscopic dataset +1. + +## Progress and Next Steps + +1. Run the model on the colonoscopic dataset. +2. Self-supervised training with supervision from scale-invariant depth loss. +3. Hosting the model on Hugging Face + +Next Steps: +Creating a 3D mesh from generated depth values. 
+ +# Illustrations + +Left : Ground Truth, Right : The 3D Depth prediction (Purple - Yellow : Farther - Close) + + + +Hugging Face link: [https://huggingface.co/spaces/mkalia/DepthPoseEstimation](https://huggingface.co/spaces/mkalia/DepthPoseEstimation) + +Simple Upload and Predict + +upload_model + +depth_image_huggingface + + +# Background and References + +- [Dataset](http://cmic.cs.ucl.ac.uk/ColonoscopyDepth/Data/) +- [https://data.mendeley.com/datasets/cd2rtzm23r/1](https://data.mendeley.com/datasets/cd2rtzm23r/1) +- [Monodepth2](https://github.com/nianticlabs/monodepth2) +- [TransUNet](https://github.com/Beckschen/TransUNet/tree/main/networks) + +images_combined diff --git a/PW41_2024_MIT/Projects/SimpleEditorForPythonScripting/README.md b/PW41_2024_MIT/Projects/SimpleEditorForPythonScripting/README.md new file mode 100644 index 000000000..8a0659c31 --- /dev/null +++ b/PW41_2024_MIT/Projects/SimpleEditorForPythonScripting/README.md @@ -0,0 +1,102 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Simple Editor for Python Scripting +category: Infrastructure +presenter_location: Online + +key_investigators: + +- name: Oshane Thomas + affiliation: Seattle Children's Research Institute + country: USA + +- name: Murat Maga + affiliation: Seattle Children's Research Institute + country: USA + +- name: Steve Pieper + affiliation: Isomics, Inc. + country: USA + +- name: Sara Rolfe + affiliation: Seattle Children's Research Institute + country: USA + +--- + +# Project Description + + + + +A built-in simple text editor for Python scripts has been discussed on the Slicer forum before. We would like to discuss design issues around this and hope to have a basic prototype (or two) by the end of PW. + + + +## Objective + + + + +1. Support Python syntax highlighting +2. Direct highlighted code from the editor to the Python console (e.g., right-click interaction) +3. Embed scripts into the scene. +4. Code autocompletion. 
+ + + + + +## Approach and Plan + + + + +1. Describe specific steps of **what you plan to do** to achieve the above described objectives. +No specific approach has been developed just yet. + + + +## Progress and Next Steps + + + + +1. Describe specific steps you **have actually done**. + +So far we have a brief web-based mock-up from Steve Pieper: +[https://github.com/pieper/SlicerEditor](https://github.com/pieper/SlicerEditor) + +We also have a Slicer module mock-up from Oshane Thomas: + +[https://github.com/oothomas/SlicerMorph/tree/downloading/SlicerEditor](https://github.com/oothomas/SlicerMorph/tree/downloading/SlicerEditor) + +* This week we compared features and issues. Although we preferred the simplest approach, the features of the Monaco editor (the core of VS Code) convinced us to go with that approach. +* We developed a way to host Monaco without networking +* We determined a small subset of Monaco that is small enough to distribute in an extension +* We tested with Sara and Chi and they like it! 
+* We will make a stand-alone extension that can be used by SlicerMorph and other use cases + + +# Illustrations + + + +![image](https://github.com/NA-MIC/ProjectWeek/assets/18602669/363df0c2-9135-44e2-b215-c272d6af1dd1) + +![image](https://github.com/NA-MIC/ProjectWeek/assets/18602669/6b06a211-ce37-43db-a56d-64210185b576) + + + + +# Background and References + + + + +[https://discourse.slicer.org/t/support-python-text-highlighting-in-text-module/34511](https://discourse.slicer.org/t/support-python-text-highlighting-in-text-module/34511) diff --git a/PW41_2024_MIT/Projects/SkinSurfaceSegmentationForNousnav/README.md b/PW41_2024_MIT/Projects/SkinSurfaceSegmentationForNousnav/README.md new file mode 100644 index 000000000..db0165638 --- /dev/null +++ b/PW41_2024_MIT/Projects/SkinSurfaceSegmentationForNousnav/README.md @@ -0,0 +1,98 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Skin Surface Segmentation for NousNav +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Reuben Dorent + affiliation: Brigham and Women's Hospital + country: Harvard Medical School, USA + +- name: Colin Galvin + affiliation: Brigham and Women's Hospital + country: Harvard Medical School, USA + +- name: Sarah Frisken + affiliation: Brigham and Women's Hospital + country: Harvard Medical School, USA + +- name: Tina Kapur + affiliation: Brigham and Women's Hospital + country: Harvard Medical School, USA + +- name: Sam Horvath + affiliation: Kitware + country: USA + +--- + +# Project Description + + + + +This project was initiated during the [PW36](https://projectweek.na-mic.org/PW36_2022_Virtual/Projects/SkinSegmentation/). +It aims to create an automated skin segmentation tool for pre-operative scans for NousNav. + +A model has been already trained to automatically segment scans in multi-parametric MRI. 
In this project, we aim to integrate the developed tool into Slicer for further integration in NousNav. + + + + +## Objective + + + + +1. Create a Slicer Extension for automatic skin segmentation based on a pre-trained nnUnet framework +2. Test the Slicer Extension in different settings, including different OS and different hardware configuration (only CPU, GPU, Mac) +3. Integrate the Slicer module into NousNav + + + + +## Approach and Plan + + + + +1. Leverage the existing TotalSegmentator Slicer extension as template +2. Create a small database that can be shared to test the algorithm in different settings +3. Discuss with the NousNav development team for its integration + + + + +## Progress and Next Steps + + + + +1. The Slicer extension has been implemented and is publicly available [here](https://github.com/ReubenDo/SlicerSkinSegmentator). +2. The extension has been tested on CPU (MacOS and Windows) and GPU (Windows) +3. The integration in NousNav is still pending. + + +# Illustrations + + + + +image + + + + +# Background and References + + + + +_No response_ diff --git a/PW41_2024_MIT/Projects/SlicerExtensionForShapeAnalysisForDentalApplicationsUsingShapeaxiModule/README.md b/PW41_2024_MIT/Projects/SlicerExtensionForShapeAnalysisForDentalApplicationsUsingShapeaxiModule/README.md new file mode 100644 index 000000000..dda9b4fe7 --- /dev/null +++ b/PW41_2024_MIT/Projects/SlicerExtensionForShapeAnalysisForDentalApplicationsUsingShapeaxiModule/README.md @@ -0,0 +1,125 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Slicer extension for shape analysis for dental applications using ShapeAXI module +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Lucie Dole + affiliation: University of North Carolina + country: USA + +- name: Florian Davaux + affiliation: University of North Carolina + country: USA + +- name: Claudia Trindade Mattos + affiliation: University of 
Michigan + country: USA + +- name: Selene Barone + affiliation: University of Michigan + country: USA + +- name: Felicia Miranda + affiliation: University of Michigan + country: USA + +- name: Marcela Lima Gurgel + affiliation: University of Michigan + country: USA + +- name: Juan Carlos Prieto + affiliation: University of North Carolina + country: USA + +- name: Lucia Cevidanes + affiliation: University of Michigan + country: USA + +--- + +# Project Description + + + + +ShapeAXI is a shape analysis package integrating different models for shape analysis. +ShapeAxi has demonstrated good performance on Cleft defects and to some extent condylar shape degeneration. We propose to create an extension of classification for different datasets. + + + +## Objective + + + + +- Build and deploy an extension for 3D-slicer for the classification of cleft defects, airway obstructions, and condyles degeneration +- The extension should be used by anyone including clinicians to help them in diagnosis with ready-to-use models +- Add Grad-CAM visualization to help clinicians understand the decision-making process and check if highlights are coherent with existing knowledge + + + + +## Approach and Plan + + + + +- Create and develop the slicer extension with a User Interface +- Integrate shapeAxi module and models +- Documentation and user guides +- Distribution of extension + + + + +## Progress and Next Steps + + + + +1. Progress +- Models have been trained for cleft and. condyles +- Newest models are being tested on airways (4 classes) +- A little work has been done on the extension +2. 
Next steps +- Make sure all preliminary steps have no issue +- Integrating shapeaxi package +- Testing of UI to improve accessibility + + + + + +# Illustrations + + +User Interface + + + +Condyles binary classification + +Screenshot 2024-06-19 at 3 06 19 PM + +Cleft severity classification + +Screenshot 2024-06-19 at 3 07 37 PM + + + + + +# Background and References + + + + +- [https://github.com/DCBIA-OrthoLab/ShapeAXI](https://github.com/DCBIA-OrthoLab/ShapeAXI) diff --git a/PW41_2024_MIT/Projects/SlicerExtensionForShapeAnalysisForDentalApplicationsUsingShapeaxiModule/user_interface.png b/PW41_2024_MIT/Projects/SlicerExtensionForShapeAnalysisForDentalApplicationsUsingShapeaxiModule/user_interface.png new file mode 100644 index 000000000..2dacca4c2 Binary files /dev/null and b/PW41_2024_MIT/Projects/SlicerExtensionForShapeAnalysisForDentalApplicationsUsingShapeaxiModule/user_interface.png differ diff --git a/PW41_2024_MIT/Projects/SlicerPluginForDetectionOfMotionArtifactOnT1WMri/README.md b/PW41_2024_MIT/Projects/SlicerPluginForDetectionOfMotionArtifactOnT1WMri/README.md new file mode 100644 index 000000000..54237c91f --- /dev/null +++ b/PW41_2024_MIT/Projects/SlicerPluginForDetectionOfMotionArtifactOnT1WMri/README.md @@ -0,0 +1,90 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Slicer Plugin for Detection of Motion Artifact on T1w MRI +category: Quantification and Computation +presenter_location: In-person + +key_investigators: + +- name: Charles Bricout + affiliation: ÉTS + country: Canada + +- name: Sylvain Bouix + affiliation: ÉTS + country: Canada + +- name: Owen Borders + affiliation: Massachusetts General Hospital + country: USA + +--- + +# Project Description + + + + +We want to create a Slicer module to detect and / or quantify motion artifacts in T1 weighted MRI. + + + +## Objective + + + + +1. Create a Slicer module to plug our models. +2. Connect our models with Slicer (retraining might be necessary). 
+3. Include an estimation of uncertainty. + + + +## Approach and Plan + + + + +1. Describe specific steps of **what you plan to do** to achieve the above described objectives. + + + + +## Progress and Next Steps + + + + +1. Review and improved the synthetic data generation pipeline + - Change magnitude of motion + - Change interpolation method +2. Create a Slicer Module to expose pretrained models + - Inference in 0.5 seconds on CPU + - Light preprocessing + - Display full histogram output + - Keep history of motion metrics for comparison between files +3. **Next Steps** : + - Ability to export history to CSV + - Connector with [Comet.ml](https://www.comet.com) to retrieve models associated with specific experiment from Slicer + +# Illustrations + + +![Capture d'écran 2024-06-28 091535](https://github.com/NA-MIC/ProjectWeek/assets/28633686/3ad44c02-c05e-40a3-9735-4083cc5fcfbb) + + +_No response_ + + + +# Background and References + + + + +_No response_ diff --git a/PW41_2024_MIT/Projects/SlicerSofa/README.md b/PW41_2024_MIT/Projects/SlicerSofa/README.md new file mode 100644 index 000000000..9478eb972 --- /dev/null +++ b/PW41_2024_MIT/Projects/SlicerSofa/README.md @@ -0,0 +1,153 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: 'Slicer-SOFA ' +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Rafael Palomar + affiliation: Oslo University Hospital / NTNU + country: Norway + +- name: Paul Baksic + affiliation: INRIA + country: France + +- name: Steve Pieper + affiliation: Isomics + country: USA + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware + country: USA + +- name: Sam Horvath + affiliation: Kitware + country: USA + +- name: Naomi Catwell + affiliation: ETS Montreal + country: Canada + +- name: Chi Zhang + affiliation: Texas A&M School of Dentistry + country: USA + +- name: Ron Alkalay + affiliation: 
Beth Israel Deaconess Medical Center + country: USA + +- name: Quinn Williams + affiliation: Brigham and Women's Hospital + country: USA + +--- + +# Project Description + + + + +Slicer-SOFA was born during [PW40](https://projectweek.na-mic.org/PW40_2024_GranCanaria/Projects/SlicerSofaIntegration/) with the aim to support the use of the SOFA simulation library within 3D Slicer. This project will continue the development of the extension. + +slicer-sofa-logo + + + + + + + + +## Objective + + + + +1. Engage with the community to understand the needs and possibilities +2. Establish a roadmap for future development of the Slicer-SOFA extension + - Extension architecture to support simulation modules [RafaelPalomar/Slicer-SOFA#15](https://github.com/RafaelPalomar/Slicer-SOFA/issues/15) + - Distribution of third-party libraries [RafaelPalomar/Slicer-SOFA#14](https://github.com/RafaelPalomar/Slicer-SOFA/issues/14) + - ... + + + +## Approach and Plan + + + + +1. Meetings with the community, the SOFA and 3D Slicer developers. +2. Add new example modules +3. Add documentation +4. Bug fixes and extension release + + + +## Progress and Next Steps + + + +* Modifications towards the binary distribution of SlicerSOFA as an extension (rafaelpalomar/SlicerSOFA#22) by Jean-Christophe Fillion-Robin +* Tested Slicer-SOFA extension on mac os build + * Requires a local slicer build (tested on debug, see notes below for details) + * Issues + * Turning off Qt and OpenGL dependencies doesn't seem to work - it would be best to build just the core simulation and python wrapping. 
+ * Building in release mode didn't recognize libaries + * Added a reset simulation functionality (rafaelpalomar/SlicerSOFA#23) by Quinn Williams + + + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +_No response_ + +# Notes + +## Mac OS build + * Configure and make: + ``` + cmake \ + -DCMAKE_BUILD_TYPE:STRING=Debug \ + -DSlicer_DIR:PATH=${SLICER_BUILD} \ + -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=11.0 \ + ../Slicer-SOFA + + make -j50 |& tee log + ``` + * Launch: + ``` + export SLICERSOFA_DIR=/Users/pieper/slicer/latest/SOFA + export SLICER_DIR=/opt/s + SOFA_ROOT=${SLICERSOFA_DIR}/Slicer-SOFA-build/SOFA-build \ + ${SLICER_DIR}/Slicer-build/Slicer \ + --launcher-additional-settings ${DIR}/Slicer-SOFA-build/inner-build/AdditionalLauncherSettings.ini + ``` + * Then paste a [script like this](https://github.com/pieper/Slicer-SOFA/blob/main/Experiments/lung.py) into the python console. diff --git a/PW41_2024_MIT/Projects/SpinalMusculoskeletalModuleForComputingVertebralSpecificLoadingInDailyTasks/README.md b/PW41_2024_MIT/Projects/SpinalMusculoskeletalModuleForComputingVertebralSpecificLoadingInDailyTasks/README.md new file mode 100644 index 000000000..d9b05e521 --- /dev/null +++ b/PW41_2024_MIT/Projects/SpinalMusculoskeletalModuleForComputingVertebralSpecificLoadingInDailyTasks/README.md @@ -0,0 +1,142 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Spinal musculoskeletal module for computing vertebral-specific loading in daily tasks +category: Quantification and Computation +presenter_location: In-person + +key_investigators: + +- name: Ron Alkalay + affiliation: Beth Israel Deaconess Medical Center + country: US + +- name: Dennis Anderson + affiliation: Beth Israel Deaconess Medical Center + country: US + +- name: Vy Hong + affiliation: Technical University Munich + country: Germany + +- name: Steve Pieper + affiliation: Isomics, Inc. 
+ country: USA + +- name: Csaba Pinter + affiliation: Ebatinca, S.L. + country: Spain +--- + +# Project Description + + + + +Musculoskeletal models of the spine allow insight into the complex loading states experienced by the human spine that cannot be measured in human subjects noninvasively. We have previously established models for such analyses within the open-source modeling software OpenSim, as well as developing methods and experience in personalizing models to represent individual human subjects and patients using a variety of data. However, establishing personalized models from clinical imaging is complex and time-consuming, historically requiring manual segmentation of thoracic and abdominal vertebrae and spinal musculature using expensive commercial applications and custom scripting for data computation, curation, and assembling of model parameters. + +Over the last year, our group, in collaboration with members of the 3D Slicer community, has developed DL models for the segmentation of human thoracic and lumbar vertebrae and detailed segmentation of the torso and abdominal musculature in cancer patients. We have similarly ported our model creation, analysis, and data management scripts to Python. We propose integrating these tools within the extension framework to enable the complete pipeline to assess spinal loading using our open-source spinal model in OpenSim. Having such an open-source model in 3D Slicer will significantly contribute to the scientific and clinical community for cancer patient research and to studying the effect of spinal loading on morbidity in elderly populations and surgical outcomes. + + + +## Objective + + + + +1. Create an open-source Slicer extension to integrate vertebrae and musculature DL segmentation models (TS, AutoSeg, in-house) and our group's Python-based data analysis and management scripts to allow the preparation of a spinal model for analysis in OpenSim. + +2. 
Discuss the possible integration of tools for running static and dynamic simulations and evaluating and presenting model results. + + + +## Approach and Plan + + + + +1. Discuss the current analysis and management scripts pipeline. Is it ready for integration? What parts are missing? Integration of the DL-based masks for generating data for the pipeline. + 1. The key gap for integration of DL segmentation data is translating DL-based masks into model-relevant information. + +2. What issues must be solved for this integration within the extension mechanism? Build an integration plan emphasizing a framework for modularity and code expansion. + +3. Possible avenues for slicer module(s) creation (the very ambitious version) + 1. Create Slicer module (s) from the Python scripts. + 2. Create a Slicer module to run-extract information from the DL models + 3. Create a Slicer module to assemble the data file for the OpenSim modeler + 4. Create an extension containing the modules. + +4. Discuss methods of results presentation. + + + +## Progress and Next Steps + + + +During project week we: +1. Created framework for saving 3D muscle and vertebral joint data as model creation information. +2. Troubleshooting and confirmed muscle measurements from DL masks matched expected results. +3. Established plan for defining OpenSim spine bodies and joints from segmentations + 1. Identifying endplates of vertebral bodies via clustering. + 2. Use centroids of both full vertebral and vertebral body only segmentations to evaluate local vertebral orientation + +Next steps: +1. Finalize downstream model creation code. +2. Integration - multiple DL models + several measurement and model creation scripts +3. Test on multiple input scans. +4. Enable model creation in Slicer with an extension that will display key measurements to be used, and possibly have a method for editing /correcting obvious errors. 
+ + + +# Illustrations + + + + +![Picture1](https://github.com/NA-MIC/ProjectWeek/assets/49168951/81a8be1a-2648-49a7-ae30-c5043f56f677) + +Model creation for the analysis of personalized patient spinal loading predictions. + + + +# Background and References + + + + +• [Evaluation of Load-To-Strength Ratios in Metastatic Vertebrae and Comparison With Age- and Sex-Matched Healthy Individuals](https://www.frontiersin.org/articles/10.3389/fbioe.2022.866970/full) + +# Results + +## Intervertebral centroid calculation + +### 1. Seperate vertebral body from existing segmentation + +![Screenshot 2024-06-27 215559](https://github.com/VyHong/ProjectWeek/assets/67245730/af1ea7cd-4eab-43ef-8fd9-945065a09774) +![Screenshot 2024-06-27 215504](https://github.com/VyHong/ProjectWeek/assets/67245730/c3dc9282-38e0-486d-ba76-9be863ada80f) + +### 2. Convert volume to surface +![Screenshot 2024-06-27 223435](https://github.com/VyHong/ProjectWeek/assets/67245730/9d717ed2-cf1f-47e4-9e73-6fe34f40eb7a) + +### 3. Cluster surface points to determine vertebral endplates + + + + + +### 4. 
Calculate convex hull centroid between 2 endplates + +![Screenshot 2024-06-27 234732](https://github.com/VyHong/ProjectWeek/assets/67245730/2c0d1065-1b19-4517-9b6a-542a10916cd6) diff --git a/PW41_2024_MIT/Projects/SusceptibilityDistortionCorrectionInDiffusionMriDataset/README.md b/PW41_2024_MIT/Projects/SusceptibilityDistortionCorrectionInDiffusionMriDataset/README.md new file mode 100644 index 000000000..97a5e14a1 --- /dev/null +++ b/PW41_2024_MIT/Projects/SusceptibilityDistortionCorrectionInDiffusionMriDataset/README.md @@ -0,0 +1,82 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: 'Susceptibility distortion correction in diffusion MRI dataset ' +category: Quantification and Computation +presenter_location: Online + +key_investigators: + +- name: Sedigheh Dargahi + affiliation: 'École de technologie supérieure ' + country: Canada + +- name: Sylvain Bouix + affiliation: 'École de technologie supérieure ' + country: Canada + +--- + +# Project Description + + + + +The aim of this project is to mitigate susceptibility distortions in dMRI dataset by offering a deep learning solution as an alternative of traditional state-of-the-arts methods, only using a single blip-up or blip-down image. + + + +## Objective + + + + +1. Objective A. Simplifying dMRI acquisition protocols by utilizing just one of the blip-up or blip-down image as input for the model. This approach can reduce scan time, potentially halving the duration of dMRI acquisitions. + +2. Objective B. Facilitating more precise analysis and promoting advancements in our understanding of brain by equipping researchers with more efficient and accurate tools. + + + + +## Approach and Plan + + + + +No response + + + + +## Progress and Next Steps + + + + +I successfully debugged the initial error in the extension's Python code. However, there remains an unresolved issue with an error, undefined function in my model. I am currently working on resolving this problem. +Next Steps: + 1. 
Address the undefined function issue. + 2. Test the built extension. + + + + +# Illustrations + + + + +No response + + + +# Background and References + + + + +_No response_ diff --git a/PW41_2024_MIT/Projects/T1WMriArtifactRemoval/README.md b/PW41_2024_MIT/Projects/T1WMriArtifactRemoval/README.md new file mode 100644 index 000000000..36476c300 --- /dev/null +++ b/PW41_2024_MIT/Projects/T1WMriArtifactRemoval/README.md @@ -0,0 +1,78 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: T1w MRI Artifact Removal +category: Quantification and Computation +presenter_location: In-person + +key_investigators: + +- name: Owen Borders + affiliation: Massachusetts General Hospital + country: USA + +- name: Sylvain Bouix + affiliation: ETS + country: Canada + +- name: Charles Bricout + affiliation: ETS + country: Canada + +--- + +# Project Description + + + + +I will train a Generative Convolutional Neural Network to remove artifacts (motion, ghosting, metal artifacts, etc.) from T1w MRI images. + + + +## Objective + + + + +1. In the end, I plan to have a trained deep learning model and statistics on its performance. + + + +## Approach and Plan + + + + +1. I will write a Python program to generate artificial artifacts on T1w MRI images. I will then train a CNN with the images with artificial artifacts as the input, and the original images without the artifacts as the output. Its performance will be evaluated on real data. 
+ + + +## Progress and Next Steps + + + + +_No response_ + + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +_No response_ diff --git a/PW41_2024_MIT/Projects/Template/README.md b/PW41_2024_MIT/Projects/Template/README.md new file mode 100644 index 000000000..d8dd883a4 --- /dev/null +++ b/PW41_2024_MIT/Projects/Template/README.md @@ -0,0 +1,58 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Write full project title here +category: Uncategorized +presenter_location: Online + +key_investigators: +- name: Person Doe + affiliation: University + +- name: Person2 Doe2 + affiliation: University2 + country: Spain +--- + +# Project Description + + + +## Objective + + + +1. Objective A. Describe **what you plan to achieve** in 1-2 sentences. +1. Objective B. ... +1. Objective C. ... + +## Approach and Plan + + + +1. Describe specific steps of **what you plan to do** to achieve the above described objectives. +1. ... +1. ... + +## Progress and Next Steps + + + +1. Describe specific steps you **have actually done**. +1. ... +1. ... 
+ +# Illustrations + + + +# Background and References + + diff --git a/PW41_2024_MIT/Projects/Template/README.md.j2 b/PW41_2024_MIT/Projects/Template/README.md.j2 new file mode 100644 index 000000000..1c5d55905 --- /dev/null +++ b/PW41_2024_MIT/Projects/Template/README.md.j2 @@ -0,0 +1,56 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: {{ title | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +category: {{ category | regex_replace("\n$|\n\.\.\.\n$", "") }} +presenter_location: {{ presenter_location | regex_replace("\n$|\n\.\.\.\n$", "") }} + +key_investigators: +{% for investigator in investigators %} +- name: {{ investigator.name | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} + affiliation: {{ investigator.affiliation | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +{%- if investigator.country %} + country: {{ investigator.country | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +{%- endif %} +{% endfor %} +--- + +# Project Description + + + +{{ description }} + +## Objective + + + +{{ objective }} + +## Approach and Plan + + + +{{ approach }} + +## Progress and Next Steps + + + +{{ progress }} + +# Illustrations + + + +{{ illustrations }} + +# Background and References + + + +{{ background }} diff --git "a/PW41_2024_MIT/Projects/TerminologiesForSulciAndBasalGangliaEpistim3DSlicerExtension/Capture d\342\200\231e\314\201cran 2024-06-28 a\314\200 02.40.17.png" "b/PW41_2024_MIT/Projects/TerminologiesForSulciAndBasalGangliaEpistim3DSlicerExtension/Capture d\342\200\231e\314\201cran 2024-06-28 a\314\200 02.40.17.png" new file mode 100644 index 000000000..36c821bf2 Binary files /dev/null and "b/PW41_2024_MIT/Projects/TerminologiesForSulciAndBasalGangliaEpistim3DSlicerExtension/Capture d\342\200\231e\314\201cran 2024-06-28 a\314\200 02.40.17.png" differ diff --git a/PW41_2024_MIT/Projects/TerminologiesForSulciAndBasalGangliaEpistim3DSlicerExtension/README.md 
b/PW41_2024_MIT/Projects/TerminologiesForSulciAndBasalGangliaEpistim3DSlicerExtension/README.md new file mode 100644 index 000000000..ae6fcdfb5 --- /dev/null +++ b/PW41_2024_MIT/Projects/TerminologiesForSulciAndBasalGangliaEpistim3DSlicerExtension/README.md @@ -0,0 +1,92 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Importing Labelled Sulci from Morphologist Pipeline (Brainvisa). Creating a new 3D Slicer terminology +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Sara Fernandez Vidal + affiliation: ICM Paris Brain Institute + +- name: Valerio Frazzini + affiliation: ICM Paris Brain Institute & APHP Pitié Salpêtrière Hospital + +--- + +# Project Description + + + + +We are rewriting the entire epiSTIM toolbox (for [SEEG procedures](https://en.wikipedia.org/wiki/Stereoelectroencephalography)) as a 3D Slicer Extension and adding additional features requested by the clinicians. One of them is the import of labelled sulci from T1 Morphologist (in Brainvisa Toolbox). To proceed, first we need to create a Terminology for the sulci (and a cbtl? ) + + + + + +## Objective + + + + +1. Create the new terminology for sulci +2. Import the sulci Segmentation (from BrainVisa T1 Morphologist Pipeline ) associating a new Terminology + + + + +## Approach and Plan + + + + +1. Find an official or Standard Terminology/Ontology of brain sulci +2. Create the 3D Slicer Terminology +3. Associate the Terminology to the objects imported from T1 Morphologist + + + + +## Progress and Next Steps + + + + +1. Import of anatomical labelled sulci ok. +2. Terminology : work in progress ! 
+ + + + +# Illustrations + + + +![Data Model epiSTIM](epiSTIMDataModelWithLabelledSulciFromMorphologist.png) + +![nomenclature_translation](nomenclature_translation.png) + +# Background and References + + +[Morphologist Pipeline](https://brainvisa.info/web/morphologist.html) + +[BrainInfo Ontology API](http://braininfo.rprc.washington.edu/nnont.aspx) + +[TA Viewer](https://ta2viewer.openanatomy.org/?id=3932) + +[How to create 3D Slicer Terminologies](https://github.com/lassoan/SlicerMONAIAuto3DSeg/blob/main/UsingStandardTerminology.md) + +[SNOMED-CT, DICOM use it](https://browser.ihtsdotools.org/?perspective=full&conceptId1=279348008&edition=MAIN/2024-06-01&release=&languages=en) + +[UBERON (multi especes ontology)](https://obophenotype.github.io/uberon/current_release/) + + + +_No response_ diff --git a/PW41_2024_MIT/Projects/TerminologiesForSulciAndBasalGangliaEpistim3DSlicerExtension/epiSTIMDataModelWithLabelledSulciFromMorphologist.png b/PW41_2024_MIT/Projects/TerminologiesForSulciAndBasalGangliaEpistim3DSlicerExtension/epiSTIMDataModelWithLabelledSulciFromMorphologist.png new file mode 100644 index 000000000..36c821bf2 Binary files /dev/null and b/PW41_2024_MIT/Projects/TerminologiesForSulciAndBasalGangliaEpistim3DSlicerExtension/epiSTIMDataModelWithLabelledSulciFromMorphologist.png differ diff --git a/PW41_2024_MIT/Projects/TerminologiesForSulciAndBasalGangliaEpistim3DSlicerExtension/nomenclature_translation.png b/PW41_2024_MIT/Projects/TerminologiesForSulciAndBasalGangliaEpistim3DSlicerExtension/nomenclature_translation.png new file mode 100644 index 000000000..eac4280d2 Binary files /dev/null and b/PW41_2024_MIT/Projects/TerminologiesForSulciAndBasalGangliaEpistim3DSlicerExtension/nomenclature_translation.png differ diff --git a/PW41_2024_MIT/Projects/UltrasoundExtension/README.md b/PW41_2024_MIT/Projects/UltrasoundExtension/README.md new file mode 100644 index 000000000..22e908342 --- /dev/null +++ 
b/PW41_2024_MIT/Projects/UltrasoundExtension/README.md @@ -0,0 +1,92 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Ultrasound extension +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Tamas Ungi + affiliation: Queen's University + country: Canada + +- name: Tina Kapur + affiliation: Brigham and Women's Hospital + country: USA + +- name: Amene Asgari + affiliation: Brigham and Women's Hospital + country: USA + +- name: Mike Jin + affiliation: Centaur Labs + country: USA + +--- + +# Project Description + + + + +The Ultrasound extension for Slicer contains modules for ultrasound video data processing. + + + +## Objective + + + + +1. Standardize format of anonymized DICOM exporter by the anonymizer module. +2. Update guide and tutorials for all modules. +3. Plan future module for annotation. + + + + +## Approach and Plan + + + + +1. Talk with people who use ultrasound in DICOM format. +2. Talk with people who need to anonymize and/or annotate ultrasound video data. + + + + +## Progress and Next Steps + + + + +1. Tested anonymizer module on a new dataset. +2. Added Time Series Annotation module documentation and screenshot. +3. Working on NeedleGuide module (custom UI for interventions), WIP code available but not packaged in extension yet. 
+ + +# Illustrations + + +![2024-04-14_AnonymizeUltrasound_s](https://github.com/NA-MIC/ProjectWeek/assets/2071850/54294863-0a96-4a32-9e57-1a7c52e3db93) + +![2024-04-14_MmodeAnalysis_s](https://github.com/NA-MIC/ProjectWeek/assets/2071850/227b6f82-4e45-4767-9d19-c73a8bfc592b) + +![TimeSeriesAnnotation_2024-06-27.png](https://raw.githubusercontent.com/ungi/SlicerUltrasound/b4c3fdea3025d2891f849a9061a89ca8cbb30b99/Screenshots/TimeSeriesAnnotation_2024-06-27.png) + +_No response_ + + + +# Background and References + + + + +[Source code of the extension](https://github.com/SlicerUltrasound/SlicerUltrasound) diff --git a/PW41_2024_MIT/Projects/UpdatedIconsAndThemeSwitching/README.md b/PW41_2024_MIT/Projects/UpdatedIconsAndThemeSwitching/README.md new file mode 100644 index 000000000..83f9262f0 --- /dev/null +++ b/PW41_2024_MIT/Projects/UpdatedIconsAndThemeSwitching/README.md @@ -0,0 +1,111 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Updated Icons and Theme switching +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Sam Horvath + affiliation: Kitware + country: USA + +- name: Wendy Plesniak + affiliation: BWH + country: USA + +--- + +# Project Description + + + + +We will be working on integrating new icons, QSS support, and better dynamic theme switching + + + +## Objective + + + + +1. Get a working light /dark theme switch for icon sets +2. Use the newly developed icon set in Slicer +3. Mov towards using QSS for styling + + + + +## Approach and Plan + + + + +1. Investigate current methods for swapping out light and dark themes for icons. +2. Work on putting together a QSS style implementation for the current Slicer Light / Dark themes +3. Investigate methods of orverrding the existing icon set +4. Consolidate and icons and remove non-existent icons + + + + +## Progress and Next Steps + + + + +1. 
Icon swap approach: External resource binaries + - Refactor out current resources into an external resource file + - [Dynamically Loaded Resources](https://doc.qt.io/qt-6/qresource.html#dynamic-resource-loading) + - Load dark vs. light resources at startup + - Pros + - Minimal changes to the Slicer core code (still using the resource system) + - Cons + - Icons can only be refreshed with a restart + - Build system will need to generate the rcc file instead of compiling in the resources + - Will need to add calls to register custom resources for modules and main application +2. Icon swap progress + - Created standalone example in [test extension](https://github.com/sjh26/SlicerIconSwitch) + - Created [branch](https://github.com/Slicer/Slicer/tree/icon-switching) for Slicer with WIP implementations for bundling external resources + - Setup external resources for loadable modules and libraries + +3. Next steps + - Work on swapping bundled resources for Python (or an icon picker approach in code) + - Work on bugs with existing implementation (timing of loading resources for loadable modules, using SVGs as mouse cursors, Transform icon is being stubborn) + - Review approach with Slicer dev team, mainly when / where the external resources are loaded at runtime, and where they are stored on disk + +# Illustrations + + +## Example of New icons +![image](https://raw.githubusercontent.com/Slicer/slicer-media-assets/main/SlicerIcons/SlicerSVG/SeparateStyles/LightThemeIcons/SpatialProbes/SlicerSlicePlanesOptions.svg) +![image](https://raw.githubusercontent.com/Slicer/slicer-media-assets/main/SlicerIcons/SlicerSVG/SeparateStyles/LightThemeIcons/Modules/SegmentEditorModule.svg) +![image](https://raw.githubusercontent.com/Slicer/slicer-media-assets/main/SlicerIcons/SlicerSVG/SeparateStyles/LightThemeIcons/Modules/WelcomeModule.svg) + +## Switching with external resources in test extension + +### Slicer Dark: 
+![SlicerDarkUpdated](https://github.com/NA-MIC/ProjectWeek/assets/25040869/12767317-b88c-4340-8652-4b919d2da814) + +![DarkToolBar](https://github.com/NA-MIC/ProjectWeek/assets/25040869/ea3b494a-d07e-40ca-a47c-ad4139380551) + + +### Slicer Light: +![SlicerLightUpdated](https://github.com/NA-MIC/ProjectWeek/assets/25040869/a13bea8b-5e7a-4da0-af67-02849cbe810e) + +![LightToolBar](https://github.com/NA-MIC/ProjectWeek/assets/25040869/fe36d249-a489-4a45-bfbf-5bd21c1183c2) + + +# Background and References + + +- [Icon design document](https://docs.google.com/document/d/1OYhRzBFjwT6dUOIDVL_II8ZQ8QUwDl68wbtt3eIV1ao/edit?usp=sharing) +- [Asset repo](https://github.com/Slicer/slicer-media-assets) +- [Testing extension](https://github.com/sjh26/SlicerIconSwitch) +- [WIP Slicer branch](https://github.com/Slicer/Slicer/tree/icon-switching) diff --git a/PW41_2024_MIT/Projects/UsingStatisticalShapeModelingForImproving3DReconstructionOfFetalUltrasoundImages/README.md b/PW41_2024_MIT/Projects/UsingStatisticalShapeModelingForImproving3DReconstructionOfFetalUltrasoundImages/README.md new file mode 100644 index 000000000..7885f08d2 --- /dev/null +++ b/PW41_2024_MIT/Projects/UsingStatisticalShapeModelingForImproving3DReconstructionOfFetalUltrasoundImages/README.md @@ -0,0 +1,92 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Using statistical shape modeling for improving 3D reconstruction of fetal ultrasound + images +category: Quantification and Computation +presenter_location: In-person + +key_investigators: + +- name: Chi Zhang + affiliation: Texas A&M University School of Dentistry + +- name: Emet Schneiderman + affiliation: Texas A&M University School of Dentistry + +- name: Preetam Schramm + affiliation: Texas A&M University School of Dentistry + +- name: Zohre German + affiliation: Texas A&M University School of Dentistry + +- name: Ju-Ying Lin + affiliation: 
Texas A&M University School of Dentistry + +--- + +# Project Description + + + + +We are initiating a multi-center (led by Dr. Emet Schneiderman) study to understand the impact of maternal obstructive sleep apnea (OSA) and different treatment plans (i.e., mouth appliances) on both pregnant women and fetuses, including fetal craniofacial dysmorphology and growth issues. A large sample size of ultrasound images will be collected. However, the fetuses may have different poses and partial face covered, and the ultrasonographic images have lots of noise. We are interested in using statistical modeling to refine 3D segmentation to diagnose fetal orofacial dysmorphology, reconstruct 3D growth trajectories, explore epigenetic effects of facial growth problems and dysmorphology, predict newborn facial shapes, etc. + + + +## Objective + + + + +1. Use statistical shape modeling tools or other tools from Slicer and Kitware to refine 3D reconstruct of fetal faces based on ultrasonography. Aesthetics is also important because part of the goal is to make parents aware of the sleep treatment. + + + +## Approach and Plan + + + + +1. Segmentation of noisy US images +2. Learn and explore techniques in statistical shape modeling and ultrasound image processing using 3D slicer and other tools developed by Kitware; set up a plan. +3. Locate sample data for experimenting. +4. Develop potential collaborations + + + + +## Progress and Next Steps + + + + +1. Preparing grant application and data collection plans +2. See above + +## PW Progress +1. Potential collaborators: Haichong Zhang and Xihan Ma from WPI +2. Find fetal facial features related to maternal sleep apnea that can only be quantified by 3D rather than standard 2D US imaging. + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +The most recent study is [Alomar et al. 
2022](https://www.sciencedirect.com/science/article/pii/S0169260722002759) using newborn babies to build a statistical model for US refinement. + +image diff --git a/PW41_2024_MIT/Projects/VirtualPlatePlacementForOrbitalSurgery/README.md b/PW41_2024_MIT/Projects/VirtualPlatePlacementForOrbitalSurgery/README.md new file mode 100644 index 000000000..66fc3a262 --- /dev/null +++ b/PW41_2024_MIT/Projects/VirtualPlatePlacementForOrbitalSurgery/README.md @@ -0,0 +1,144 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Virtual plate placement for orbital surgery +category: IGT and Training +presenter_location: In-person + +key_investigators: + +- name: Chi Zhang + affiliation: Texas A&M University School of Dentistry + country: USA + +- name: Kyle Sunderland + affiliation: Queen's University, Canada + +- name: Rafael Palomar + affiliation: Oslo University Hospital / NTNU, Norway + +- name: Braedon Gunn + affiliation: Texas A&M University School of Dentistry + country: USA + +- name: Andrew Read-Fuller + affiliation: Texas A&M University School of Dentistry + country: USA + +--- + +# Project Description + + + + +We are developing a Slicer module to simulate registering a titanium plate to orbital fracture sites to aid surgical planning and investigating the fitness of different commercial preformed plates across a large sample of patients. + + + + + +## Objective + + + + +Make a module to allow the plate to properly sit above the bone at the orbital fracture site + + +image + + + + + +## Approach and Plan + + + + +See below. + + + +## Progress and Next Steps + + + + +I have compiled existing methods in Slicer and VTK into one preliminary module [https://github.com/chz31/surgical_plate_registration](https://github.com/chz31/surgical_plate_registration) to do the semi-automatic registration: + +image + +1. 
An initial fiducial registration and a refined registration based on only allowing the plate to rotate while pivoting on the posterior stop, an important landmark for place the nail. + +**2. Use `VTKCollisionDetector()` and intersect marker to detect collision and mark the intersection** +image + +**3. Use `Probe Volume with Model ` to paint both the orbit and the plate to mark the overlapping areas.** +image + +**4. Use the new Interaction Transform Handle to manually fine tune the position** +image + +**7. Update new intersection and overlapping areas until they ar minimized.** +image + + +**Next steps:** +1. Improving the use of the Interaction Transform Handle and intersection marker to more efficiently adjust plate position so that it sits just above the bone. Converting transform matrix into standard descriptor: yaw, roll, and pitch. +image +(from https://doi.org/10.1371/journal.pone.0150162) + +3. Design measurements to quantify plate fitness. Comparing the shape of the plate with the unfractured orbit. + +4. Automated segmentation for segmenting fractured orbit. + +5. Explore methods such as reinforcement learning and SOFA for actual simulating how the plate is placed. + + +## PW Progress +Being able to use interaction handle to rotate the plate and real-time update colomap to highlight intersection and report intersection. This can provide visual aid for manual adjustment of plate position + +image image +image + +# Video demo: + + +https://youtu.be/4no8vEyKo5s + + +## Next steps +1. Refine color-map and details for visual aid +2. Working with Rafael for using Slicer-SOFA: + - Deforming the plate: + - Simulating plate failure (e.g. the bone area for screwing the plate damaged and plate became loose; adapting/bending poor fitted plate introduces metal fatigue) + - Including soft-tissue +3. Segmentation of fractured orbit? 
+ + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + +The repo for the current module: [surgical_plate_registration](https://github.com/chz31/surgical_plate_registration) +Other studies using commercial software iPlan from BrainLab, which should still be based on manual adjustment: [Schreur et al. 2017](https://doi.org/10.1371/journal.pone.0150162) and [Schreur et al. 2021](https://doi.org/10.1016/j.cxom.2020.10.003) diff --git a/PW41_2024_MIT/Projects/VisualEvaluationAnd3DInspectionOfCorticalSurfaceReconstructionsFromMriUsing3DSlicer/README.md b/PW41_2024_MIT/Projects/VisualEvaluationAnd3DInspectionOfCorticalSurfaceReconstructionsFromMriUsing3DSlicer/README.md new file mode 100644 index 000000000..30129b1e6 --- /dev/null +++ b/PW41_2024_MIT/Projects/VisualEvaluationAnd3DInspectionOfCorticalSurfaceReconstructionsFromMriUsing3DSlicer/README.md @@ -0,0 +1,100 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Visual Evaluation and 3D Inspection of Cortical Surface Reconstructions from MRI Using + 3D Slicer +category: Segmentation / Classification / Landmarking +presenter_location: Online + +key_investigators: + +- name: Kaveh Moradkhani + affiliation: ÉTS + country: Canada + +- name: Sylvain Bouix + affiliation: ÉTS + country: Canada + +- name: Jarrett Rushmore + affiliation: Boston University + country: USA + +--- + +# Project Description + + + + +We will visually evaluate and inspect the outputs obtained from the cortical surface reconstructions generated by our designed model. We will ensure the accuracy and correctness of these reconstructions, focusing on white matter and pial surfaces to assess their structural integrity and reliability. + + + +## Objective + + + + +1. Objective A: Visually evaluate cortical surface reconstructions from MRI images. Focus on assessing white matter and pial surfaces in .stl and .ply formats to ensure high-quality reconstructions. +2. 
Objective B: Visualize and inspect these reconstructions using 3D Slicer. Examine surface topology and detect self-intersections to ensure structural integrity and accuracy. + + + +## Approach and Plan + + + + +_No response_ + + + +## Progress and Next Steps + +Progress + +During the NA-MIC Project Weeks, significant progress was made in the following areas: + + +**1- Neuroanatomy and Neurosegmentation Overview:** Attended sessions led by Professor Jarrett Rushmore on neuroanatomy, neurosegmentation, and the evaluation of brain structures. + +**2- Freesurfer Outputs Examination:** Analyzed the outputs of Freesurfer, identifying inaccuracies, such as regions mistakenly classified as cortical surface, like the hippocampus, and parts including gray matter instead of cortex. + +**3- Sinus Examination in MRA Images:** Investigated the presence and structure of sinuses in MRA images. + +**4- Model Comparison:** Compared the outputs of our model against those from Freesurfer and Vox2Cortex models, identifying strengths and weaknesses. Notably, Fastsurfer was found to be inferior to Freesurfer in certain cases. + +**5- Model Evaluation and Problem Identification:** Evaluated the outputs of our cortical surface reconstruction model at critical points, identifying various issues and areas for improvement. + +**6- 3D Slicer Extension Development:** Developed approximately 70% of the extension for 3D Slicer to support cortical surface reconstruction tasks. + +Next Steps + +Moving forward, the following steps will be undertaken: + + +**1- Problem Resolution:** Address the issues identified during the visual evaluation of the model’s outputs, ensuring higher accuracy and structural integrity. + +**2- Extension Completion:** Finalize the development of the 3D Slicer extension to fully support the cortical surface reconstruction workflow. 
+ + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +_No response_ diff --git a/PW41_2024_MIT/Projects/VisualModelForRespiratoryMotionOfAirwaysWithSofaSlicer/README.md b/PW41_2024_MIT/Projects/VisualModelForRespiratoryMotionOfAirwaysWithSofaSlicer/README.md new file mode 100644 index 000000000..a0867ebad --- /dev/null +++ b/PW41_2024_MIT/Projects/VisualModelForRespiratoryMotionOfAirwaysWithSofaSlicer/README.md @@ -0,0 +1,115 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: Visual Model for Respiratory Motion of Airways with SOFA-Slicer +category: VR/AR and Rendering +presenter_location: In-person + +key_investigators: + +- name: Quinn Williams + affiliation: Brigham and Women's Hospital + country: USA + +- name: Steve Pieper + affiliation: Isomics, Inc. + country: USA + +- name: Rafael Palomar + affiliation: Oslo University Hospital / NTNU + country: Norway + +--- + +# Project Description + + + + +Integration of a softbody physics simulation of lung movement during breathing in SOFA into slicer. It will be used as a virtual reference for bronchoscopy alongside streamed camera data. + + + +## Objective + + + + +1. Integrate current simulation into the slicer with the slicer-SOFA extension +2. Refine the respiratory motion +3. Create UI elements to control breathing parameters +4. Sync camera position data with a virtual camera in the airways +5. Define a standard pipeline from segmentation to simulation for further bronchoscopy procedures + + + + +## Approach and Plan + + + + +1. Talk with slicer-sofa devs to understand the communication with sofa and slicer +2. Talk with SOFA devs for advice on lung physics in SOFA +3. Script the UI elements for the simulation +4. Figure out whether creating a virtual camera is benificial +5. Create a write-up on the process used to create the simulation and how to recreate it + + + + +## Progress and Next Steps + + + + +1. Created a module for slicerSOFA extension +2. 
Created scene for lung simulation with lung vtk mesh +3. Created UI elements to control simulation parameters(breathing force, period, youngs modulus) +4. Updated collision parameters for ribs + +Next steps: +1. fix rib collisions +2. allow rib movement during breathing +3. fix simulation caching +4. create a transformation for the airway segmentation based on the lung deformation + + + + +# Illustrations + + + +Current Lung Simulation in Slicer + + + + + + + + +Old Version in SOFA: + +![Current airway movement simulated with SOFA](https://github.com/NA-MIC/ProjectWeek/assets/63506358/2e408192-19b0-477f-8939-5a102cd10cff) + + + + +# Background and References + + +- Lung Simulation Module for slicerSOFA + - https://github.com/Quilliams85/Lung-Simulation-Module-SlicerSOFA.git + +- Old airway sofa simulation + - [https://github.com/Quilliams85/Lung-Simulation-SNR-Lab.git](https://github.com/Quilliams85/Lung-Simulation-SNR-Lab.git) diff --git a/PW41_2024_MIT/Projects/VolumeaxiVolumeAnalysisExplanabilityAndInterpretabilityOnCbct/README.md b/PW41_2024_MIT/Projects/VolumeaxiVolumeAnalysisExplanabilityAndInterpretabilityOnCbct/README.md new file mode 100644 index 000000000..c3435e85b --- /dev/null +++ b/PW41_2024_MIT/Projects/VolumeaxiVolumeAnalysisExplanabilityAndInterpretabilityOnCbct/README.md @@ -0,0 +1,114 @@ +--- +layout: pw41-project + +permalink: /:path/ + +project_title: VolumeAXI - Volume Analysis, Explanability and Interpretability on CBCT +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Jeanne Claret + affiliation: University of Michigan + country: USA + +- name: Gaëlle Leroux + affiliation: University of Michigan + country: USA + +- name: Lucia Cevidanes + affiliation: University of Michigan + country: USA + +- name: Claudia Mattos + affiliation: University of Michigan + country: USA + +- name: Juan Prieto + affiliation: University of 
North Carolina + country: USA + +--- + +# Project Description + + + +This project aims to develop interpretable deep learning models for the automated classification of impacted maxillary canines and assessment of dental root resorption in adjacent teeth using Cone-Beam Computed Tomography (CBCT). Impacted maxillary canines (IC) are a common clinical problem that can lead to complications if not diagnosed and treated early. We propose to develop a 3D slicer module, called Volume Analysis, eXplainability and Interpretability (Volume-AXI), with the goal of providing users an explainable approach for classification of bone and teeth structural defects in CBCT scans gray-level images. We test various deep learning models based on Monai Convolutional Neural Network (CNN) architectures to classify impacted maxillary canine position and detect root resorption. Gradient-weighted Class Activation Mapping (Grad-CAM) has already been integrated to generate visual explanations of the CNN predictions, enhancing interpretability and trustworthiness for clinical adoption. + + + +## Objective + + + + +1. Classify tooth position within the bone using the Monai Densenet 121 and 201. +2. Enhance Explainability and Interpretability of the Classification by generating salience maps using Monai GradCAM +3. Create the VolumeAXI 3D Slicer module and deploy the model as part toe the Slicer automated Dental tools extension + + + + +## Approach and Plan + + + + +1. Data Preparation and Pre-processing +2. Model Development and Training: Explore and select appropriate neural network architectures (e.g., ResNet, SENets, DenseNet) for image classification and feature visualization. +3. Explainability and Visualization Techniques: Implement methods to make AI decisions transparent and understandable such as Grad-CAM. +4. Validation and Testing +5. Documentation and Training: Create comprehensive documentation and user guides explaining the functionality and benefits of the AI tools. 
+ + + + + +## Progress and Next Steps + + + +1. Trained models with DenseNet architecture to classify the buccolingual position of the impacted maxillary canine. +2. Implementation of GRAD-CAM with MONAI for visualization + +Project Week Update: +1. Evaluated various root resorption assessment techniques, concluding that alternative methods are required for optimal results. +2. Conducted additional experiments on position classification to enhance current performance metrics. +3. Initiated deployment of the VolumeAXI module. + +Next Steps: +1. Change pipeline direction to classify root resorption. +2. Find the best hyper-parameters for the given applications to improve the results. +3. Finish the module by implementing the interpretability. +4. Clean and organise the code. +5. Write the documentation and provide examples to use the code. + + + +# Illustrations + + +### 3D Slicer Interface of VolumeAXI + +Screenshot 2024-06-28 at 9 33 30 AM + + +#### Well predicted case + + +![Position_grouped](https://github.com/NA-MIC/ProjectWeek/assets/91120559/46528c60-eb97-4011-953a-7d03f9671fbb) + + + + + +# Background and References + + + + +[VolumeAXI repository](https://github.com/Jeanneclre/VolumeAXI) diff --git a/PW41_2024_MIT/Projects/image.png b/PW41_2024_MIT/Projects/image.png new file mode 100644 index 000000000..af011f241 Binary files /dev/null and b/PW41_2024_MIT/Projects/image.png differ diff --git a/PW41_2024_MIT/README.md b/PW41_2024_MIT/README.md new file mode 100644 index 000000000..9480fc8de --- /dev/null +++ b/PW41_2024_MIT/README.md @@ -0,0 +1,194 @@ +--- +permalink: /:path/ +redirect_from: +- /PW41_2024_Boston/README.html +- /PW41_2024_Boston/Readme.html + +project_categories: +- IGT and Training +- DICOM +- VR/AR and Rendering +- Segmentation / Classification / Landmarking +- Quantification and Computation +- Registration +- Cloud / Web +- Infrastructure +- Other +--- + +# Welcome to the web page for the 41st Project Week! 
+ +[This event](https://projectweek.na-mic.org/PW41_2024_Boston/README.html) took place June 24th - June 28th, 2024 at MIT, Cambridge, MA. Project Week 41 was a hybrid event with a strong in-person component. + +## Location + +Grier Rooms (34-401A, 34-401B), Building 34, 50 Vassar Street, Cambridge, MA + +## Preparation meetings + +We held weekly preparation meetings at 10am on Tuesdays on Zoom, starting April 30, 2024. +## Registration +Register at the link [here](https://cvent.me/dldl10). + +## Discord +The **Discord** application is used to communicate between team members and organize activities before and during Project Week. Please join the Project Week [Discord server](https://discord.gg/AkxzKvqMBp) as soon as possible and explore its functionality before the workshop. For more information on the use of Discord before and during Project Week, please visit [this page](../common/Discord.md). + +## Agenda + +{% include calendar.md from="2024-06-24" to="2024-06-28"%} + +## Breakout sessions + +### [Slicer Platform Slides for PW 41](https://docs.google.com/presentation/d/1CX99cU3zJDzLC6PUA3Is07HckF98AxkGsYd6oojEv9o/edit?usp=sharing) + +## Projects + +To learn how to create or update project pages, please refer to the [contributing project pages](ContributingProjectPages.md) section. + +{% include projects.md %} + +## Registrants + +Do not add your name to this list below. It is maintained by the organizers based on your registration. + +List of registered participants so far (names will be added here after processing registrations): + + + + +Updated on 2024-06-29. + +1. Ekaterina Akhmad, Maastricht University, Netherlands, Virtual +1. Ron Alkalay, Beth Israel Deaconess Medical Center, USA, In Person +1. Tamaz Amiranashvili, University of Zurich, Switzerland, In Person +1. Dennis Anderson, Beth Israel Deaconess Medical Center, USA, In Person +1. Theodore Aptekarev, Slicer Community, Montenegro, Virtual +1. 
Amene Asgari, Brigham and women’s hospital, USA, In Person +1. Paul Baksic, INRIA - France, France, Virtual +1. Mikulas Bankovic, German Cancer Research Center, Germany, Virtual +1. Imre János Barabás, Semmelweis University, Hungary, In Person +1. Mohamed Alalli Bilal, Université Nouakchott Al-Asriya, Mauritania, Virtual +1. Owen Borders, Massachusetts General Hospital, USA, In Person +1. Sylvain Bouix, École de technologie supérieure, Canada, In Person +1. Charles Bricout, ETS, Canada, In Person +1. Mamadou Samba CAMARA, Université Cheikh Anta Diop de Dakar, Senegal, Virtual +1. Naomi Catwell, ÉTS, Canada, Virtual +1. Mirela Cazzolato, University of Sao Paulo, Brazil, Virtual +1. Lucia Cevidanes, University of Michigan, USA, Virtual +1. Gnaneswar Chundi, Rutgers University, USA, In Person +1. Cosmin Ciausu, Brigham and Women's Hospital, USA, In Person +1. Jeanne Claret, University of Michigan, USA, In Person +1. David Clunie, PixelMed, USA, Virtual +1. Carole-Anne COS, Paris Brain Institute, France, Virtual +1. Mariana Costa Bernardes Matias, Brigham and Women's Hospital, USA, In Person +1. Ghazal Danaee, École de technologie supérieure( ÉTS), Canada, Virtual +1. sedigheh dargahi, École de technologie supérieure, Canada, Virtual +1. Florian Davaux, Unaffiliated, , In Person +1. Paulo Eduardo de Barros Veiga, Universidade de São Paulo, Brazil, Virtual +1. Anton Deguet, Johns Hopkins University, USA, In Person +1. Chayanika Devi, University of Cincinnati, USA, Virtual +1. Mouhamed DIOP, Cheikh Anta Diop University of Dakar, Senegal, Virtual +1. Lucie Dole, Unaffiliated, , In Person +1. Mauro Ignacio Dominguez, Independent, Argentina, Virtual +1. Reuben Dorent, Brigham and Women's Hospital, USA, In Person +1. Felix Dorfner, Massachusetts General Hospital and Charité - Universitätsmedizin Berlin, USA, In Person +1. Simon Drouin, École de technologie supérieure, Canada, In Person +1. Moumen El-Melegy, Assiut University, Brigham and Women's Hospital, USA, In Person +1. 
Zeinabou Babe Elemine, Cheikh Anta Diop University, Mauritania, Virtual +1. Andrey Fedorov, Brigham and Women's Hospital, USA, In Person +1. sara Fernandez Vidal, PARIS BRAIN INSTITUTE, France, In Person +1. Jean-Christophe Fillion-Robin, Kitware, USA, In Person +1. Sarah Frisken, Brigham and Women's Hospital, USA, In Person +1. Colin Galvin, Brigham and Women's Hospital, USA, In Person +1. Valeria Gomez Valdes, Slicer translation for Latin America- UAEMéx, Mexico, Virtual +1. Douglas Samuel Gonçalves, USP, Brazil, Virtual +1. Martin Gregorio, ICM, France, Virtual +1. Michael Halle, Open Anatomy Project, USA, In Person +1. Nazim Haouchine, BWH, USA, In Person +1. Ahmed Hassan, Harvard Medical School, Netherlands, In Person +1. Carl Haugg, Harvard Medical School, USA, In Person +1. Enrique Hernandez-Laredo, Universidad Autónoma del Estado de México, Mexico, Virtual +1. Vy Hong, Technical University of Munich, Germany, In Person +1. Samantha Horvath, Kitware Inc, USA, In Person +1. Bing-Xing Huo, Broad Institute, USA, Virtual +1. Mike Jin, Brigham and Women's Hospital; Centaur Labs, USA, In Person +1. JANGRAE JO, University of Massachusetts, USA, Virtual +1. Megha Kalia, Brigham and Womens, USA, In Person +1. Tina Kapur, Brigham and Women's Hospital, Harvard Medical School, USA, In Person +1. Ron Kikinis, Brigham and Women's Hospital, Harvard Medical School, USA, In Person +1. Deepa Krishnaswamy, Brigham and Women's Hospital, USA, In Person +1. Andras Lasso, Queen's University, Canada, In Person +1. kyuheon lee, korea university, Republic of Korea, Virtual +1. Simon Leonard, Johns Hopkins University, USA, In Person +1. Gaelle Leroux, University of Michigan, USA, In Person +1. Rui Li, New York University, USA, Virtual +1. Wenjie Liang, Maastricht University, Netherlands, Virtual +1. Curtis Lisle, KnowledgeVis, LLC, USA, In Person +1. Xihan Ma, Worcester Polytechnic Institute, USA, In Person +1. Murat Maga, Seattle Childrens Research Institute, USA, Virtual +1. 
Nikolaos Makris, Massachusetts General Hospital, USA, In Person +1. Katie Mastrogiacomo, Brigham and Women's Hospital, USA, In Person +1. Moaid Mohamedosman, Unaffiliated, Egypt, Virtual +1. Reza Mojahed-Yazdi, AIM Lab, Brigham and Women Hospital, USA, In Person +1. Victor Manuel Montaño Serrano, Universidad Autónoma del Estado de México, Mexico, Virtual +1. Kaveh Moradkhani, École de technologie supérieure ÉTS, Canada, Virtual +1. Pedro Moreira, Brigham and Women's Hospital, USA, In Person +1. Luiz Murta, University of São Paulo, Brazil, Virtual +1. Fatou Bintou NDIAYE, Université Cheikh Anta Diop of Dakar, Senegal, Virtual +1. Leonard Nürnberg, MGB / Harvard, Netherlands, In Person +1. Ballambat Suraj Pai, Brigham and Womens Hospital, USA, In Person +1. Rafael Palomar, Oslo University Hospital, Norway, In Person +1. Umang Pandey, Clinica Universidad de Navarra, Madrid, Spain, Virtual +1. Tae Young Park, KIST, Republic of Korea, Virtual +1. Sam Pathak, NCI, USA, Virtual +1. Steve Pieper, Isomics, Inc., USA, In Person +1. Csaba Pinter, EBATINCA, Spain, Virtual +1. Juan Prieto, University of North Carolina, USA, In Person +1. Sonia Pujol, Brigham and Women's Hospital, Harvard Medical School, USA, In Person +1. Ciro Benito Raggio, Karlsruhe Institute of Technology (KIT), Germany, Virtual +1. Aneesh Rangnekar, Memorial Sloan Kettering Cancer Center, USA, Virtual +1. Domenico Riggio, Karlsruhe Institute of Technology, Germany, Virtual +1. Monserrat Ríos-Hernández, Universidad Autónoma del Estado de México, Mexico, Virtual +1. Sara Rolfe, Seattle Children's Research Institute, USA, In Person +1. Jarrett Rushmore, Boston University School of Medicine, USA, In Person +1. Divya Sain, Velsera, USA, Virtual +1. Johanna Samuelsson, Velsera, Sweden, Virtual +1. Lucas Sanchez Silva, Universidade de São Paulo, Brazil, Virtual +1. Leonardo Seoane, Ochsner Health System, USA, In Person +1. Zahra Soltani, Beth Israel Deaconess Medical Center, USA, Virtual +1. 
Kyle Sunderland, Queen's University, Canada, In Person +1. Baye Balla SY, Military Health School of Dakar, Senegal, Virtual +1. Attila Tanács, University of Szeged, Hungary, Virtual +1. Jess Tate, University of Utah, USA, Virtual +1. Pape mady Thiao, EMS, Senegal, Virtual +1. Vamsi Thiriveedhi, Brigham and Women's Hospital, USA, In Person +1. Oshane Thomas, Seattle Children's Research Institute, USA, Virtual +1. Junichi Tokuda, Brigham and Women's Hospital, USA, In Person +1. Tamas Ungi, Queen's University, Canada, In Person +1. Jeff VanOss, BAMF Health, USA, Virtual +1. Benoit Verreman, ÉTS, Canada, In Person +1. Adriana H. Vilchis González, Facultad de Ingeniería- Facultad de Medicina _UAEMéx, Mexico, Virtual +1. William Wells, BWH, USA, In Person +1. Quinn Williams, Brigham and Women's Hospital, USA, In Person +1. Murong Xu, Univerisity of Zurich, Switzerland, In Person +1. Khaled Younis, MedAiConsult, United States Minor Outlying Islands, Virtual +1. Chi Zhang, Texas A&M University School of Dentistry, USA, In Person +1. 
Haichong Zhang, Worcester Polytechnic Institute, USA, In Person + + + +## Statistics + +Participation statistics + +## Organizers + +* [@tkapur](https://github.com/tkapur) ([Tina Kapur, PhD](http://www.spl.harvard.edu/pages/People/tkapur)), +* [@drouin-simon](https://github.com/drouin-simon) ([Simon Drouin, PhD](https://drouin-simon.github.io/ETS-web//)) +* [@rafaelpalomar](https://github.com/rafaelpalomar) ([Rafael Palomar, PhD](https://www.ntnu.edu/employees/rafaelp)) +* [@piiq](https://github.com/piiq) ([Theodore Aptekarev](https://discourse.slicer.org/u/pll_llq)) +* [@sjh26](https://github.com/sjh26) ([Sam Horvath, PhD](https://www.kitware.com/samantha-horvath/)) + +## History +Please read about our experience in running these events since 2005: [Increasing the Impact of Medical Image Computing Using +Community-Based Open-Access Hackathons: the NA-MIC and 3D Slicer Experience](http://perk.cs.queensu.ca/sites/perkd7.cs.queensu.ca/files/Kapur2016.pdf). 
diff --git a/PW41_2024_MIT/statistics.svg b/PW41_2024_MIT/statistics.svg new file mode 100644 index 000000000..e6c58549d --- /dev/null +++ b/PW41_2024_MIT/statistics.svg @@ -0,0 +1,15253 @@ + + + + + + + + 2024-06-29T16:18:41.295483 + image/svg+xml + + + Matplotlib v3.9.0, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/PW42_2025_GranCanaria/ContributingProjectPages.md b/PW42_2025_GranCanaria/ContributingProjectPages.md new file mode 100644 index 000000000..c5e353ad3 --- /dev/null +++ b/PW42_2025_GranCanaria/ContributingProjectPages.md @@ -0,0 +1,85 @@ +--- +--- +{% comment %}Extract event name (e.g "PW39_2023_Montreal"). {% endcomment %} +{%- assign event_name = page.path | split: '/' | first -%} + +# Contributing Project Pages + +## Creating new project pages + +With the [Project Week GitHub Issue page](https://github.com/NA-MIC/ProjectWeek/issues/new/choose), you have three options to create your Project Page: + +1. [Create a Proposal](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=sjh26&labels=proposal%2Cevent%3A{{ event_name }}&projects=&template=proposal.yml&title=Proposal%3A+) issue: If you have an idea for a project page but are not quite ready to create it yet, you can create a “Proposal” issue. You will still need to create a project page later. + +2. [Create a Project](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=sjh26&labels=project%2Cevent%3A{{ event_name }}&projects=&template=project.yml&title=Project%3A+) issue: If you are ready to create your page, you can simply create a “Project” issue. This issue will allow you to fill out a convenient form to provide the necessary details. 
The Project Week website team will then review the issue and trigger the page creation pull request. + +3. [Create the project page yourself using the template](Projects/README.md): If you prefer to create the Project Page yourself, you can still do so by using the provided template and submitting a pull request. + +## Project Creation Tips + +- Get your project pages created early! The day before is best to make sure everything you need for you presentation is available. The ProjectWeek site will be closed to edits for the ***10 minutes before*** both the opening and closing presentation session to ensure site generation. After this 10 minute period edits will be re-enabled. + +- If you are [creating the project page yourself using the template](Projects/README.md), **don't reuse a project page template from a previous year.** We have made significant updates to the template to support auto-generation of project pages, so previous years' templates will not function properly. + + - When naming the file, **please ensure there are no spaces/special characters in the folder or file name** + - Make sure to fill out / update all of the information at the top of the README file (title, category, location, etc) + +- Remember to fill out the title for your project when using the [project creation issue](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=sjh26&labels=project%2Cevent%3A{{ event_name }}&projects=&template=project.yml&title=Project%3A+) + +- Check the formatting on the Key Investigators list when creating a [project issue](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=sjh26&labels=project%2Cevent%3A{{ event_name }}&projects=&template=project.yml&title=Project%3A+) (this is critical for page generation): + + `- Firstname Lastname (Affiliation, Country)` + +## Updating existing project pages + +Here are the steps using the GitHub web interface: + +1. Navigate to your project's `README.md` on the GitHub website. 
For instance, if you want to update a project called **YourProjectName**, visit the URL like the following: + + ``` + https://github.com/NA-MIC/ProjectWeek/blob/master/{{ event_name }}/Projects/YourProjectName/README.md + ``` + +2. Click the edit button, as shown in this screenshot: ![Screenshot 2023-06-12 10 43 35](https://github.com/NA-MIC/ProjectWeek/assets/25040869/ab01a7bf-c1e4-4c23-9aca-e2c6421ca530) + +3. You can now edit the page, add images by dragging and dropping, and more. + +4. Once done, click "Commit Changes", and follow the instructions to create a fork and a pull request to add your changes to the webpage. See this screenshot for reference: ![Screenshot 2023-06-12 10 50 50](https://github.com/NA-MIC/ProjectWeek/assets/25040869/180e81bb-d4f9-4f65-8569-a93192b2828e) + +## Videos in project pages + +Here are some steps to make sure all of your awesome videos render correctly: + +1. Videos added by drag and drop will render correctly when viewed through GitHub, but need some extra tweaks to work in the final generated website. + + + In your `README.md`, if you have a video link that looks like this: + + ``` + https://github.com/NA-MIC/ProjectWeek/assets/66890913/8f257f29-fa9c-4319-8c49-4138003eba27 + ``` + + Update it to: + + ```html + + ``` + +2. Links to externally hosted videos (such as YouTube) will need an iframe. 
+ + Replace: + + ``` + https://youtu.be/ZWxE5QcGvE8 + ``` + + with + + ````html + + ```` diff --git a/PW42_2025_GranCanaria/PW40_Group.jpg b/PW42_2025_GranCanaria/PW40_Group.jpg new file mode 100644 index 000000000..33cd9baa3 Binary files /dev/null and b/PW42_2025_GranCanaria/PW40_Group.jpg differ diff --git a/PW42_2025_GranCanaria/PW40_WorkArea.jpg b/PW42_2025_GranCanaria/PW40_WorkArea.jpg new file mode 100644 index 000000000..fd9f7087f Binary files /dev/null and b/PW42_2025_GranCanaria/PW40_WorkArea.jpg differ diff --git a/PW42_2025_GranCanaria/Projects/3DSlicerForLatinAmerica/README.md b/PW42_2025_GranCanaria/Projects/3DSlicerForLatinAmerica/README.md new file mode 100644 index 000000000..558394b55 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/3DSlicerForLatinAmerica/README.md @@ -0,0 +1,168 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: 3D Slicer for Latin America +category: Infrastructure + +key_investigators: +- name: Sonia Pujol + affiliation: Brigham and Women's Hospital, Harvard Medical School + country: USA + +- name: Luiz Murta + affiliation: Universidade de São Paulo + country: Brazil + +- name: Douglas Samuel Gonçalves + affiliation: Universidade de São Paulo + country: Brazil + +- name: Lucas Sanchez Silva + affiliation: Universidade de São Paulo + country: Brazil + +- name: Paulo Eduardo de Barros Veiga + affiliation: Universidade de São Paulo + country: Brazil + +- name: Paulo Guilherme Pinheiro Pereira + affiliation: Universidade de São Paulo + country: Brazil + +- name: Mirela Teixeira Cazzolato + affiliation: Universidade de São Paulo + country: Brazil + +- name: Adriana Herlinda Vilchis González + affiliation: Universida Autónoma del Estado de México + country: Mexico + +- name: Enrique Hernández Laredo + affiliation: Universida Autónoma del Estado de México + country: Mexico + +- name: Victor Manuel Montaño Serrano + affiliation: Universida Autónoma del Estado de México + country: Mexico + +- name: Monserrat 
Ríos-Hernández + affiliation: Universida Autónoma del Estado de México + country: Mexico + +- name: Valeria Gómez Valdes + affiliation: Universida Autónoma del Estado de México + country: Mexico + +- name: Juan Carlos Avila Vilchis + affiliation: Universida Autónoma del Estado de México + country: Mexico + +- name: Fatou Bintou Ndiaye + affiliation: University Cheikh Anta Diop + country: Senegal + +- name: Mohamed Alalli Bilal + affiliation: University Cheikh Anta Diop + country: Senegal + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +- name: Steve Pieper + affiliation: Isomics Inc. + country: USA + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware Inc. + country: USA + +--- + +# Project Description + +The goal of this project is twofold: first, to leverage 3D Slicer’s internationalization infrastructure to localize the software into Spanish and Portuguese, and second, to develop a novel software infrastructure for localizing tutorials. + +## Objective + + + +1. Solve the existing GitHub issues related to core functionalities of the extension Tutorial Maker. For example, not handling clicks or widget resizing during annotations blocks the user from finishing the tutorial. +2. Solve the existing GitHub issues raised during the African team tests. For example, buttons in the annotator that don't work properly, pdf exporter breaking the original format. +3. Fix problems in existing tutorials. For example, some missing slides in the PDF. +4. Improve the code's legibility and the extension's performance. +5. Collect more feedback from the users on the event. + +## Approach and Plan + + + +1. All the tasks (issues) are designated to pairs in the team, during the project week we will keep in continuous touch to solve each of these problems and any problem raised during feedback in the project week. + +## Progress and Next Steps + + + +1. 
Fix core-related issues + +| Old | New | +| --- | --- | +| ![image](https://github.com/user-attachments/assets/61f238dc-0352-403d-b909-ca03591524f0) | ![one](https://github.com/user-attachments/assets/d56da455-7fc8-46b5-afc5-7a591edd15d6) | +| ![image](https://github.com/user-attachments/assets/65cd72c9-ddc4-4511-9b05-8bd0e6e31a9f) | ![image](https://github.com/user-attachments/assets/9a2bf575-c500-42f2-92df-e42d658f60fa) | + +2. Code improvement + +- Creation of a GitHub Action to run the tests of the extension either on pushing `main` or `develop`, or when a pull request is created. + +| Old | New | +| --- | --- | +| ![image](https://github.com/user-attachments/assets/34c7d441-1039-4105-906f-5054b79bbdfd) | ![image](https://github.com/user-attachments/assets/bde1d39d-b2e5-47be-a137-66fc30e9f08c) | + + - Related pull requests: + - [SlicerTutorialMaker#67](https://github.com/SlicerLatinAmerica/SlicerTutorialMaker/pull/67): `BUG: Prevent test_TutorialMaker1 from being detected as a unittest case` + - [SlicerTutorialMaker#68](https://github.com/SlicerLatinAmerica/SlicerTutorialMaker/pull/68): `ENH: Streamline execution of tests downloading Slicer` + +- Review the approach FileMDHTML to export HTML and print PDF. [Commit](https://github.com/SlicerLatinAmerica/SlicerTutorialMaker/commit/4ae712601ffed42a7d39b96dc89f2212dd4caf22) + +- Review the TutorialGUI approach to open Annotator and manipulate the events more reliably. Adding new features. 
+![ArrowShowcase](https://github.com/user-attachments/assets/aa88cd04-5584-4aa9-957c-92712fecab24) + +![image](https://github.com/user-attachments/assets/81f8f7b9-77bc-426d-b564-7035c7449834) | ![image](https://github.com/user-attachments/assets/91e9c0dc-dd5b-4f59-ba4d-ddf9480ba6a7) + +- Add new features like the ability to select and add screenshots more than one time +![image](https://github.com/user-attachments/assets/4181dbc5-cf40-4b28-8dc5-7b9a1360b51b) + + +3. Use case (WIP) + +- BoneReconstructionPlannerTutorial - Mauro Dominguez [Repository](https://github.com/SlicerLatinAmerica/TestSlicerTutorials/blob/feature/add_bone_reconstruction_planner_tutorial/Tutorials/BoneReconstructionPlannerTutorial/BoneReconstructionPlannerTutorial.py) + +![image](https://github.com/user-attachments/assets/54dd3cd7-6457-4dc7-a266-da222b813018) +![image](https://github.com/user-attachments/assets/f5bf233f-5b60-4c22-bef0-560afd150a9e) +![image](https://github.com/user-attachments/assets/8e073f1a-2d8c-4de5-9a63-1c05600610e9) + +5. Tutorials manually translated + +| Slicer Developer Tutorial: Programming in Slicer. S. Pujol, S. Pieper | Segmentation for 3D printing. A. Nagy, C. Pintér | +| --- | --- | +| [Portuguese](https://slicerlatinamerica.github.io/media/Tutorials/Slicer5_Programando%20no%20Slicer_SPujol-SPieper.pdf) | [Portuguese](https://slicerlatinamerica.github.io/media/Tutorials/Segmentation3DPrinting-ANagy-CPinter_pt-BR.pdf) | +| [Spanish](https://slicerlatinamerica.github.io/media/Tutorials/Slicer5_ProgrammingTutorial_es-419.pdf) | [Spanish](https://slicerlatinamerica.github.io/media/Tutorials/SegmentationFor3DPrinting_es.pdf) | + +# Next Steps + +1. Merge all the work that was created during the PW +2. Discuss and define what will be generated to the repositories +3. 
Discuss and define how the other extensions will extends TutorialMaker to create tutorials + +# Illustrations + +# Background and References + + + +[TutorialMaker](https://github.com/SlicerLatinAmerica/SlicerTutorialMaker) +The repository with all the code related to the extension. Feel free to open an issue or contribute! You can also download using the Extension Manager in the preview version of 3D Slicer. diff --git a/PW42_2025_GranCanaria/Projects/3Dand2DRadiologyCopilotIntegrationin3DSlicer/README.md b/PW42_2025_GranCanaria/Projects/3Dand2DRadiologyCopilotIntegrationin3DSlicer/README.md new file mode 100644 index 000000000..9150f30e2 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/3Dand2DRadiologyCopilotIntegrationin3DSlicer/README.md @@ -0,0 +1,94 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: 3D and 2D Radiology Copilot Integration in 3D Slicer +category: Segmentation / Classification / Landmarking + +key_investigators: +- name: Andres Diaz-Pinto + affiliation: NVIDIA + country: UK +- name: Andras Lasso + affiliation: Queen's University + country: Canada +- name: Vishwesh Nath + affiliation: NVIDIA + country: USA +- name: Nigel Nelson + affiliation: NVIDIA + country: USA +- name: Sean Huver + affiliation: NVIDIA + country: USA +- name: Mingxin Zheng + affiliation: NVIDIA + country: China +- name: Wenqi Li + affiliation: NVIDIA + country: UK +- name: Xueyan Mei + affiliation: Mount Sinai + country: USA +- name: Zelong Liu + affiliation: Mount Sinai + country: USA +- name: Tim Deyer + affiliation: East River Imaging + country: USA +--- + +# Project Description + +This project aims to create the first 3D and 2D radiology copilot in 3D Slicer by developing a module that can consume REST APIs of radiology copilots. The main goal is to demonstrate the benefits of using the NVIDIA Holoscan platform for deploying AI models in medical imaging applications. + +## Objective + +1. 
Develop a 3D Slicer module that integrates with radiology copilots via REST APIs. +2. Showcase the capabilities of the NVIDIA Holoscan platform for AI model deployment in medical imaging. +3. Implement functionality to send 3D volumes to the copilot, ask questions, and receive insights. + +## Approach and Plan + +1. Create a new 3D Slicer module that can communicate with REST APIs +2. Integrate the private model trained by East River Imaging and NVIDIA using the RadImageNet dataset. +3. Implement support for the MONAI VILA-M3 model. +4. Develop a user interface within the module to allow sending 3D volumes, asking questions, and displaying copilot insights. +5. Optimize the module's performance using the NVIDIA Holoscan platform. + +## Progress and Next Steps + +1. Initial project setup and team coordination completed. +2. Research on integrating REST APIs within 3D Slicer modules conducted. +3. Preliminary design of the user interface drafted. +4. Repository: [https://github.com/Project-MONAI/VLM/tree/main/plugins/RadCoPilot_Slicer](https://github.com/Project-MONAI/VLM/tree/main/plugins/RadCoPilot_Slicer) + +## Next Steps: + +### For RadViLLA server: + +- In the server, create a session so loading volume and resizing doesn't need to happen every time the user sends a prompt +- Cache the volume so inference is faster + +### VILA-M3: + +- [Current server](https://github.com/Project-MONAI/VLM/pull/66) using NVCF doesn't accept volumes in the cloud with HTTPS. 
Change this flag or make sure it also accepts volumes hosted using HTTP +- Get the current slice and send it to the prompt request rather than keeping it hard coded in the server + +# Illustrations + +![updatedSlicerModule](https://github.com/user-attachments/assets/4c2bb0cb-0367-4758-bbb6-786283206c73) + + +[video](https://github.com/user-attachments/assets/0d9fd2c6-ef26-4d14-851e-c761bb218ea7) + + + +# Background and References + +- [NVIDIA Holoscan SDK](https://github.com/nvidia-holoscan/holoscan-sdk) +- [East River Imaging](https://eastriverimaging.com/) +- [NVIDIA](www.nvidia.com) +- [RadImageNet dataset](https://www.radimagenet.com/) +- [MONAI VILA-M3 paper](https://arxiv.org/pdf/2411.12915) diff --git a/PW42_2025_GranCanaria/Projects/AutomaticClassificationOfMrScanSequenceType/README.md b/PW42_2025_GranCanaria/Projects/AutomaticClassificationOfMrScanSequenceType/README.md new file mode 100644 index 000000000..0925ebe0a --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/AutomaticClassificationOfMrScanSequenceType/README.md @@ -0,0 +1,105 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Automatic classification of MR scan sequence type +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Deepa Krishnaswamy + affiliation: Brigham and Women's Hospital/Harvard Medical School + country: USA + +- name: Andrey Fedorov + affiliation: Brigham and Women's Hospital/Harvard Medical School + country: USA + +- name: Joost van Griethuysen + affiliation: The Netherlands Cancer Institute + country: The Netherlands + +--- + +# Project Description + + + + +Knowing the type of MRI scan is an important data curation step. For instance clinicians and developers need to know if a scan is T1 weighted, T2 weighted, diffusion, etc in order to make a diagnosis or develop an AI model. 
This curation can take a long time to do manually, especially if the fields in DICOM data are missing or incorrect. Some tools have been developed already, mostly for brain image classification, and only a few are available for abdominal/prostate areas. + +![PW_41_image_radiology_cropped](https://github.com/user-attachments/assets/f15b3843-7800-4fdf-aa3e-fa9782928734) + +The past two project weeks, we've made some progress in developing tools for AI/ML classification of prostate MR scans. See our: +- [PW 41 page](https://projectweek.na-mic.org/PW41_2024_MIT/Projects/AutomaticClassificationOfMrScanSequenceType/) +- [PW 40 page](https://projectweek.na-mic.org/PW40_2024_GranCanaria/Projects/DicomSeriesClassificationAndVisualizationOfParameters/) +- [paper here](https://openreview.net/forum?id=1GEz81GU3g) and [code here](https://github.com/deepakri201/DICOMScanClassification). + +In this project week, we will focus on creating a 3DSlicer module. + + +## Objective + + + + +1. We will create a 3DSlicer module to perform the scan type classification on all series in a study. + + + + +## Approach and Plan + + + + +1. We will first allow the user to pick a study from the DICOM database. +2. We will run inference using our pre-trained prostate model on all the series in the study. +3. We will modify the layout automatically. +4. If there is time, we will allow the user to choose a body part and appropriate model - there are models for brain MRI, and chest/abdominal MRI scan classification + + + + +## Progress and Next Steps + + + + +1. We have created an CNN that uses both image+metadata information to classify a scan into T1w, T2w, diffusion and apparent diffusion coefficient maps. +2. We load data into the DICOM database, and allow the user to choose a specific patient + study +3. 
We list all the series, and show which ones can be classified using the model + + + + +# Illustrations + + + + +![PW_42_image_CNN_cropped](https://github.com/user-attachments/assets/b20ed579-95b0-4a32-8bde-96b61fc48efa) + + + + + +# Background and References + + + +[GitHub repo for WIP SlicerMRClass extension](https://github.com/deepakri201/SlicerMRClass/) + +[PW 41 work](https://projectweek.na-mic.org/PW41_2024_MIT/Projects/AutomaticClassificationOfMrScanSequenceType/) + +[PW 40 work](https://projectweek.na-mic.org/PW40_2024_GranCanaria/Projects/DicomSeriesClassificationAndVisualizationOfParameters/) + +# Related work +- [dcm-classifier](https://github.com/BRAINSia/dcm-classifier) diff --git a/PW42_2025_GranCanaria/Projects/Bringing3DSlicerToTheWebWithSlicerTrame/README.md b/PW42_2025_GranCanaria/Projects/Bringing3DSlicerToTheWebWithSlicerTrame/README.md new file mode 100644 index 000000000..cec486cf8 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/Bringing3DSlicerToTheWebWithSlicerTrame/README.md @@ -0,0 +1,77 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Bringing 3D Slicer to the web with Slicer trame +category: Infrastructure + +key_investigators: + +- name: Thibault Pelletier + affiliation: Kitware + country: France + +--- + +# Project Description + + + + +trame is a Python-based framework that simplifies the creation of interactive visual applications for web, desktop, and Jupyter environments. It leverages powerful visualization libraries like VTK and ParaView, enabling developers to build applications without extensive web development knowledge. + +This project aims to continue the ongoing effort of bringing 3D Slicer to the web using the trame framework. It will enable the creation of powerful modern web applications by reusing core components of 3D Slicer. Additionally, it will establish a connection between existing 3D Slicer modules and Slicer trame-based modules. 
+ + + +## Objective + + + + +1. Help researchers and developers getting started with Slicer trame based applications +2. Allow running Slicer trame servers from an existing 3D Slicer install tree +3. Allow creating scripted modules compatible both with 3D Slicer and Slicer trame + + + +## Approach and Plan + + + + +1. Hold a breakout session for Slicer trame during the PW42 NAMIC Week +2. Create a 3D Slicer extension to run Slicer trame server from 3D Slicer +3. Create a new template of scripted modules compatible both with 3D Slicer and Slicer trame + + + +## Progress and Next Steps + + + + +1. Describe specific steps you **have actually done**. + + + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +- trame : [https://kitware.github.io/trame/](https://kitware.github.io/trame/) +- trame intro course : [https://kitware.github.io/trame/guide/intro/course.html](https://kitware.github.io/trame/guide/intro/course.html) diff --git a/PW42_2025_GranCanaria/Projects/BuildsOfSlicerForArmBasedSystemsMacAndLinux/README.md b/PW42_2025_GranCanaria/Projects/BuildsOfSlicerForArmBasedSystemsMacAndLinux/README.md new file mode 100644 index 000000000..fef222265 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/BuildsOfSlicerForArmBasedSystemsMacAndLinux/README.md @@ -0,0 +1,182 @@ +--- +layout: pw42-project +permalink: /:path/ +project_title: Builds of Slicer for ARM-based systems Mac and Linux +category: Infrastructure + +key_investigators: +- name: Andres Diaz-Pinto + affiliation: NVIDIA + country: UK +- name: Steve Pieper + affiliation: Isomics + country: USA +- name: Rafael Palomar + affiliation: OUH + country: Norway +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware + country: USA +- name: Andras Lasso + affiliation: Queen's University + country: CA +- name: Mauro I. 
Dominguez + affiliation: Independent + country: Argentina +- name: Robin Peretzke + affiliation: German Cancer Research Center + country: Germany +--- + +# Project Description + +Investigate, document, and fix any issues related to building Slicer from source on ARM-based systems (e.g. Apple silicon or Nvidia linux systems). + +During the Slicer Week, we plan to have a working 3D Slicer version on ARM architecture. This version will facilitate the use of Slicer for volume rendering and access to other modules with segmentation AI models for interaction with radiology copilots. We'll have virtual (ssh) access to an IGX box for testing this and finding the gaps for a complete solution to ARM version of 3DSlicer. + +## Objective + +1. Achieve a functional 3D Slicer build on ARM architecture, specifically targeting Apple silicon and Nvidia linux systems. +2. Identify and resolve issues related to volume rendering, segmentation AI models, and radiology copilot interactions on ARM-based systems. + +## Approach and Plan + +1. Set up virtual (ssh) access to an NVIDIA IGX box for testing and development. +2. Compile and build 3D Slicer from source on the ARM-based system. +3. Test volume rendering capabilities and identify any performance issues or incompatibilities. +4. Integrate and test segmentation AI models on the ARM version of 3D Slicer. +5. Evaluate the interaction between 3D Slicer and radiology copilots on the ARM architecture. +6. Document all encountered issues, workarounds, and solutions. +7. Collaborate with the Slicer community to implement necessary fixes and optimizations. + +## Progress and Next Steps + +1. Initiated project planning and team coordination. +2. Secured access to NVIDIA IGX box for development and testing purposes. +3. Began preliminary research on existing ARM-related issues in the Slicer GitHub repository. +4. Environment setup on the NVIDIA IGX box. +5. 
Build scripts published at [https://gist.github.com/jcfr/487f5d846bc86e374969be5565c6d95e](https://gist.github.com/jcfr/487f5d846bc86e374969be5565c6d95e) +6. Submitted Slicer pull request updating external project to support externally built TBB libraries. See [PR-8202](https://github.com/Slicer/Slicer/pull/8202) +7. Slicer built on Ubuntu 22.04 (aarch64) ✨✅ + +``` +$ uname -a +Linux demos-NVIDIA-IGX-Orin-Development-Kit 5.15.0-1012-nvidia-tegra-igx #12-Ubuntu SMP Wed Apr 24 15:57:28 UTC 2024 aarch64 aarch64 aarch64 GNU/Linux +``` + +``` +$ cd ~/Projects +$ ./build-CTKAppLauncher.sh +[...] +[100%] Built target CTKAppLauncher +Install the project... +-- Install configuration: "Release" +-- Up-to-date: /home/demos/Projects/CTKAppLauncher-install/bin/CTKAppLauncher +-- Up-to-date: /home/demos/Projects/CTKAppLauncher-install/CMake/ctkAppLauncher.cmake +-- Up-to-date: /home/demos/Projects/CTKAppLauncher-install/CMake/ctkAppLauncher-configure.cmake +-- Up-to-date: /home/demos/Projects/CTKAppLauncher-install/bin/CTKAppLauncherSettings.ini.in +-- Up-to-date: /home/demos/Projects/CTKAppLauncher-install/./CTKAppLauncherConfig.cmake + +$ ./build-tbb.sh +[...] +[100%] Built target tbb +Install the project... +-- Install configuration: "Release" +-- Up-to-date: /home/demos/Projects/tbb-install/lib/libtbb.so.12.15 +-- Up-to-date: /home/demos/Projects/tbb-install/lib/libtbb.so.12 +-- Up-to-date: /home/demos/Projects/tbb-install/lib/libtbb.so +[...] + +$ ./build-Slicer.sh +[...] +[ 99%] No install step for 'Slicer' +[ 99%] Forcing configure step for 'Slicer' +[100%] Completed 'Slicer' +[100%] Built target Slicer +``` + +Next steps: +- Begin testing basic functionality and identify initial challenges. + +# Illustrations + + + +# Main Challenges from previous Project Week 2024 + +Building and running 3D Slicer on the NVIDIA IGX box (ARM architecture) involves several steps: + +1. **Qt**: There are no Qt binary packages for arm64. + +2. 
**CTKAppLauncher** This launcher is needed to set up Qt paths so that dynamically-loaded Qt libraries can be found. Since Qt paths are not set up for the launcher itself, the launcher has to be built statically (all the libraries have to be linked into the executable). We don’t build CTKAppLauncher executable as part of the Slicer build process because static linking of Qt libraries would require a paid Qt license. [Link to comments](https://discourse.slicer.org/t/how-to-modify-ctkapplauncherlib/32972/3) + +3. **Threading Building Blocks (TBB)** This has to be built against the Qt version available on the system + +4. **VTK**: VTK needs to be built with Qt and Python support on ARM. + +5. **CUDA Library Compatibility**: Some Holoscan applications that rely on CUDA have shown instability due to library differences, leading to crashes when deallocating memory. + +6. **Long-term Maintenance**: Maintaining these custom builds for ARM devices can pose a significant long-term effort unless a broader community helps to support and test the codebase. + +7. **Component Disabling**: Some Qt components may need to be disabled due to build errors, potentially limiting functionality. + +**Note:** 3D Slicer works on Mac with ARM architecture (Apple Silicon) primarily due to Apple's Rosetta 2 translation layer, which allows x86_64 applications to run on ARM-based Macs. This translation layer is not available on other ARM-based architectures like the NVIDIA IGX. While 3D Slicer is not yet natively compiled for ARM on Mac, the Rosetta 2 emulation is efficient enough to provide good performance. In contrast, other ARM-based systems like the IGX would require a native ARM build of 3D Slicer, which is not yet available. + + +## General Notes: Building Slicer in [Ubuntu 22.04 (x86)](https://slicer.readthedocs.io/en/latest/developer_guide/build_instructions/linux.html#ubuntu-22-04-jammy-jellyfish): + +This section provides instructions for building Slicer on x86 systems: + +0. 
Install the development tools and the support libraries: + +```console +sudo apt update && sudo apt install git build-essential \ + cmake cmake-curses-gui cmake-qt-gui \ + libqt5x11extras5-dev qtmultimedia5-dev libqt5svg5-dev qtwebengine5-dev libqt5xmlpatterns5-dev qttools5-dev qtbase5-private-dev \ + libxt-dev libssl-dev +``` + +1. Create a folder called Slicers where you will clone the Slicer repository and create the build directory: + +```console +mkdir ~/Slicers +cd ~/Slicers +``` + +2. Clone the Slicer repository: + +```console +git clone https://github.com/Slicer/Slicer.git +``` +3. Locate the Qt installation. It is typically found in /opt/qt/. For example: /opt/qt/5.15.2/gcc_64/lib/cmake/Qt5 + +4. Run the following bash commands to set up the environment and build Slicer: + +```console +# Set variables +export Slicer_RELEASE_TYPE=Stable +export SLICER_CODE_PATH=/home/$USER/Slicers/Slicer +export SLICER_SUPERBUILD_PATH=/home/$USER/Slicers/SlicerR +export SLICER_BUILD_PATH=$SLICER_SUPERBUILD_PATH/Slicer-build +export NUMBER_OF_SLICER_COMPILATION_JOBS=2 + +# Create build directory +mkdir -p $SLICER_SUPERBUILD_PATH +cd $SLICER_SUPERBUILD_PATH + +# Configure Slicer build +cmake -DCMAKE_BUILD_TYPE:STRING=Release -DQt5_DIR:PATH=/opt/qt/5.15.2/gcc_64/lib/cmake/Qt5 $SLICER_CODE_PATH + +# Build Slicer +time make -j$NUMBER_OF_SLICER_COMPILATION_JOBS +``` + + +# Background and References + +The effort to build 3D Slicer on ARM architecture is part of a broader initiative to expand the platform's compatibility and leverage the capabilities of modern ARM-based systems. This project aligns with the growing trend of ARM adoption in various computing environments, from mobile devices to high-performance computing. 
+ + + +- [Slicer ARM-related Issues](https://github.com/Slicer/Slicer/issues?q=is%3Aissue+is%3Aopen+arm) +- [NVIDIA IGX Platform](https://www.nvidia.com/en-gb/edge-computing/products/igx/) diff --git a/PW42_2025_GranCanaria/Projects/ComputeShadersAndCustomizedShadersForSurfaceRendering/README.md b/PW42_2025_GranCanaria/Projects/ComputeShadersAndCustomizedShadersForSurfaceRendering/README.md new file mode 100644 index 000000000..4658d318e --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/ComputeShadersAndCustomizedShadersForSurfaceRendering/README.md @@ -0,0 +1,68 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Compute shaders and customized shaders for surface rendering +category: VR/AR and Rendering + +key_investigators: + +- name: Simon Drouin + affiliation: ETS Montreal + country: Canada + +- name: Rafael Palomar + affiliation: Oslo University Hospital + country: Norway + +--- + +# Project Description + + + + +The volume rendering module in Slicer contains an infrastructure that makes it possible to specify shader replacements for the volume rendering mapper. These shader replacements are automatically considered in every 3D view, including virtual reality. + +The goal of this project is to implement a similar interface for surface rendering and to propose a generalized architecture (to be implemented later) that makes it simple to implement new rendering techniques that use an arbitrary combinations of volumes, surfaces, transfer functions, custom parameters and custom shaders and may include preprocessing steps based on Compute Shaders + + + +## Objective + +1. Implement shader replacement for surface rendering in Slicer +2. Validate the possibility to use Compute Shaders in the Slicer OpenGL context and determine the interface for doing so +3. 
Come up with a long term plan for an infrastructure that would facilitate the implementation of new rendering techniques that use arbitrary combinations of volumes, surfaces, transfer functions, custom parameters and custom shaders and may include preprocessing steps based on Compute Shaders. + +## Approach and Plan + +1. Replicate the shader replacement mechanism architecture of the volume rendering module in the models module +2. Create a prototype modification of the volume rendering module that can run a simple ComputeShader (e.g. gaussian blur) on the volume before rendering. +3. Determine how mappers can make use of textures already present in the GPU rather than uploading volumes to textures. +4. Volume rendering: Use multiple perfectly overlapping volumes without paying the overhead for multi volume rendering + +## Progress and Next Steps + +1. Discussions with all parties interested in advanced graphics in Slicer allowed us to identify the list of requirements for future graphics programming capabilities in Slicer: + * **Target experienced graphics programmers with no VTK and Slicer experience**. The rationale for that requirement is that many graphics programmers develop valuable rendering methods directly on basic APIs (OpenGL, DirectX, Vulkan), on prototyping systems (ex. [InViwo](https://inviwo.org/)), or game engines, all of which make it difficult for the more clinically oriented research community and clinical users to use them on a daily basis. We would like to create an incentive for porting to Slicer. + * **Provide basic implementation of surface and volume rendering that can take an arbitrary number of inputs of various type**. Although current volume rendering infrastructure enables multiple input volumes and shader replacement, the infrastructure is relatively rigid, using multiple volumes is slow because it assumes they are not perfectly overlapping, and coordination between input volumes and shader variables is complicated. 
+ * **Provide Compute Shaders that can be assembled in pipelines with an arbitrary number of inputs and outputs**. The input of the pipeline could include Slicer nodes that represent volumes, surfaces, color tables, transfer functions and transforms. The output of the pipeline would be connectable to Surface and Volume rendering implementations mentioned above without having to bring the data back to the CPU memory. + * **Make all the above-mentioned functionality customizable from the Python interface**. One simple way to fulfill this requirement is to enable pipeline construction from Python as well as shader specification. The shader code should declare its inputs and outputs so that Slicer can determine automatically how the pipeline can be connected and automatically assign inputs and outputs to the proper shader variables. +1. The AR/VR and Rendering breakout session involved a discussion with Sankesh and Jaswant from Kitware, who are both involved in the development of the WebGPU backend to replace OpenGL in VTK. The new backend promises to provide the appropriate infrastructure to implement the list of requirements identified above. + * The documentation for the WebGPU backend is available [here](https://docs.vtk.org/en/latest/modules/vtk-modules/Rendering/WebGPU/README.html) + * The current implementation already supports **Surface Rendering** and an infrastructure for **Compute Shader** pipelines. + * Volume rendering is not implemented at this point +1. The next step for the community should be to create an experimental rendering module that can wrap the existing WebGPU backend feature and enable the construction of simple rendering pipelines for Surface rendering and make sure the construction of pipelines is possible through the Python interface. + +# Illustrations +Custom volume rendering done in Unity VR, to be ported to VTK and 3D Slicer. The current implementation relies on Compute Shaders. 
+ + +# Background and References + + + + +* Related previous project: [https://projectweek.na-mic.org/PW41_2024_MIT/Projects/PrismVolumeRenderer/](https://projectweek.na-mic.org/PW41_2024_MIT/Projects/PrismVolumeRenderer/) diff --git a/PW42_2025_GranCanaria/Projects/ConversionOfBoneMarrowSmearDatasetFromMiraxFormatIntoDicom/README.md b/PW42_2025_GranCanaria/Projects/ConversionOfBoneMarrowSmearDatasetFromMiraxFormatIntoDicom/README.md new file mode 100644 index 000000000..06e64348f --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/ConversionOfBoneMarrowSmearDatasetFromMiraxFormatIntoDicom/README.md @@ -0,0 +1,108 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Conversion of bone marrow smear dataset from MIRAX format into DICOM +category: DICOM + +key_investigators: + +- name: Daniela Schacherer + affiliation: Fraunhofer MEVIS + +- name: David Clunie + affiliation: PixelMed + country: USA + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +--- + +# Project Description + + + + +As the DICOM standard is increasingly used in digital pathology imaging, conversion of available datasets from proprietary formats into DICOM format can make the data more FAIR and improve transparency and reproducibility of research conducted with these data. For this reason, the NCI Imaging Data Commons (IDC) hosts all its data in DICOM format. + +A set of bone marrow smear WSI available in MIRAX (.mrxs) format are to be ingested into the IDC. For that purpose they need to be converted into DICOM (.dcm) along with all available image and clinical metadata. +In addition, this dataset contains extensive deep-learning generated nuclei annotations (bounding boxes) that should also be converted into DICOM in a suitable way. + + + +## Objective + + + + 1. **Objective A**: Have a working script for the conversion of the complete set of bone marrow smear WSI into DICOM format based on [wsidicomizer](https://github.com/imi-bigpicture/wsidicomizer). + 2. 
**Objective B**: Include clinical metadata in an IDC-conformant way. + 3. **Objective C (optional)**: Have a script that converts the nuclei annotations into DICOM. Consider this issue: https://github.com/imi-bigpicture/wsidicomizer/issues/56 + + +## Approach and Plan + + + +**Objective A** +1. Implement and verify code for basic conversion of the .mrxs files as is into .dcm. +2. Investigate automatically filled metadata (including pixel spacing). wsidicomizer's default data can be found [here](https://github.com/imi-bigpicture/wsidicom/tree/ab16e38c678b4bb6eb8e2c670d4c7278c67edf03/wsidicom/metadata), an overview of attributes for VL Whole Slide Microscopy IOD [here](https://dicom.innolitics.com/ciods/vl-whole-slide-microscopy-image). +3. Add code for ingestion of metadata that are not obtained from the .mrxs files / correct potential falsely estimated metadata (via wsidicom API or JSON file). +4. Verify correct conversion with dciodvfy on every file and dcentvfy on every set of files in a series. +5. Have a few successfully converted samples and be ready to run code on complete collection. + +**Objective B** +1. Prepare additional clinical and lab data as table such that they can be ingested into IDC as BigQuery table. + +**Objective C (optional)**: +1. Discuss and decide in what way available annotations can be best encoded in DICOM. +2. Implement conversion pipeline for annotation conversion based on IDC annotation conversion code by Chris Bridge. + +## Progress and Next Steps + + + +**Objective A**: +- We successfully wrote a conversion pipeline for .mrxs files into .dcm using wsidicomizer and have a couple of converted files. A few issues have been identified on the way, reported to wsidicomizer and mostly already been fixed by Erik Gabriellson. + +**Objective B**: +- to be done + +**Objective C**: +- We discussed and decided that the best way to encode available annotations is in DICOM Microscopy Bulk Simple Annotations. 
+ +**Next steps**: +- Run conversion script on whole dataset. +- Do Objective B: Prepare additional clinical and lab data as table such that they can be ingested into IDC as BigQuery table. +- Finish and run annotation conversion pipeline. + + +# Illustrations + + + +![Example image of bone marrow smears](./bone_marrow_smear.png) \ +*Example image of bone marrow smears. Taken from: [https://doi.org/10.1177/1040638712452731](https://doi.org/10.1177/1040638712452731).* + + +# Background and References + + + + +Background reading: +- Herrmann, M. D., Clunie, D. A., Fedorov, A., Doyle, S. W., Pieper, S., Klepeis, V., Le, L. P., Mutter, G. L., Milstone, D. S., Schultz, T. J., Kikinis, R., Kotecha, G. K., Hwang, D. H., Andriole, K. P., John Lafrate, A., Brink, J. A., Boland, G. W., Dreyer, K. J., Michalski, M., Golden, J. A., Louis, D. N. & Lennerz, J. K. Implementing the DICOM standard for digital pathology. J. Pathol. Inform. 9, 37 (2018). [http://dx.doi.org/10.4103/jpi.jpi_42_18]( http://dx.doi.org/10.4103/jpi.jpi_42_18) +- Clunie, D. A. DICOM format and protocol standardization-A core requirement for digital pathology success. Toxicol. Pathol. 49, 738–749 (2021). 
[http://dx.doi.org/10.1177/0192623320965893](http://dx.doi.org/10.1177/0192623320965893) + +Further resources: +- [IDC Portal](https://portal.imaging.datacommons.cancer.gov/) +- Description of MIRAX format: [Introduction to MIRAX/MRXS](https://lists.andrew.cmu.edu/pipermail/openslide-users/2012-July/000373.html) +- Conversion tool: [wsidicomizer](https://github.com/imi-bigpicture/wsidicomizer) +- Annotation conversion scripts by Chris Bridge: [idc-sm-annotations-conversion](https://github.com/ImagingDataCommons/idc-sm-annotations-conversion/tree/0a5060d44e25f6cfa78074f0dde7a1ca1aa6bc53) +- Useful tools for inspection/verification of DICOM files: [dcmtk](https://dicom.offis.de/en/dcmtk/dcmtk-tools/), [dicom3tools](https://www.dclunie.com/dicom3tools.html) +- Related project from this project week: [Evaluation of imi-bigpicture/wsidicomizer as a tool for conversion into DICOM whole slide imaging format](https://projectweek.na-mic.org/PW42_2025_GranCanaria/Projects/EvaluationOfImiBigpictureWsidicomizerAsAToolForConversionIntoDicomWholeSlideImagingFormat/) +- Related earlier project from PW40: [WSI-DICOM Improvement - From Viewer to Analysis](https://projectweek.na-mic.org/PW40_2024_GranCanaria/Projects/WsiDicomImprovementFromViewerToAnalysis/) diff --git a/PW42_2025_GranCanaria/Projects/ConversionOfBoneMarrowSmearDatasetFromMiraxFormatIntoDicom/bone_marrow_smear.png b/PW42_2025_GranCanaria/Projects/ConversionOfBoneMarrowSmearDatasetFromMiraxFormatIntoDicom/bone_marrow_smear.png new file mode 100644 index 000000000..4acadcab2 Binary files /dev/null and b/PW42_2025_GranCanaria/Projects/ConversionOfBoneMarrowSmearDatasetFromMiraxFormatIntoDicom/bone_marrow_smear.png differ diff --git a/PW42_2025_GranCanaria/Projects/CreatingDicomCompatibleCancerAnnotationsForNlst/README.md b/PW42_2025_GranCanaria/Projects/CreatingDicomCompatibleCancerAnnotationsForNlst/README.md new file mode 100644 index 000000000..cbefdaa54 --- /dev/null +++ 
b/PW42_2025_GranCanaria/Projects/CreatingDicomCompatibleCancerAnnotationsForNlst/README.md @@ -0,0 +1,103 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Creating DICOM-compatible cancer annotations for NLST +category: DICOM + +key_investigators: + +- name: Deepa Krishnaswamy + affiliation: Brigham and Women's Hospital + country: USA + +- name: Suraj Pai + affiliation: Brigham and Women's Hospital + country: USA + +- name: Leonard Nürnberg + affiliation: Brigham and Women's Hospital + country: USA + +- name: Andrey Fedorov + affiliation: Brigham and Women's Hospital + country: USA + +- name: David Clunie + affiliation: PixelMed Publishing + country: USA +--- + +# Project Description + + + + +The National Lung Screening Trial (NLST) is one of the largest lung cancer collections, with over 25K patients. In Imaging Data Commons (IDC), we have segmentations of anatomical regions using the TotalSegmentator model, but, we are missing any annotations of cancer. + +There were several initiatives to add cancer nodule annotations to NLST data in IDC. One set of nodule segmentations was created from an AI model from [this initiative]([https://zenodo.org/records/10081112](https://zenodo.org/records/10081112)), but only a percentage of them have been verified by an expert. + +However, there is one initiative from MIT ([https://github.com/reginabarzilaygroup/Sybil](https://github.com/reginabarzilaygroup/Sybil)) that had experts annotate center points and bounding boxes for nodules in NLST patients. Our plan is to convert these json annotations to DICOM Structured Reports, which can then be ingested into IDC and displayed. + + + +## Objective + + + + +We will first convert the json point annotations to DICOM Structured Reports. Then we will ingest them into a DICOM datastore, and deploy our own OHIF application to display the points overlaid on the image data. + + + + +## Approach and Plan + + + + +1. 
We will understand the format of the json files by plotting them in Slicer. +2. We will create a DICOM SR for a patient, starting with one point annotation per patient. +3. We will store these DICOM SR objects in a DICOM data store. +4. We will deploy OHIF, and display our point annotations along with the image. +5. If that works, we will add the ability for the DICOM SR to store multiple annotations. + + + +## Progress and Next Steps + + + + +1. We have started to understand the format of the json files by plotting them in Slicer. + +![image](https://github.com/user-attachments/assets/2bd2dc91-378b-42fa-bc0b-47e5b8051064) + +2. We successfully created a DICOM Structured Report for holding the cancer annotation. +3. Later we will create bounding box DICOM Structured Reports. + + +# Illustrations + + + + + + +# Background and References + + + +[Current Google Colab notebook](https://colab.research.google.com/drive/1E_LkCbCqhJTLJ__TPMjNt7bx7tyL-cyw?usp=sharing) + +Resources from David Clunie: +1. [Understanding ImagePatientPosition and ImageOrientation](https://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.2.html) +2. [DICOM SR for understanding planar annotations](https://www.researchgate.net/publication/353243563_DICOM_SR_for_communicating_planar_annotations_-_An_Imaging_Data_Commons_IDC_White_Paper) +3. 
[OHIF github issues](https://github.com/OHIF/Viewers/issues/1215) diff --git a/PW42_2025_GranCanaria/Projects/CreatingLinuxDistroAgnosticBinariesForPlastimatch/README.md b/PW42_2025_GranCanaria/Projects/CreatingLinuxDistroAgnosticBinariesForPlastimatch/README.md new file mode 100644 index 000000000..0f422eed3 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/CreatingLinuxDistroAgnosticBinariesForPlastimatch/README.md @@ -0,0 +1,87 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Creating Linux distro-agnostic binaries for Plastimatch +category: Infrastructure + +key_investigators: + +- name: Paolo Zaffino + affiliation: Magna Graecia University of Catanzaro + country: Italy + +- name: Andrey Fedorov + affiliation: Brigham and Women's Hospital, Harvard Medical Schools + country: USA + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware Inc. + country: USA + +--- + +# Project Description + + + + +Plastimatch is an open-source software for medical image processing and registration. +It is written in C++ and can be built from source in Linux and Windows. +Binary packages are available for Windows, Debian (plus its derivatives), and Arch Linux users. +No Linux distro-agnostic binaries are currently available. +During this week we will explore the possibility of creating a binary version of plastimatch easily portable/deployed in different Linux distros. + + + +## Objective + + + + +1. Create a Plastimatch distro-agnostic binary package + + + +## Approach and Plan + + + + +1. Prepare a fresh build environment +2. Compile Plastimatch from source +3. Figure out how to bundle/embed the required libraries + + + +## Progress and Next Steps + + + + +1. A bash script to compile Plastimatch and its dependencies has been written +2. We decided to use ManyLinux 2.28 docker container as a building environment +3. We found out the Plastimatch file probe fails to identify the dicomRT (this happens only when we move the binary) +4. 
We found out it was needed to set -DDCMTK_DEFAULT_DICT=builtin -DDCMTK_ENABLE_PRIVATE_TAGS=ON during DCMTK configuration step + + + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +- [https://gitlab.com/plastimatch/plastimatch](https://gitlab.com/plastimatch/plastimatch) diff --git a/PW42_2025_GranCanaria/Projects/CrowdVsModelGeneratedSBOSegmentation/README.md b/PW42_2025_GranCanaria/Projects/CrowdVsModelGeneratedSBOSegmentation/README.md new file mode 100644 index 000000000..6912e058d --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/CrowdVsModelGeneratedSBOSegmentation/README.md @@ -0,0 +1,90 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Comparison of crowd sourced vs. model generated accuracy on abdominal ultrasound +category: Segmentation / Classification / Landmarking + +key_investigators: +- name: Jacqueline Foody + affiliation: Centaur Labs/MGB + country: USA + +- name: Hallee Wong + affiliation: MIT + country: USA + +- name: Mike Jin + affiliation: Centaur Labs/MGB + country: USA + +- name: Tina Kapur + affiliation: MGB + country: USA +--- + +# Project Description + +Segmenting small bowel from abdominal ultrasound images is a challenging task, even for highly trained physicians. However, it may be a powerful way to diagnose small bowel obstruction. We employed the Centaur AI platform to leverage a crowd of labelers, by training them on a dataset of labels generated by a consensus expert physicians. For this project, we wanted to explore whether a model given the context of this specific task in the form of a few segmented frames can perform well. + +## Objective + +1. Objective A. Implement [MultiverSeg](https://multiverseg.csail.mit.edu/) for predictions on abdominal ultrasound images. +2. Objective B. Evaluate the accuracy of the model for generating segmentations relative to the crowd consensus by comparing the resulting bowel diameters. + +## Approach and Plan + +1. 
Set up MultiverSeg: [https://github.com/halleewong/MultiverSeg](https://github.com/halleewong/MultiverSeg) +2. Evaluate how the model performs using an increasing number of context frames from the same patient, and separately from different patients +3. Similarly, add in user input in the form of positive and negative clicks with the context frames. +4. Compare the performance of these methods by evaluating the resulting bowel diameter. + +## Progress and Next Steps + +1. 2-3 frames from the same patient clip were sufficient context to achieve consistent results, and adding more didn't appear to improve the results +2. Tested up to 30 context frames from a set of 10 randomly selected patients. While there was some improvement in adding >15 context frames, the model struggled to identify the bowel in new patients. +### Given 20 context frames from a set of 10 randomly selected patient clips +Prediction: 262.968 +Ground truth: 338.373 +ICC(2,1): -0.296 +95% CI: (-0.590, 0.071) +image +### Given 20 context frames from a set of 10 randomly selected patient clips, with 2 positive & 2 negative support points +Prediction: 280.856 +Ground truth: 338.373 +ICC(2,1): -0.192 +95% CI: (-0.514, 0.180) +image +### Given 2 context frames from the same clip +Prediction: 313.465 +Ground truth: 338.373 +ICC(2,1): 0.748 +95% CI: (0.524, 0.876) +image +### Given 2 context frames from the same clip, with 2 positive & 2 negative support points +Prediction: 305.765 +Ground truth: 338.373 +ICC(2,1): 0.784 +95% CI: (0.584, 0.895) +image + +# Illustrations +Example of Crowd Segmentations:
+![crowd_example](https://github.com/user-attachments/assets/77cbd081-2fe6-4bef-906a-71ff70407eaa) +
+Example of Expert Segmentations Demonstrating Bowel Diameter:
+![expert_example](https://github.com/user-attachments/assets/ef8bba58-7080-40f2-ac9b-8874f8cc6418) + +# Background and References + +- [https://github.com/halleewong/MultiverSeg](https://github.com/halleewong/MultiverSeg) +- [https://github.com/halleewong/ScribblePrompt](https://github.com/halleewong/ScribblePrompt) + +# Relevant Publications: + +Wong, H.E., Ortiz, J.J.G., Guttag, J. & Dalca, A.V., (2024). MultiverSeg: Scalable Interactive Segmentation of Biomedical Imaging Datasets with In-Context Guidance. arXiv preprint arXiv:2412.15058. +[paper](https://arxiv.org/abs/2412.15058) [code](https://github.com/halleewong/MultiverSeg) + +Wong, H.E., Rakic, M., Guttag, J., & Dalca, A.V., (2024). ScribblePrompt: Fast and Flexible Interactive Segmentation for Any Biomedical Image. In European Conference on Computer Vision. +[paper](https://arxiv.org/abs/2312.07381) [code](https://github.com/halleewong/ScribblePrompt) diff --git a/PW42_2025_GranCanaria/Projects/Cursed/README.md b/PW42_2025_GranCanaria/Projects/Cursed/README.md new file mode 100644 index 000000000..1f16b1b40 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/Cursed/README.md @@ -0,0 +1,107 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: DICOM WSI Interoperability with Kaapana +category: DICOM + +key_investigators: + +- name: Maximilian Fischer + affiliation: German Cancer Research Center + country: Heidelberg + +- name: Marco Nolden + affiliation: German Cancer Research Center + country: Heidelberg + +- name: Klaus Maier-Hein + + affiliation: German Cancer Research Center + country: Heidelberg + +- name: Daniela Schacherer + affiliation: Fraunhofer MEVIS + country: Germany + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +- name: David Clunie + affiliation: Pixelmed Publishing + country: USA + +--- + +# Project Description + + + + +Kaapana is an open-source research platform 
for radiological imaging, featuring a range of infrastructure components. While some of these components natively support DICOM WSI (Whole Slide Imaging), others do not, necessitating the integration of alternative solutions to enable the processing of DICOM WSI data within the platform. The currently integrated components were selected based on their ability to provide fundamental functionality for DICOM WSI data processing, with a focus on being open-source and ensuring high interoperability. + +Since the initial integration, newer tools and technologies have been developed that may better meet Kaapana's requirements. The objective of this project is to reassess the existing components and evaluate whether additional or alternative components should be incorporated. This evaluation aims to enhance the overall processing capabilities for WSI data within Kaapana and further improve the interoperability of the system. + +Some components, such as the integrated PACS (dcm4che), are predefined as they form the foundation of the entire platform. Other components, such as the SlimViewer, were integrated on top because it was one of the first viewers capable of connecting to a DICOMweb PACS system. + +Goal of this project is to evaluate all exchangeable components to ensure optimal support of DICOM WSI and interoperability with other systems (e.g. for generated analysis results of WSI within the platform). + + + +## Objective + + + + +1. Identification of available alternatives to visualize DICOM WSI files +2. Test integration possibilities in Kaapana +3. Document results + + + + +## Approach and Plan + + + + +1. Bind other viewing components like QuPath to the data storage in Kaapana +2. Run dl-based analysis of DICOM WSI data with different reading libraries (openslide vs. wsidicom) +3. Visualize exported results (e.g. heatmaps as StructuredReport) with external DICOM WSI viewers. + + + +## Progress and Next Steps + + + +1. 
Setup of public Kaapana server in preparation of the DICOM conectathon at: 172.16.17.47 +2. Some progress in containerizing QuPath within Kaapana to serve as viewer from Objectstorage (NOT from PACS) +3. Evaluating [https://github.com/huangch/openremoteslide](https://github.com/huangch/openremoteslide) as additional extension for communication with external server +4. Testing some interoperability components for Zeiss WSI files. + + + + +# Illustrations + +Screenshot 2025-01-31 at 11 02 27 + + + +_No response_ + + + +# Background and References + +* Conversion routines for SR, ANN and fractional SEG developed by Chris Bridge / IDC: [https://github.com/ImagingDataCommons/idc-sm-annotations-conversion/tree/main](https://github.com/ImagingDataCommons/idc-sm-annotations-conversion/tree/main) for the dataset available in [https://zenodo.org/records/14041167](https://zenodo.org/records/14041167) +* Tutorials created by Daniela in IDC for ANN and SR: [https://github.com/ImagingDataCommons/IDC-Tutorials/tree/master/notebooks/pathomics](https://github.com/ImagingDataCommons/IDC-Tutorials/tree/master/notebooks/pathomics) (ANN) and [https://github.com/ImagingDataCommons/IDC-Tutorials/tree/master/notebooks/collections_demos/rms_mutation_prediction](https://github.com/ImagingDataCommons/IDC-Tutorials/tree/master/notebooks/collections_demos/rms_mutation_prediction) (SR) +* Support of external PACS (eg, Googe Healthcare DICOM stores) in Kaapana: [https://codebase.helmholtz.cloud/kaapana/kaapana/-/merge_requests/415](https://codebase.helmholtz.cloud/kaapana/kaapana/-/merge_requests/415), [https://codebase.helmholtz.cloud/kaapana/kaapana/-/issues/1191](https://codebase.helmholtz.cloud/kaapana/kaapana/-/issues/1191) + + + +_No response_ diff --git a/PW42_2025_GranCanaria/Projects/DecaDenseCorrespondenceAnalysisToolkitForShapeAnalysis/README.md 
b/PW42_2025_GranCanaria/Projects/DecaDenseCorrespondenceAnalysisToolkitForShapeAnalysis/README.md new file mode 100644 index 000000000..7529323c7 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/DecaDenseCorrespondenceAnalysisToolkitForShapeAnalysis/README.md @@ -0,0 +1,97 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: 'DeCA: Dense Correspondence Analysis Toolkit for Shape Analysis' +category: Quantification and Computation + +key_investigators: + +- name: Sara Rolfe + affiliation: Seattle Children's + country: USA + +- name: Murat Maga + affiliation: Seattle Children's + country: USA + +--- + +# Project Description + + + + +DeCA (Dense Correspondence Analysis) is an open-source tool for biologists and other researchers using 3D imaging. DeCA integrates biological insights in the form of homologous landmark points with dense surface registration to provide highly detailed shape analysis of smooth and complex structures that are typically challenging to analyze with sparse manual landmarks alone. + +Currently, DeCA exists as a prototype that can be run within 3D Slicer. We have collected preliminary feedback from initial users to improve the interface and workflow. The goal of this project is make and test these updates and publish DeCA as an extension. + + + +## Objective + + + + +1. Objective A. Implement an improved workflow for the DeCA module +2. Objective B. Publish DeCA extension + + + +## Approach and Plan + + + + +1. Update the DeCA interface to simplify running analysis +3. Demo/test improvements and collect feedback +4. Document workflow +5. Publish extension + + + + +## Progress and Next Steps + + + + +1. Document changes from initial user testing +2. Meetings to discuss workflow/interface and future applications to new datasets +3. Completion of user interface changes with two fully automated workflows +4. Updates to GUI support and logic functions to support new workflow +5. Landmark subsetting function added to DeCAL +6. 
Testing of DeCAL dense landmarking + + + + +# Illustrations + + + + +![Image](https://github.com/user-attachments/assets/195e437a-abb5-49e1-bd8d-59ed6a00535a) + +DeCA prototype + +DeCA new workflow interface +DeCAL new workflow interface + + + + +# Background and References + + + + +Source: [https://github.com/smrolfe/DeCA](https://github.com/smrolfe/DeCA) + +Publications: +- Rolfe, S. M., and A. Murat Maga. "DeCA: A Dense Correspondence Analysis Toolkit for Shape Analysis." International Workshop on Shape in Medical Imaging. Cham: Springer Nature Switzerland, 2023. + +- Rolfe, S. M., Mao, D., & Maga, A. M. (2024). Streamlining Asymmetry Quantification in Fetal Mouse Imaging: A Semi-Automated Pipeline Supported by Expert Guidance. bioRxiv, 2024-10. diff --git a/PW42_2025_GranCanaria/Projects/DeployingOvsegInSlicer/README.md b/PW42_2025_GranCanaria/Projects/DeployingOvsegInSlicer/README.md new file mode 100644 index 000000000..9f007c9bd --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/DeployingOvsegInSlicer/README.md @@ -0,0 +1,84 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Deploying OvSeg in Slicer +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Paolo Zaffino + affiliation: Magna Graecia University of Catanzaro + country: Italy + +- name: Thomas Buddenkotte + affiliation: University Medical Center HamburgEppendorf + country: Germany + +--- + +# Project Description + + + + +[OvSeg](https://github.com/ThomasBudd/ovseg/) is a deep learning-based library for the segmentation of high-grade serous ovarian cancer on CT images. Right now, to obtain the segmentations the user has to write some lines of Python code, making the tool not directly usable by non-technical people. It would be great to expose this algorithm in Slicer to be used in a codeless manner. + + + +## Objective + + + + +1. 
Expose OvSeg in 3D Slicer in order to provide a scalar volume as input and obtain the segmentations as a segmentation node + + + + +## Approach and Plan + + + + +1. Create a Slicer extension +2. Let the extension install OvSeg via pip +3. Let the extension pull the CT volume, run the inference, and push back the segmentations + + + + +## Progress and Next Steps + + + + +1. Ovseg can be successfully used in Slicer +2. The code can be downloaded from [this GitHub repository](https://github.com/pzaffino/SlicerOvseg) + +3. We would like to include this extension in the Slicer Extension repository +4. We need a logo +5. We could decide to expose the extended ovseg version which provides more segmentation classes + + + + +# Illustrations + + + + +![Image](https://raw.githubusercontent.com/NA-MIC/ProjectWeek/refs/heads/master/PW42_2025_GranCanaria/Projects/DeployingOvsegInSlicer/SlicerOvseg_screenshot.png) + + +# Background and References + + + + +1. [https://github.com/ThomasBudd/ovseg/](https://github.com/ThomasBudd/ovseg/) +2. [https://www.repository.cam.ac.uk/items/d7d9011c-2518-4a7a-8b85-01b086d672fc](https://www.repository.cam.ac.uk/items/d7d9011c-2518-4a7a-8b85-01b086d672fc) +3. 
[https://eurradiolexp.springeropen.com/articles/10.1186/s41747-023-00388-z](https://eurradiolexp.springeropen.com/articles/10.1186/s41747-023-00388-z) diff --git a/PW42_2025_GranCanaria/Projects/DeployingOvsegInSlicer/SlicerOvseg_screenshot.png b/PW42_2025_GranCanaria/Projects/DeployingOvsegInSlicer/SlicerOvseg_screenshot.png new file mode 100644 index 000000000..5405ac788 Binary files /dev/null and b/PW42_2025_GranCanaria/Projects/DeployingOvsegInSlicer/SlicerOvseg_screenshot.png differ diff --git a/PW42_2025_GranCanaria/Projects/DeployingScribblepromptAndMultiversegForInteractiveSegmentationAsA3DSlicerExtension/README.md b/PW42_2025_GranCanaria/Projects/DeployingScribblepromptAndMultiversegForInteractiveSegmentationAsA3DSlicerExtension/README.md new file mode 100644 index 000000000..dbe68248a --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/DeployingScribblepromptAndMultiversegForInteractiveSegmentationAsA3DSlicerExtension/README.md @@ -0,0 +1,99 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Deploying ScribblePrompt and MultiverSeg for interactive segmentation as a 3D Slicer + extension +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Hallee Wong + affiliation: MIT + country: USA + +- name: Steve Pieper + affiliation: Isomics + country: USA + +--- + +# Project Description + + + + +We will develop a 3D slicer extension to deploy two interactive segmentation models aimed at helping researchers and clinicians perform new segmentation tasks: + +[ScribblePrompt](https://scribbleprompt.csail.mit.edu/) is a deep learning model that enables users to interactively segment an image using scribbles, clicks, and bounding boxes. The model is designed to generalize to new labels and types of biomedical images and uses a lightweight UNet-based architecture so it runs quickly even without a GPU. 
+ +[MultiverSeg](https://multiverseg.csail.mit.edu/) extends this interactive approach to speed up the segmentation of sets of similar images. Using the same interaction types as ScribblePrompt (scribbles, clicks, bounding boxes), the system learns from each segmentation to improve subsequent predictions. Given enough similar example segmentations, MultiverSeg can also automatically segment new images without any user interaction. + + + +## Objective + + + + +1. Implement a 3D slicer extension for interactive segmentation with ScribblePrompt using scribbles, clicks, and bounding boxes +2. Add MultiverSeg to the extension to enable interactive and automatic segmentation of sets of images (or slices from 3D volumes) +3. Compare to other interactive segmentation tools + + + + +## Approach and Plan + + + + +1. We will start by following the [tutorial](https://training.slicer.org/) for developing a 3D slicer extension + + +## Progress and Next Steps + + + + +1. Adapted the [SlicerSegmentWithSAM](https://github.com/mazurowski-lab/SlicerSegmentWithSAM) extension to use ScribblePrompt +2. Added support for storing and retrieving the previous prediction for each slice to use as input when updating the segmentation +3. Added support for positive and negative scribble inputs + +Next Steps: +1. Add support for bounding box inputs +2. Test and debug MultiverSeg predictions for slice-by-slice segmentation of volumes +3. Refactor the extension to integrate into SegmentEditor instead of being a standalone module + +# Illustrations + + + + + +# Background and References + + + +Relevant Publications: + +Wong, H.E., Rakic, M., Guttag, J., & Dalca, A.V., (2024). ScribblePrompt: Fast and Flexible Interactive Segmentation for Any Biomedical Image. In European Conference on Computer Vision. +[paper](https://arxiv.org/abs/2312.07381) [code](https://github.com/halleewong/ScribblePrompt) + +Wong, H.E., Ortiz, J.J.G., Guttag, J. & Dalca, A.V., (2024). 
MultiverSeg: Scalable Interactive Segmentation of Biomedical Imaging Datasets with In-Context Guidance. arXiv preprint arXiv:2412.15058. +[paper](https://arxiv.org/abs/2412.15058) [code](https://github.com/halleewong/MultiverSeg) + +Related 3D Slicer extensions: +- [MonaiLabel](https://github.com/Project-MONAI/MONAILabel) +- [MedSAM](https://github.com/bowang-lab/MedSAMSlicer) +- [FastSAM3D](https://github.com/arcadelab/FastSAM3D_slicer) +- [SAMM](https://github.com/bingogome/samm) +- [TomoSAM](https://github.com/fedesemeraro/SlicerTomoSAM) +- [SlicerSegmentWithSAM](https://github.com/mazurowski-lab/SlicerSegmentWithSAM) diff --git a/PW42_2025_GranCanaria/Projects/DeterminationOfSurgicalClassBasedOnTheCurvatureAndShapeOfTheCarotidSyphon/README.md b/PW42_2025_GranCanaria/Projects/DeterminationOfSurgicalClassBasedOnTheCurvatureAndShapeOfTheCarotidSyphon/README.md new file mode 100644 index 000000000..47c3b9a2f --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/DeterminationOfSurgicalClassBasedOnTheCurvatureAndShapeOfTheCarotidSyphon/README.md @@ -0,0 +1,90 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Determination of surgical class based on the curvature and shape of the carotid syphon +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Attila Tanács + affiliation: University of Szeged + country: Hungary + +- name: Ferenc Dezső Bakó + affiliation: University of Szeged + country: Hungary + +- name: Attila Nagy + affiliation: University of Szeged + country: Hungary + +--- + +# Project Description + + + + +Stroke is a leading cause of death worldwide of which ischaemic stroke is the more common. Mechanical thrombectomy involves inserting a catheter into the cerebral vasculature to remove blood clot. 
Catheter devices with different parameters are available to perform the procedure of which the correct one must be selected beforehand to avoid blockage. Clinical experience suggests that large lumen aspiration catheters were most commonly stuck at the anterior curvature of the carotid syphon. + +In literature Waihrich et al. proposed to use 2D X-Ray to measure angles of vessel segments for classification. In our previous work we extended this approach to 3D measuring the minimal angle along the centerline of the carotid syphon. Based on 49 studies classified manually into 5 surgical classes by a medical expert, it turned out that this angle alone is not a good indicator of surgery class. + + + +## Objective + + + + +1. Objective A. Determine the vessel cross section area/diameter at given vessel centerline points. +2. Objective B. Figure out what other numerical parameters could be extracted for classification. +3. Objective C. What methods could be used for classification taking into account the low number of studies. + + + +## Approach and Plan + + + + +1. We'd like to extend our Slicer extension module to produce the numbers for classification. + + + +## Progress and Next Steps + + + + +- Generating vessel cross-section tables for all studies using VMTK. +- Sampling vessel cross-section values at 9 given centerline points +around the middle point. +- 9 centerline points determine 8 line sections that determine 7 angles. +- Table is generated for all 49 studies containing 9 + 8 features. +- Next step is to use these values for classification (possibly using SVM +and fully connected shallow neural network). + + + + +# Illustrations + + + + +Vessel parts of the carotid syphon. 
+ +Image + + + +# Background and References + + + + +_No response_ diff --git a/PW42_2025_GranCanaria/Projects/DevelopmentOfA3DSlicerExtensionToSupportSamsungMedicalCenterProtocolBasedPreoperativePlanningForLiverTransplantationAndLiverCancerSurgery/README.md b/PW42_2025_GranCanaria/Projects/DevelopmentOfA3DSlicerExtensionToSupportSamsungMedicalCenterProtocolBasedPreoperativePlanningForLiverTransplantationAndLiverCancerSurgery/README.md new file mode 100644 index 000000000..a8da852b6 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/DevelopmentOfA3DSlicerExtensionToSupportSamsungMedicalCenterProtocolBasedPreoperativePlanningForLiverTransplantationAndLiverCancerSurgery/README.md @@ -0,0 +1,131 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Development of a 3D Slicer Extension to Support Samsung Medical Center Protocol Based + PreOperative Planning for Liver Transplantation and Liver Cancer Surgery +category: Infrastructure + +key_investigators: + +- name: Soyoung Lim + affiliation: Samsung Medical Center + country: Republic of Korea + +- name: Hyejeong Hong + affiliation: Samsung Medical Center + country: Republic of Korea + +--- + +# Project Description + + + + +This extension will be developed for use at Samsung Medical Center to support their protocol-based pre-operative planning for liver transplantation and liver cancer surgery using 3D Slicer. The extension will generate accurate 3D patient models from medical imaging data and enable surgical planning and simulation. This will improve the efficiency of the 3D reconstruction process. + +A recent study (Rhu et al., 2021) found that using 2D illustrations and 3D modeling of donor anatomy during living donor hepatectomy improved image guidance for liver transplantation procedures at Samsung Medical Center. 
The medical center has established a protocol for 3D reconstruction to generate accurate patient-specific 3D models from medical imaging data, enabling enhanced pre-operative planning and simulation. + +Since the summer of 2023, the medical center's research team has been transitioning from MIMICS program to 3D Slicer as their primary segmentation tool. To streamline the repetitive tasks, they have been working to automate the workflow using custom scripts starting in early 2024. The goal of this project is to develop a specialized 3D Slicer extension that aligns with the established institutional protocols and automates the 3D modeling process. + + + +## Objective + + + + +1. The goal is to develop 5–6 of the most frequently used features from the custom script currently connected to the slicer.rc file into a Slicer module, making it easier to maintain. This will help automate repetitive tasks in the data preparation process for long-term research. Planned features include tools for optimizing liver transplantation surgery segmentation, liver cancer segmentation, displaying dice score comparison tables and liver segment volume tables related to AI research, and exporting segmentation masks as NIfTI files for AI training. + +2. Given that Slicer is a visualization tool, an additional goal is to implement features for customizing 3D view rendering materials and lighting settings. As someone with a background in design and medical illustration who frequently uses Blender 3D, I’ve always thought having a module for adjusting rendering options in 3D Slicer would be beneficial. This idea could be extended by creating optimized rendering presets for different organs, making it easier to apply tailored visual settings. + +3. We also intend to improve the manual script currently used for volume and dice score tables. Enhancements will include allowing one-click copy-paste functionality and saving results directly as Excel files for easier review. 
+ +**[Project Proposal PPT ->](https://docs.google.com/presentation/d/1kLeWb436ZpJCnbPZJxa0f1xVZAY0btyB/edit?usp=drive_link&ouid=117843046046586749971&rtpof=true&sd=true)** + + + +## Approach and Plan + + + + +1. We will start by following the [tutorial](https://docs.google.com/presentation/d/1JXIfs0rAM7DwZAho57Jqz14MRn2BIMrjB17Uj_7Yztc/edit#slide=id.g420896289_0251) for developing a 3D slicer extension. +2. Since we are researchers with a background in Medical Illustration rather than Computer Science, we plan to actively utilize Cursor.ai and Slicer documentation to develop our module during this hackathon. +3. We always welcome advice from other researchers who have experience developing Slicer modules! 😊 + + + + +## Progress and Next Steps + +Our workflow diagram for Liver Cancer Surgery 3D Images and AI data (before participating Project Week 2025) + +Image + + +Our workflow diagram for Living Donor Liver Transplantation Surgery 3D Images and AI data (before participating Project Week 2025) + +Image + + +[Day 1-2] Studied and studying tutorial documents on developing Slicer Extension. + Tested making modules with "Extension Wizard" +https://training.slicer.org/ +https://slicer.readthedocs.io/en/latest/developer_guide/index.html + + + +[Day 2-5] Began developing the Liver Volumetry module for LDLT, which calculates the Graft-to-Recipient Weight Ratio (GRWR) to assist in selecting the optimal liver donor among candidates. The GRWR is a key parameter in liver transplantation, ensuring that the donated liver graft is of adequate size to support the recipient’s metabolic needs. + +![Image](https://github.com/user-attachments/assets/7efe841f-37a5-4b55-bd95-83baec421e62) + +Studied and studying the [Bone Reconstruction Planner module from SlicerIGT](https://github.com/SlicerIGT/SlicerBoneReconstructionPlanner/blob/main/BoneReconstructionPlanner/BoneReconstructionPlanner.py#L423) for reference (thanks to Mauro I. Dominguez). 
+ +Tasks: +Designing the UI using Qt Designer +Implementing signal/slot connections + + +Next Steps +1. Finalize module logic for LDLT donor volumetry and test. +2. Finalize module UI for Liver Cancer Surgery module, logic and test. +3. After making the module, upload it to Slicer extension market. (https://github.com/Slicer/ExtensionsIndex/tree/main) + + + + + + + +# Illustrations + + + + +![Image](https://github.com/user-attachments/assets/76d91735-6e44-486a-84d2-0d2bb599a4cc) + +https://github.com/user-attachments/assets/0776a062-2128-4773-84de-8c3174e869ed + +![Image](https://github.com/user-attachments/assets/485e8a96-ff90-406a-a792-c2fc97408c78) + +![Image](https://github.com/user-attachments/assets/576e7253-d77e-4f9b-8449-01b229c8e3e8) + +![Image](https://github.com/user-attachments/assets/e5ef5fc4-5dd5-43d8-90d3-a04117170f58) + +![Image](https://github.com/user-attachments/assets/e4a1f9c8-f0d3-4642-ae44-1b8baacc4e6a) + + + +# Background and References + + + + +Rhu J, Choi GS, Kim MS, Kim JM, Joh JW. Image guidance using two-dimensional illustrations and three-dimensional modeling of donor anatomy during living donor hepatectomy. Clin Transplant. 2021 Jan;35(1):e14164. doi: 10.1111/ctr.14164. Epub 2020 Dec 12. PMID: 33222255. [https://pubmed.ncbi.nlm.nih.gov/33222255/](https://pubmed.ncbi.nlm.nih.gov/33222255/) + + +Oh, N., Kim, JH., Rhu, J. *et al.* Automated 3D liver segmentation from hepatobiliary phase MRI for enhanced preoperative planning. *Sci Rep* **13**, 17605 (2023). 
[https://doi.org/10.1038/s41598-023-44736-w](https://doi.org/10.1038/s41598-023-44736-w) + diff --git a/PW42_2025_GranCanaria/Projects/EvaluationOfImiBigpictureWsidicomizerAsAToolForConversionIntoDicomWholeSlideImagingFormat/README.md b/PW42_2025_GranCanaria/Projects/EvaluationOfImiBigpictureWsidicomizerAsAToolForConversionIntoDicomWholeSlideImagingFormat/README.md new file mode 100644 index 000000000..464990ec1 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/EvaluationOfImiBigpictureWsidicomizerAsAToolForConversionIntoDicomWholeSlideImagingFormat/README.md @@ -0,0 +1,139 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Evaluation of imi-bigpicture/wsidicomizer as a tool for conversion into DICOM whole + slide imaging format +category: DICOM + +key_investigators: + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +- name: Daniela Schacherer + affiliation: Fraunhofer MEVIS + country: Germany + +- name: David Clunie + affiliation: Pixelmed Publishing + country: USA + +- name: Max Fischer + affiliation: DKFZ + country: Germany + +--- + +# Project Description + + + + +DICOM standard is gaining acceptance in digital pathology imaging. Conversion of slide images into DICOM format can make the data more FAIR, improve quality and comprehensiveness of the associated metadata, and improve their interoperability with the commercial and open source tools implementing the standard. + +DICOM format is used for slide microscopy images available in NCI Imaging Data Commons (IDC). Images submitted to IDC in the vendor-specific formats must be converted into DICOM representation, which is currently done using the PixelMed Toolkit based scripts available in [https://github.com/ImagingDataCommons/idc-wsi-conversion](https://github.com/ImagingDataCommons/idc-wsi-conversion). + +Our goal is to migrate the DICOM WSI conversion to use community-supported open source tools. 
Based on our current assessment and experience, [imi-bigpicture/wsidicomizer](https://github.com/imi-bigpicture/wsidicomizer/) is the most promising tool available for this task. In this project we will work on evaluating this tool. + + + +## Objective + + + + +1. Assemble inventory of the publicly available test samples representative of the variety of data encountered by IDC and perhaps outside of IDC. +2. Document requirements for the conversion tool based on the needs of IDC. +3. Complete evaluation of `wsidicomizer` and document the results (in terms of the features and performance of the conversion process). +4. Document results and identified gaps to help with the next steps. + + + + +## Approach and Plan + + + + +1. Select representative source images in the original format and the results of conversion to DICOM available in IDC (as converted using PixelMed Toolkit), as a reference. Assemble information about the characteristics of those samples in a document (vendor, compression, ...). Include the accompanying tabulated metadata that is needed for converting each particular sample. +2. Requirements: initialization of metadata, standard compliance of the result, transfer of ICC profile, acceptable performance .... (intentionally, DICOM-TIFF dual personality at this point is not a requirement) +3. Create a publicly available script/notebook that performs conversion. +4. Evaluate the results and summarize in a publicly available document. +5. Document any identified problems by opening issues in the `wsidicomizer` repo. + + + + +## Progress and Next Steps + +1. Set up conversion code in python (simple), confirmed conversion approach is consistent between what we use in IDC and what Max is using in Kaapana (`wsidicomizer` Python function - not command line tool). +2. Prepared queries for selecting test images from IDC. 
Mapping to the source file in vendor format is stored in a private tag `(0009,1001)` (source non-DICOM files are in private buckets in IDC). +3. Identified problems in selecting samples based on `TransferSyntaxUID` - did not realize initially it can vary across instances within the same series! +4. Identified numerous very strange images in IDC - will need to investigate this further. +5. Started testing `wsidicomizer`, tested with JPEG and uncompressed samples. +6. Identified and reported converter issues, several of which have already been resolved (kudos to Erik Gabrielsson, `wsidicomizer` maintainer!): + * https://github.com/imi-bigpicture/wsidicomizer/issues/117 + * https://github.com/imi-bigpicture/wsidicomizer/issues/118 + * https://github.com/imi-bigpicture/wsidicomizer/issues/123 +7. Discussed various issues related to conversion and shared experience; reached agreement `wsidicomizer` is the best choice given combined experience, and very good support from Erik. +8. Identified issues in `dicom3tools` building it in Colab VM - fixed by David Clunie ([notebook](https://colab.research.google.com/drive/17Ce67NY3dhoIPKLBdgouSIESwgoRG0P_?usp=sharing)). 
+ +Query for selecting samples from IDC based on `TransferSyntaxUID` applied to the base layer of the image pyramid: + +```sql +WITH + RankedRows AS ( + SELECT + SeriesInstanceUID, + StudyInstanceUID, + TotalPixelMatrixColumns*TotalPixelMatrixRows AS totalPixels, + TransferSyntaxUID, + ROW_NUMBER() OVER (PARTITION BY SeriesInstanceUID ORDER BY TotalPixelMatrixColumns*TotalPixelMatrixRows DESC) AS rn + FROM + `bigquery-public-data.idc_current.dicom_all` + WHERE + Modality = "SM" and collection_id not like "%htan%") +SELECT + TransferSyntaxUID, + StudyInstanceUID, + SeriesInstanceUID, + totalPixels, + concat("https://viewer.imaging.datacommons.cancer.gov/slim/studies/",StudyInstanceUID,"/series/",SeriesInstanceUID) +FROM + RankedRows +WHERE + rn = 1 + # Explicit VR Little Endian + AND TransferSyntaxUID = "1.2.840.10008.1.2.1" + +ORDER BY + totalPixels ASC +``` + + +# Illustrations + + +![](overview_label.png) + + +# Background and References + + + + +Background reading: +* Herrmann, M. D., Clunie, D. A., Fedorov, A., Doyle, S. W., Pieper, S., Klepeis, V., Le, L. P., Mutter, G. L., Milstone, D. S., Schultz, T. J., Kikinis, R., Kotecha, G. K., Hwang, D. H., Andriole, K. P., John Lafrate, A., Brink, J. A., Boland, G. W., Dreyer, K. J., Michalski, M., Golden, J. A., Louis, D. N. & Lennerz, J. K. Implementing the DICOM standard for digital pathology. J. Pathol. Inform. 9, 37 (2018). [http://dx.doi.org/10.4103/jpi.jpi_42_18](http://dx.doi.org/10.4103/jpi.jpi_42_18) +* Clunie, D. A. DICOM format and protocol standardization-A core requirement for digital pathology success. Toxicol. Pathol. 49, 738–749 (2021). 
[http://dx.doi.org/10.1177/0192623320965893](http://dx.doi.org/10.1177/0192623320965893) + +Other related materials: +* IDC Portal: [https://portal.imaging.datacommons.cancer.gov/](https://portal.imaging.datacommons.cancer.gov/) +* [conversion_mirax_dicom](https://github.com/ImagingDataCommons/conversion_mirax_dicom/blob/main/add_metadata.py) (currently, private repo) +* Related earlier project from PW40: [https://projectweek.na-mic.org/PW40_2024_GranCanaria/Projects/WsiDicomImprovementFromViewerToAnalysis/](https://projectweek.na-mic.org/PW40_2024_GranCanaria/Projects/WsiDicomImprovementFromViewerToAnalysis/) +* Code from Fabian Hörst experiments: [https://github.com/TIO-IKIM/PathoPatcher/blob/main/pathopatch/patch_extraction/patch_extraction.py](https://github.com/TIO-IKIM/PathoPatcher/blob/main/pathopatch/patch_extraction/patch_extraction.py) +* Test samples used by wsidicomizer: [https://github.com/imi-bigpicture/wsidicomizer/blob/main/tests/download_test_images.py#L23-L60](https://github.com/imi-bigpicture/wsidicomizer/blob/main/tests/download_test_images.py#L23-L60) +* [Google WSI transformation pipeline](https://github.com/GoogleCloudPlatform/medical-imaging/blob/main/pathology/transformation_pipeline/docs/digital_pathology_transformation_pipeline_to_dicom_spec.md) diff --git a/PW42_2025_GranCanaria/Projects/EvaluationOfImiBigpictureWsidicomizerAsAToolForConversionIntoDicomWholeSlideImagingFormat/overview_label.png b/PW42_2025_GranCanaria/Projects/EvaluationOfImiBigpictureWsidicomizerAsAToolForConversionIntoDicomWholeSlideImagingFormat/overview_label.png new file mode 100644 index 000000000..8deea78f8 Binary files /dev/null and b/PW42_2025_GranCanaria/Projects/EvaluationOfImiBigpictureWsidicomizerAsAToolForConversionIntoDicomWholeSlideImagingFormat/overview_label.png differ diff --git 
a/PW42_2025_GranCanaria/Projects/ExtendingRadiotherapyTreatmentPlanningCapabilitiesWithinSlicerrt/README.md b/PW42_2025_GranCanaria/Projects/ExtendingRadiotherapyTreatmentPlanningCapabilitiesWithinSlicerrt/README.md new file mode 100644 index 000000000..e497c543c --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/ExtendingRadiotherapyTreatmentPlanningCapabilitiesWithinSlicerrt/README.md @@ -0,0 +1,97 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Extending Radiotherapy Treatment Planning Capabilities within SlicerRT +category: Quantification and Computation + +key_investigators: + +- name: Niklas Wahl + affiliation: DKFZ + country: Germany + +- name: Lina Bucher + affiliation: KIT/DKFZ + country: Germany + +- name: Francesca Spadea + affiliation: KIT + country: Germany + +- name: Csaba Pinter + affiliation: EBATINCA + country: Spain + +--- + +# Project Description + + + + +We will continue the extension of the treatment planning capabilities of SlicerRT by upgrading the corresponding user interface to better separate plan optimization and dose calculation. Algorithms will be interfaced from the open source treatment planning toolkit matRad via its new Python extension pyRadPlan. +Last year, we managed rudimentary treatment planning capabilities - this year, the goal is to allow full treatment planning on data loaded directly in Slicer, returning planned dose cubes for further analysis in Slicer. + + + + +## Objective + + + + +1. Photon & Ion Dose calculation engines available and configurable within SlicerRT ExternalBeamPlanning +2. SlicerRT ExternalBeamPlanning UI to handle plan optimization objectives defined in c++ and Python +3. Infrastructure for interfacing optimizers from Python and C++ +4. Interface to pyRadPlan objectives and optimizers + + + +## Approach and Plan + + + + +1. Update the existing rudimentary interface prototype for dose engines and optimization to the recent pyRadPlan version +2. 
Build an Optimization Objective Infrastructure derived from SlicerRT's way of handling python and C++ dose engines +3. Create a dedicated Objective view in the SlicerRT graphical user interface +4. Track potential compatibility conflicts and integrate them into the main pyRadPlan release + + + + +## Progress and Next Steps + +1. Updated to the latest version of pyRadPlan for Python native dose calculation and inverse planning +2. Extension of SlicerRT infrastructure to manage Plan Optimizers & Objectives +3. Add pyRadPlan Optimization Interface & Objective Interface +4. Extension of pyRadPlan/SlicerRT interface to handle multiple beams +5. First fully intensity-modulated photon and proton plans + +## Next steps +1. Performance improvement in data transfers +2. Improve GUI flexibility +3. PR & Code Review SlicerRT + +# Illustrations + + +## Extended ExternalBeamPlanning Interface with Optimization Settings +![Photons_5Beams_Plan](https://github.com/user-attachments/assets/25df44e6-0e2b-4972-9958-7305172993d3) + +## 5 beam photon plan with SlicerRT pyRadPlan interface: +![Photons_5Beams](https://github.com/user-attachments/assets/4bc2bd58-83ca-433c-8669-51a6937af3e2) + +## Dose Influence storage accessible from Python for Beam Nodes: +![Photons_5Beams_DVH](https://github.com/user-attachments/assets/8195eb7d-c6cc-4498-90a1-1f7a4439181e) + +# Background and References + + + + +- [https://pypi.org/project/pyRadPlan](https://pypi.org/project/pyRadPlan) +- [https://github.com/e0404/matRad](https://github.com/e0404/matRad) diff --git a/PW42_2025_GranCanaria/Projects/FinalizeSlicerIconSetUpdateInfrastructure/DarkThemeIconsIndex.html b/PW42_2025_GranCanaria/Projects/FinalizeSlicerIconSetUpdateInfrastructure/DarkThemeIconsIndex.html new file mode 100644 index 000000000..46f83f1cc --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/FinalizeSlicerIconSetUpdateInfrastructure/DarkThemeIconsIndex.html @@ -0,0 
+1,7696 @@ + + + + Dark Theme Slicer Icons + + + +

3D Slicer Icon Design Guidelines & Dark Theme Icons Preview

+

+1. Design 3D Slicer icons as vector images on a transparent background. Reuse existing Slicer symbols for data and concepts, symbolic colors and other UI patterns where appropriate. +

+2. At 24x24 pixel resolution, stroke width should be 1dp = 1px for pixel-perfect rendering at resolution multiples of 24. Most of 3D Slicer's icons are designed at 200% scale, at 48x48 pixel resolution, with 1dp=2px. +

+3. Use simple stroked elements without fill where possible. Stroke caps and corners can be sharp or rounded with r = dp/2. +

+4. If filled elements are required, use limited color, preferably from Slicer's SimpleColorPalette, consistently across your UI, compatibly with 3D Slicer's application UI. Ensure the fill color works well in both Dark and Light themes. +

+5. Respect stroke, fill and background colors as defined and named for use in 3D Slicer's SimpleColorPalette for both Dark and Light Themes. +

+6. Use face-forward icons where possible and orthographic perspective with 45 degree angles where required. +

+7. Avoid gradients, shadows and other 3D effects. +

+8. Maintain a padding of 2dp around the icon perimeter when possible. +

+9. Preview Dark and Light Theme versions of your icons on Dark and Light Theme backgrounds, at multiple resolutions to make sure they look great. +

+10. Remove all hidden or unused vector elements from SVG files before finalizing work. +

+3DViewers + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + +3DViewersHideRuler.svg + + + + + + + + + + + + + + + + + + +3DViewersStartStereoscopic.svg + + + + + + + + + + +3DViewersViewFromSuperior.svg + + + + + + + + + + +3DViewersVisibility.svg + + + + + + + + + + + + + + + + + + +3DViewersShowRuler.svg + + + + + + + + + + + + + + + + + + + + + +3DViewersYaw.svg + + + + + + + + + + +3DViewersViewFromRight.svg + + + + + + + + + + + + +3DViewersUsePerspective.svg + + + + + + + + + + +3DViewersViewFromPosterior.svg + + + + + + + + + + + + + + + + + + + + +3DViewersStopStereoscopic.svg +
+ + + + + + + + + + + + F/S + + + + + + +3DViewersShowFPS.svg + + + + + + + + + + + + + + + + + + + + + +3DViewersSpin.svg + + + + + + + + + + + + + + + + + + + + + +3DViewersPitch.svg + + + + + + + + + + + + + + + + + + + +3DViewersRoll.svg + + + + + + + + + + + + + + + + + + + +3DViewersUnlinkAll3DViewers.svg + + + + + + + + + + +3DViewersViewFromInferior.svg + + + + + + + + + + + + + + + +3DViewersLinkAll3DViewers.svg + + + + + + + + + + + + + + +3DViewersOrientationMarker.svg + + + + + + + + + + + +3DViewersZoomIn.svg + + + + + + + + + + + + + + + + + + + +3DViewersHideFPS.svg +
+ + + + + + + + + +3DViewersZoomOut.svg + + + + + + + + + + + + + + + + + + + + + + + +3DViewersViewRecentered.svg + + + + + + + + + + +3DViewersViewFromLeft.svg + + + + + + + + + + + + + + + + + + + + +3DViewersRock.svg + + + + + + + + + + + + +3DViewersUseOrthographic.svg + + + + + + + + + + + + +3DViewersShadows.svg + + + + + + + + + + +3DViewersViewFromAnterior.svg + + + + + + + + + + + + + + + + + + + + + +3DViewersCentered.svg + + + + + + + + + +3DViewersMoreOptions.svg +
+

+

+ApplicationBasics/Arrows + + + + + + + + + + + +
+ + + + + + + +SlicerRightArrow.svg + + + + + + + + + +SlicerDoubleArrowLeft.svg + + + + + + + + + +SlicerDoubleArrowRight.svg + + + + + + + + +SlicerDownArrow.svg + + + + + + + + +SlicerUpArrow.svg + + + + + + + + + +SlicerDoubleArrowUp.svg + + + + + + + + +SlicerLeftArrow.svg + + + + + + + + + +SlicerDoubleArrowDown.svg +
+

+

+ApplicationBasics/Communications + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +SlicerInfoLeft.svg + + + + + + + + + +SlicerPending1-of-3.svg + + + + + + + + + + + +SlicerStarting.svg + + + + + + + + + +SlicerInfo.svg + + + + + + + + + + +SlicerHourglassBottomFull.svg + + + + + + + + + +SlicerWarning.svg + + + + + + + + + +SlicerComplete.svg + + + + + + + + + +SlicerError.svg + + + + + + + + + + +SlicerHome.svg + + + + + + + + + + +SlicerInfoRight.svg +
+ + + + + + + + +SlicerHelp.svg + + + + + + + + + + + + +SlicerComment.svg + + + + + + + + + + + + +SlicerHourglassWorking.svg + + + + + + + + + + + +SlicerPending3-of-3.svg + + + + + + + + + + +SlicerPending2-of-3.svg + + + + + + + + + + +SlicerEmptyHourglass.svg + + + + + + + + + + +SlicerHourglassTopFull.svg +
+

+

+ApplicationBasics/Configure + + + + + +
+ + + + + + + +SlicerConfigure.svg + + + + + + + + +SlicerExtensions.svg +
+

+

+ApplicationBasics/Layers + + + + + + + + + +
+ + + + + + + + + + +LowerLayer.svg + + + + + + + + + + +ShowLayersUI.svg + + + + + + + + + + + +RaiseLayer.svg + + + + + + + + + + + + +HideLayersUI.svg + + + + + + + + + + + + +DeleteLayer.svg + + + + + + + + + + + + +AddLayer.svg +
+

+

+ApplicationBasics/LogicalAndMathOps + + + + + + + +
+ + + + + + + +SlicerUnion.svg + + + + + + + + + +SlicerDifference.svg + + + + + + + + + + + + +SlicerIntersection.svg + + + + + + + + + + + + +SlicerExclusion.svg +
+

+

+ApplicationBasics/Plotting + + + + + + +
+ + + + + + + + + + + + + + +SlicerSegmentStatistics.svg + + + + + + + + + + + + + + + + + + + + +SlicerPlotSeries.svg + + + + + + + + + + + + + + + + + + +SlicerInteractivePlotting.svg +
+

+

+ApplicationBasics/SequencePlayer + + + + + + + + + + + + +
+ + + + + + + +SlicerRecordOrRecording.svg + + + + + + + + + +SlicerFirstOrSkipBackward.svg + + + + + + + + + + + +SlicerPause.svg + + + + + + + + + +SlicerLastOrSkipForward.svg + + + + + + + + +SlicerForward.svg + + + + + + + + + + + +SlicerLoop.svg + + + + + + + + +SlicerStoppedOrNotRecording.svg + + + + + + + + + +SlicerPlay.svg + + + + + + + + +SlicerBack.svg +
+

+

+ApplicationBasics/SubjectAndStudyRepresentation + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +MouseHierarchy.svg + + + + + + + + + + + + + + + + + + +BasicStudyHierarchy.svg + + + + + + + + + + + + + + + + + + + +CatHierarchy.svg + + + + + + + + + + + + + + + + + + + +PigHierarchy.svg + + + + + + + + + + +MouseSubject.svg + + + + + + + + + + + + + + +CatSubject.svg + + + + + + + + + + + + +MonkeySubject.svg + + + + + + + + + + + + +PigSubject.svg + + + + + + + + + + + + + + + + + + + + +MonkeyHierarchy.svg + + + + + + + + + + + + + + +TeddySubject.svg +
+ + + + + + + + + + + +HumanSubject.svg + + + + + + + + + + + + + + + + + + + + + + + + +SlicerDataHierarchy.svg + + + + + + + + + + + + +BasicStudy.svg + + + + + + + + + + + + + + + + +RectangleHierarchy.svg + + + + + + + + + + + + + + + + + +HumanHierarchy.svg + + + + + + + + + + + + + + + + + +RatHierarchy.svg + + + + + + + + +RectangleSubject.svg + + + + + + + + + + +RatSubject.svg + + + + + + + + + + + + + + + + + + +TeddyHierarchy.svg +
+

+

+ApplicationBasics/TableOps + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +SlicerTableUnLockRowHeader.svg + + + + + + + + + + + + + + + + +SlicerTableMoveColumnRight.svg + + + + + + + + + + + + + + +SlicerTable.svg + + + + + + + + + + + + + + + + + +SlicerTableUnLockColumnHeader.svg + + + + + + + + + + + + + + + + +SlicerTableMoveColumnLeft.svg + + + + + + + + + + + + + + + + +SlicerTableMoveRowUp.svg + + + + + + + + + + + + + + + + + +SlicerTableDeleteColumn.svg + + + + + + + + + + + + + + + + +SlicerTableMoveRowDown.svg + + + + + + + + + + + + + + + + + +SlicerTableAddRow.svg + + + + + + + + + + + + + + + + + +SlicerTableLockColumnHeader.svg +
+ + + + + + + + + + + + + + + + +SlicerTableAddColumn.svg + + + + + + + + + + + + + + + + + + + +SlicerTableLockRowHeader.svg + + + + + + + + + + + + + + + + + +SlicerTableDeleteRow.svg +
+

+

+ApplicationBasics/Toggles + + + + + + + + + + + +
+ + + + + + + + + +SlicerToggleOffLeft.svg + + + + + + + + + + + +SlicerToggleCheckBox.svg + + + + + + + + + + +SlicerToggleOnRight.svg + + + + + + + + + + + +SlicerToggleRadioButton.svg + + + + + + + + + + + + + + + + + + + + +SlicerToggleCheckBoxAll.svg + + + + + + + + + + + + + + +SlicerToggleVisibility.svg + + + + + + + + + + + + + + +SlicerToggleLock.svg + + + + + + + + + + + + +SlicerToggleLink.svg +
+

+

+ApplicationBasics/Transforms + + + + + + + + +
+ + + + + + + + + + + + + +SlicerLinearTransform.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerRemoveTransform.svg + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerTransform.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerAddTransform.svg + + + + + + + + + + + + + + +SlicerDeformableTransform.svg +
+

+

+ApplicationBasics/UIActions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +SlicerLock.svg + + + + + + + + +SlicerUnCheckBox.svg + + + + + + + + + + + + + + + +SlicerLink.svg + + + + + + + + + + + +SlicerCheckBoxAll.svg + + + + + + + + + + + + + + + + +SlicerTrash.svg + + + + + + + + + +SlicerMoreOptionsHoriz.svg + + + + + + + + + +SlicerSearch.svg + + + + + + + + + +SlicerBlock.svg + + + + + + + + +SlicerMinus.svg + + + + + + + + + + +SlicerCancel.svg +
+ + + + + + + + + + + + + + + + + + +SlicerUnPin.svg + + + + + + + + + + + +SlicerReset.svg + + + + + + + + + +SlicerDone.svg + + + + + + + + + +SlicerRemove.svg + + + + + + + + + + + + + +SlicerRestore.svg + + + + + + + + + +SlicerUndo.svg + + + + + + + + + +SlicerMoreOptionsVert.svg + + + + + + + + + +SlicerSelectColor.svg + + + + + + + + + + +SlicerUnCheckBoxAll.svg + + + + + + + + + +SlicerAdd.svg +
+ + + + + + + + + +SlicerUnlock.svg + + + + + + + + + + + + + + + + + + + +SlicerPin.svg + + + + + + + + + + + + + + + + + + + +SlicerUnlink.svg + + + + + + + + + +SlicerCheckBox.svg + + + + + + + + + + + + + + + + + +SlicerUnHotLink.svg + + + + + + + + + + + + + + + +SlicerHotLink.svg + + + + + + + + + +SlicerRedo.svg +
+

+

+ApplicationBasics/Visibility + + + + + + +
+ + + + + + + + + + + + +SlicerInvisible.svg + + + + + + + + + + +SlicerVisible.svg + + + + + + + + + + + + + + + +SlicerSubsetVisibility.svg +
+

+

+Capture + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + +SlicerSceneViewCaptureV2.svg + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerSceneViewRestoreOrDeleteV2.svg + + + + + + + + + + + + + + + + + + + + +SlicerSceneViewCapture.svg + + + + + + + + + + + + + + + + + + + + + + + +SlicerSceneViewRestoreOrDelete.svg + + + + + + + + + + + + + + + + + + +SlicerCapture.svg +
+

+

+DataIO + + + + + + + + + + + + +
+ + + + + + + + + + +SlicerDownload.svg + + + + + + + + + + + + + + + + +DICOMIO.svg + + + + + + + + + +SlicerLoadData.svg + + + + + + + +SlicerLoadFromCloud.svg + + + + + + + + + + + + + + +SlicerDownloadBundle.svg + + + + + + + +SlicerSaveToCloud.svg + + + + + + + + + +SlicerSaveData.svg + + + + + + + + + + + + + +SlicerUpload.svg + + + + + + + + + + + + +SlicerDownloadExtension.svg +
+

+

+ModuleNavigation + + + + + + + + +
+ + + + + + + + + + + + + + + +ModuleHistory.svg + + + + + + + + + + + + + + + +ModulePrevious.svg + + + + + + + + + + + + + + + +ModuleNext.svg + + + + + + + + + + +ModuleSearch.svg + + + + + + + + + + + + + + + + + +SelectModule.svg +
+

+

+Modules/CoreToolbarModules + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + +MarkupsModule.svg + + + + + + + + + + + + + + + + + + +TransformsModule.svg + + + + + + + + + + + + + + + + +SegmentEditorModule.svg + + + + + + + + + + + + + + + + + + + +WelcomeModule.svg + + + + + + + + + + + + + + + + +Annotations.svg + + + + + + + + + + + + + + + + + + + + + + + + +SegmentationsModule.svg + + + + + + + + + + + + + + + + + +DefaultModule.svg + + + + + + + + + + + + + + + + + + + +DataModule.svg + + + + + + + + + + + + + + + + + +ModelsModule.svg + + + + + 24dpSlicerIconTemplate + + + + 24dpSlicerIconTemplate + + + + + + + + + + + + + + + + + + + + + + + + +VolumesModule.svg +
+

+

+Modules/MarkupsModule + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +MarkupsAddLine.svg + + + + + + + + + + + + + + + + + + + +MarkupsAddPointList.svg + + + + + + + + + + + + + + + + + + + +MarkupsAddROI.svg + + + + + + + + + + + + + + + + + + + +MarkupsAddClosedCurve.svg + + + + + + + + + + + + + + + + + + + +MarkupsAddAngle.svg + + + + + + + + + + + + + + + + + + + +MarkupsAddPlane.svg + + + + + + + + + + + + + + + + + + + +MarkupsAddOpenCurve.svg + + + + + + + + + + + + + + + + + + + +MarkupsAddPoint.svg +
+

+

+Modules/SegmentEditorModule + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +Margins.svg + + + + + + + + + + + + + + + +Show3D.svg + + + + + + + + + + + + +FastMarching.svg + + + + + + + + + + + + + + + + +Threshold.svg + + + + + + + + + + + + +Draw.svg + + + + + + + + + + + + + + + + + + + + + + + + + +SplitVolume.svg + + + + + + + + + + + + +Paint.svg + + + + + + + + + + + + + + + + +SlicerTrash.svg + + + + + + + + + + + + + + + + + + +FillBetweenSlices.svg + + + + + + + + + + + + + +FloodFill.svg +
+ + + + + + + + + + + + + + + +Engrave.svg + + + + + + + + + + + + + +SegmentTubes.svg + + + + + + + + + + + + + + + + + + + + +LogicalOperations.svg + + + + + + + + + + + + + + + + + + + + + + +LocalThreshold.svg + + + + + + + + + + + + + + + + + + +Hide3D.svg + + + + + + + + + + +Hollow.svg + + + + + + + + + +SlicerRemove.svg + + + + + + + + + + + + + + + + + + +Erase.svg + + + + + + + + + + + + + + + + +GrowFromSeed.svg + + + + + + + + + + + + + + + + + + +SurfaceCut.svg +
+ + + + + + + + + + + + + + + +Watershed.svg + + + + + + + + + + + + + + + + + + + + + + + + + + +GoToSegmentationsModule.svg + + + + + + + + + +SlicerUndo.svg + + + + + + + + + +Select.svg + + + + + + + + + +SlicerAdd.svg + + + + + + + + + + + + + + + + + + + + + + + +MaskVolume.svg + + + + + + + + + + +Smooth.svg + + + + + + + + + + + + + +LevelTrace.svg + + + + + + + + + + + + + + +Islands.svg + + + + + + + + + +SlicerRedo.svg +
+ + + + + + + + + + +ContentCut.svg +
+

+

+Modules/SlicerIHEviewer + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +SlicerIHE-Pan.svg + + + + + + + + + + + +SlicerIHE-CineStepForward.svg + + + + + + + + + + +SlicerIHE-SelectViewport.svg + + + + + + + + + + + + + +SlicerIHE-LayoutWithinViewport.svg + + + + + + + + + + +SlicerIHE-CinePlay.svg + + + + + + + + ? + + + +SlicerIHE-Help.svg + + + + + + + + + +SlicerIHE-FlipHorizontal.svg + + + + + + + + + + + + + +SlicerIHE-Crosshair.svg + + + + + + + + + + +SlicerIHE-CineGoToStart.svg + + + + + + + + + +SlicerIHE-PreviousFrame.svg +
+ + + + + + + + + + + + + + + + + +SlicerIHE-UnLink.svg + + + + + + + + + + + + + + + +SlicerIHE-Rotate90Clockwise.svg + + + + + + + + + +SlicerIHE-NextFrame.svg + + + + + + + + + + + + +SlicerIHE-ZoomIn.svg + + + + + + + + + +SlicerIHE-PrevioustStudy.svg + + + + + + + + + + + + + + +SlicerIHE-LocalizerLines.svg + + + + + + + + + + + + + +SlicerIHE-SelectPatient.svg + + + + + + + + + + + +SlicerIHE-Scroll.svg + + + + + + + + + + + + + +SlicerIHE-Print.svg + + + + + + + + + + + +SlicerIHE-CineStepBackward.svg +
+ + + + + + + + + + + + + +SlicerIHE-DisplayReset.svg + + + + + + + + + + + +SlicerIHE-AdvancedMoreOptions.svg + + + + + + + + + + + + + + + + + + + + + + + + +SlicerIHE-LayoutMultipleViewports.svg + + + + + + + + + + +SlicerIHE-CineGoToEnd.svg + + + + + + + + + + + + + + + + + + + + +SlicerIHE-RulerMeasure.svg + + + + + + + + + + + + + + + + +SlicerIHE-Link.svg + + + + + + + + + + +SlicerIHE-PreviousFrameSet.svg + + + + + + + + + + + + + + + + +SlicerIHE-ShowReport.svg + + + + + + + + + + + + + + + + + + + +SlicerIHE-CineTools.svg + + + + + + + + + + + + + + + + + + + + + +SlicerIHE-Annotation.svg +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerIHE-WindowLevelColor.svg + + + + + + + + + + +SlicerIHE-NextFrameSet.svg + + + + + + + + + + + + +SlicerIHE-InvertGreyscale.svg + + + + + + + + + +SlicerIHE-NextStudy.svg + + + + + + + + + +SlicerIHE-CineStop.svg + + + + + + + + + + + +SlicerIHE-ZoomOut.svg + + + + + + + + + + + + + + +SlicerIHE-AngleMeasure.svg +
+

+

+MouseModes + + + + + + +
+ + + + + + + + + + + + + + + + + + + + +SlicerMarkupsMenuToggle.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerWindowLevel.svg + + + + + + + + + + + + + +SlicerMouseModeTransformAdjust.svg +
+

+

+SliceViewers + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +BlendMode.svg + + + + + + + + + + + + + + + + + + +RulerDisplayOptions.svg + + + + + + + + + + + + + + + + + + + + + +RotateToVolumePlane.svg + + + + + + + + + + + + + + +OrientationMarker.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +LabelMapDisplayFill.svg + + + + + + + + + + + + + + + + + + + + + + +SlicePlanes-Sagittal.svg + + + + + + + + + + + + + + + + +LightboxView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +LabelMapDisplayOutline.svg + + + + + + + + + + + + + + + + + + + + + + + +SegmentationDisplayFillAndOutline.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +ResetFieldOfView.svg +
+ + + + + + + + + + + + + + + + + + + + + +SegmentationLayer.svg + + + + + + + + + + + +ToggleOFF-MaximizeView.svg + + + + + + + + + + + +ToggleOFF-ReformatWidget.svg + + + + + + + + + + + + + + + + + +ForegroundLayer.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +ThickSlabReconstruction.svg + + + + + + + + + + + + + + + + + + + +BackgroundLayer.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + +SliceSpacing.svg + + + + + + + + + + + + + + + + + + + + + +SegmentationDisplayOutline.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +ToggleON-Interpolation.svg + + + + + + + + + + + + + + + + + + + + + + + + + +LabelMapLayer.svg +
+ + + + + + + + + + + + + + + + + +SlicePlanes-Coronal.svg + + + + + + + + + + + + + +EachSegmentVisibility.svg + + + + + + + + + + + + + + +ToggleON-ReformatWidget.svg + + + + + + + + + +ToggleON-MaximizeView.svg + + + + + + + + + + + + + + + + + + + + + + + + +SegmentationDisplayFill.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +ToggleOFF-Interpolation.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +LabelMapDisplayFillAndOutline.svg + + + + + + + + + + + + + + + + + + + + + + +SlicePlanes-Axial.svg +
+

+

+SlicerCommonSymbols + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +SlicerInvisible.svg + + + + +SlicerDataBundle.svg + + + + + + + + + + + +SlicerUpPointingSelect.svg + + + + + + + +SlicerCloud.svg + + + + + + + + + + + +SlicerRightPointingSelect.svg + + + + + + + + + + + + + + + + + + +SlicerSimple3DView.svg + + + + + + + + + +SlicerSearch.svg + + + + + + + + + + +SlicerVisible.svg + + + + + + + + + +SlicerGrid.svg + + + + + + + + + + + + +SlicerDatabase.svg +
+ + + + + + + + + + + + + + + + + +Slicer3DView.svg + + + + + + + + + + + + +SlicerSliceView.svg + + + + + + + + + +SlicerSlashForNoNotOff.svg + + + + + + + +SlicerFolder.svg + + + + + + + + + +SlicerLeftPointingSelect.svg +
+

+

+SlicerDataSymbols + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +SlicerMarkupPoint.svg + + + + + + + + + + + + + + +SlicerTable.svg + + + + + + + + + +SlicerAIsparkles.svg + + + + + + + + + + + + + + +SlicerDataBundle.svg + + + + + + + + + + + + +SlicerDatabase.svg + + + + + 24dpSlicerIconTemplate + + + + 24dpSlicerIconTemplate + + + + + + + + + + + + + + + +SlicerSourceVolume.svg + + + + + + + + + + + + + + + + + + +SlicerModel.svg + + + + + + + + + + + + + + + + + + + + + + + + +SlicerDataHierarchy.svg + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerTransform.svg + + + + + + + + + + + + + + + + + + + + + + + + +SlicerSegmentation.svg +
+ + + + + + + + + + + + + + + + + + + + + + + + +SlicerScalarVolume.svg +
+

+

+SpatialProbes + + + + + +
+ + + + + + + + + + + + + +SlicerSlicePlanesOptions.svg + + + + + + + + + + + + +SlicerCrosshair.svg +
+

+

+ViewerConfiguration + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +SlicerRedSliceOnlyView.svg + + + + + + + + + + + + +SlicerYellowSliceOnlyView.svg + + + + + + + + + + + + + + + + + + + + +SlicerGreenSliceLightbox.svg + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerDual3DView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerCompareGridView.svg + + + + + + + + + + + + + + + + + + + + + + + +SlicerFourUpPlotView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerThreeOverThreePlotView.svg + + + + + + + + + + + + + + + + + + + + + + + +SlicerFourByTwoSliceView.svg + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerTabbed3DView.svg + + + + + + + + + + + + + + + + + + + + +SlicerRedSliceLightbox.svg +
+ + + + + + + + + + + + + + + + + + + +SlicerYellowSliceLightbox.svg + + + + + + + + + + + + +SlicerGreenSliceOnlyView.svg + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerFourUpQuantitativeView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerFourByThreeSliceView.svg + + + + + + + + + + + + + + + + +SlicerCompareWidescreenView.svg + + + + + + + + + + + + + + + + + + + + +SlicerConventionalWidescreenView.svg + + + + + + + + + + + + + + + + + +SlicerPlotOnlyView.svg + + + + + + + + + + + + + + + +SlicerTwoOverTwoSliceView.svg + + + + + + + + + + + + + + + + + + + + + +SlicerFourUpTableView.svg + + + + + + + + + + + + + + + + + + + + +SlicerConventionalView.svg +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerConventionalPlotView.svg + + + + + + + + + + + + + + + + + + + +SlicerThreeOverThreeSliceView.svg + + + + + + + + + + + + + + + + + + +Slicer3DOnlyView.svg + + + + + + + + + + + + + + + + + + +SlicerSideBySideSliceView.svg + + + + + + + + + + + + + + + + + + + + +SlicerTabbedSliceView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerTriple3DView.svg + + + + + + + + + + + + + + + + + + + +SlicerFourUpView.svg + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerThreeByThreeSliceView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerFourOverFourView.svg + + + + + + + + + + + + + + + + +SlicerCompareView.svg +
+ + + + + + + + + + + + + + + + + + + + + + +Slicer3DTableView.svg +
+

+

+ViewerConfigurationV2 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +SlicerRedSliceOnlyView.svg + + + + + + + + + + + + +SlicerYellowSliceOnlyView.svg + + + + + + + + + + + + + + + + + + + + +SlicerGreenSliceLightbox.svg + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerDual3DView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerCompareGridView.svg + + + + + + + + + + + + + + + + + + + + + + + +SlicerFourUpPlotView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerThreeOverThreePlotView.svg + + + + + + + + + + + + + + + + + + + + + + + +SlicerFourByTwoSliceView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerTabbed3DView.svg + + + + + + + + + + + + + + + + + + + + +SlicerRedSliceLightbox.svg +
+ + + + + + + + + + + + + + + + + + + +SlicerYellowSliceLightbox.svg + + + + + + + + + + + + +SlicerGreenSliceOnlyView.svg + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerFourUpQuantitativeView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerFourByThreeSliceView.svg + + + + + + + + + + + + + + + + +SlicerCompareWidescreenView.svg + + + + + + + + + + + + + + + + + + + + + +SlicerConventionalWidescreenView.svg + + + + + + + + + + + + + + + + + +SlicerPlotOnlyView.svg + + + + + + + + + + + + + + + +SlicerTwoOverTwoSliceView.svg + + + + + + + + + + + + + + + + + + + + + + +SlicerFourUpTableView.svg + + + + + + + + + + + + + + + + + + + + + +SlicerConventionalView.svg +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerConventionalPlotView.svg + + + + + + + + + + + + + + + + + + + +SlicerThreeOverThreeSliceView.svg + + + + + + + + + + + + + + + + + + +Slicer3DOnlyView.svg + + + + + + + + + + + + + + + + + + +SlicerSideBySideSliceView.svg + + + + + + + + + + + + + + + + + + + + +SlicerTabbedSliceView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerTriple3DView.svg + + + + + + + + + + + + + + + + + + + + +SlicerFourUpView.svg + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerThreeByThreeSliceView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerFourOverFourView.svg + + + + + + + + + + + + + + + + +SlicerCompareView.svg +
+ + + + + + + + + + + + + + + + + + + + + + +Slicer3DTableView.svg +
+

+

+VisualizationAndDisplayOptions + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerHideModelScalarOverlay.svg + + + + + + + + + +SlicerShowDeformationGrid.svg + + + + + + + + + + + + + + + + + + + + + + + + +SlicerShowVolumeScalarOverlay.svg + + + + + + + + + +SlicerShowGrid.svg + + + + + + + + + +SlicerHideDeformationGrid.svg + + + + + + + + + +SlicerHideSphereGlyphs.svg + + + + + + + + + +SlicerHideIsoContours.svg + + + + + + + + + +SlicerShowIsoContours.svg + + + + + + + + + +SlicerShowSphereGlyphs.svg + + + + + + + + + +SlicerShowConeGlyphs.svg +
+ + + + + + + + +SlicerHideArrowGlyphs.svg + + + + + + + + + +SlicerShowArrowGlyphs.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerHideVolumeScalarOverlay.svg + + + + + + + + + +SlicerShowIsoSurfaces.svg + + + + + + + + + +SlicerHideIsoSurfaces.svg + + + + + + + + + +SlicerHideConeGlyphs.svg + + + + + + + + + +SlicerSelectColor.svg + + + + + + + + + +SlicerHideGrid.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerShowModelScalarOverlay.svg +
+

+ + diff --git a/PW42_2025_GranCanaria/Projects/FinalizeSlicerIconSetUpdateInfrastructure/LightThemeIconsIndex.html b/PW42_2025_GranCanaria/Projects/FinalizeSlicerIconSetUpdateInfrastructure/LightThemeIconsIndex.html new file mode 100644 index 000000000..cfee39870 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/FinalizeSlicerIconSetUpdateInfrastructure/LightThemeIconsIndex.html @@ -0,0 +1,7960 @@ + + + + Light Theme Slicer Icons + + + + +

3D Slicer Icon Design Guidelines & Light Theme Icons Preview

+

+1. Design 3D Slicer icons as vector images on a transparent background. Reuse existing Slicer symbols for data and concepts, symbolic colors and other UI patterns where appropriate. +

+2. At 24x24 pixel resolution, stroke width should be 1dp = 1px for pixel-perfect rendering at resolution multiples of 24. Most of 3D Slicer's icons are designed at 200% scale, at 48x48 pixel resolution, with 1dp=2px. +

+3. Use simple stroked elements without fill where possible. Stroke caps and corners can be sharp or rounded with r = dp/2. +

+4. If filled elements are required, use limited color, preferably from Slicer's SimpleColorPalette, consistently across your UI, and compatible with 3D Slicer's application UI. Ensure the fill color works well in both Dark and Light themes. +

+5. Respect stroke, fill and background colors as defined and named for use in 3D Slicer's SimpleColorPalette for both Dark and Light Themes. +

+6. Use face-forward icons where possible and orthographic perspective with 45 degree angles where required. +

+7. Avoid gradients, shadows and other 3D effects. +

+8. Maintain a padding of 2dp around the icon perimeter when possible. +

+9. Preview Dark and Light Theme versions of your icons on Dark and Light Theme backgrounds, at multiple resolutions to make sure they look great. +

+10. Ensure all hidden or unused vector elements are removed from SVG files before finalizing work. +

+3DViewers + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + +3DViewersHideRuler.svg + + + + + + + + + + + + + + + + + + +3DViewersStartStereoscopic.svg + + + + + + + + + + +3DViewersViewFromSuperior.svg + + + + + + + + + + +3DViewersVisibility.svg + + + + + + + + + + + + + + + + + + +3DViewersShowRuler.svg + + + + + + + + + + + + + + + + + + + + + +3DViewersYaw.svg + + + + + + + + + + +3DViewersViewFromRight.svg + + + + + + + + + + + + +3DViewersUsePerspective.svg + + + + + + + + + + +3DViewersViewFromPosterior.svg + + + + + + + + + + + + + + + + + + + + +3DViewersStopStereoscopic.svg +
+ + + + + + + + + + + + F/S + + + + + + +3DViewersShowFPS.svg + + + + + + + + + + + + + + + + + + + + + +3DViewersSpin.svg + + + + + + + + + + + + + + + + + + + + + +3DViewersPitch.svg + + + + + + + + + + + + + + + + + + + +3DViewersRoll.svg + + + + + + + + + + + + + + + + + + + +3DViewersUnlinkAll3DViewers.svg + + + + + + + + + + +3DViewersViewFromInferior.svg + + + + + + + + + + + + + + + + + +3DViewersLinkAll3DViewers.svg + + + + + + + + + + + + + + +3DViewersOrientationMarker.svg + + + + + + + + + + + +3DViewersZoomIn.svg + + + + + + + + + + + + + + + + + + + +3DViewersHideFPS.svg +
+ + + + + + + + + +3DViewersZoomOut.svg + + + + + + + + + + + + + + + + + + + + + + + +3DViewersViewRecentered.svg + + + + + + + + + + +3DViewersViewFromLeft.svg + + + + + + + + + + + + + + + + + + + + +3DViewersRock.svg + + + + + + + + + + + + +3DViewersUseOrthographic.svg + + + + + + + + + + + + +3DViewersShadows.svg + + + + + + + + + + +3DViewersViewFromAnterior.svg + + + + + + + + + + + + + + + + + + + + + +3DViewersCentered.svg + + + + + + + + + +3DViewersMoreOptions.svg +
+

+

+ApplicationBasics/Arrows + + + + + + + + + + + +
+ + + + + + + +SlicerRightArrow.svg + + + + + + + + + +SlicerDoubleArrowLeft.svg + + + + + + + + + +SlicerDoubleArrowRight.svg + + + + + + + + +SlicerDownArrow.svg + + + + + + + + +SlicerUpArrow.svg + + + + + + + + + +SlicerDoubleArrowUp.svg + + + + + + + + +SlicerLeftArrow.svg + + + + + + + + + +SlicerDoubleArrowDown.svg +
+

+

+ApplicationBasics/Communications + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +SlicerInfoLeft.svg + + + + + + + + +SlicerConfigure.svg + + + + + + + + + +SlicerPending1-of-3.svg + + + + + + + + + + + +SlicerStarting.svg + + + + + + + + + +SlicerInfo.svg + + + + + + + + + + +SlicerHourglassBottomFull.svg + + + + + + + + + +SlicerWarning.svg + + + + + + + + + +SlicerComplete.svg + + + + + + + + + +SlicerError.svg + + + + + + + + + + +SlicerHome.svg +
+ + + + + + + + + +SlicerInfoRight.svg + + + + + + + + + +SlicerHelp.svg + + + + + + + + + + + + +SlicerComment.svg + + + + + + + + + + + + +SlicerHourglassWorking.svg + + + + + + + + + + + +SlicerPending3-of-3.svg + + + + + + + + + + +SlicerPending2-of-3.svg + + + + + + + + +SlicerExtensions.svg + + + + + + + + + + +SlicerEmptyHourglass.svg + + + + + + + + + + +SlicerHourglassTopFull.svg +
+

+

+ApplicationBasics/Configure + + + + + +
+ + + + + + + +SlicerConfigure.svg + + + + + + + + +SlicerExtensions.svg +
+

+

+ApplicationBasics/Layers + + + + + + + + + +
+ + + + + + + + + + +LowerLayer.svg + + + + + + + + + + +ShowLayersUI.svg + + + + + + + + + + + +RaiseLayer.svg + + + + + + + + + + + + +HideLayersUI.svg + + + + + + + + + + + + +DeleteLayer.svg + + + + + + + + + + + + +AddLayer.svg +
+

+

+ApplicationBasics/LogicalAndMathOps + + + + + + + +
+ + + + + + + +SlicerUnion.svg + + + + + + + + + +SlicerDifference.svg + + + + + + + + + + + + +SlicerIntersection.svg + + + + + + + + + + + + +SlicerExclusion.svg +
+

+

+ApplicationBasics/Plotting + + + + + + +
+ + + + + + + + + + + + + + +SlicerSegmentStatistics.svg + + + + + + + + + + + + + + + + + + + + +SlicerPlotSeries.svg + + + + + + + + + + + + + + + + + + +SlicerInteractivePlotting.svg +
+

+

+ApplicationBasics/SequencePlayer + + + + + + + + + + + + +
+ + + + + + + +SlicerRecordOrRecording.svg + + + + + + + + + +SlicerFirstOrSkipBackward.svg + + + + + + + + + + + +SlicerPause.svg + + + + + + + + + +SlicerLastOrSkipForward.svg + + + + + + + + +SlicerForward.svg + + + + + + + + + + + +SlicerLoop.svg + + + + + + + + +SlicerStoppedOrNotRecording.svg + + + + + + + + + +SlicerPlay.svg + + + + + + + + +SlicerBack.svg +
+

+

+ApplicationBasics/SubjectAndStudyRepresentation + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +MouseHierarchy.svg + + + + + + + + + + + + + + + + + + +BasicStudyHierarchy.svg + + + + + + + + + + + + + + + + + + + +CatHierarchy.svg + + + + + + + + + + + + + + + + + + + +PigHierarchy.svg + + + + + + + + + + +MouseSubject.svg + + + + + + + + + + + + + + +CatSubject.svg + + + + + + + + + + + + +MonkeySubject.svg + + + + + + + + + + + + +PigSubject.svg + + + + + + + + + + + + + + + + + + + + +MonkeyHierarchy.svg + + + + + + + + + + + + + + +TeddySubject.svg +
+ + + + + + + + + + + +HumanSubject.svg + + + + + + + + + + + + + + + + + + + + + + + + +SlicerDataHierarchy.svg + + + + + + + + + + + + +BasicStudy.svg + + + + + + + + + + + + + + + + +RectangleHierarchy.svg + + + + + + + + + + + + + + + + + +HumanHierarchy.svg + + + + + + + + + + + + + + + + + +RatHierarchy.svg + + + + + + + + +RectangleSubject.svg + + + + + + + + + + +RatSubject.svg + + + + + + + + + + + + + + + + + + + + +TeddyHierarchy.svg +
+

+

+ApplicationBasics/TableOps + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +SlicerTableUnLockRowHeader.svg + + + + + + + + + + + + + + + + +SlicerTableMoveColumnRight.svg + + + + + + + + + + + + + + +SlicerTable.svg + + + + + + + + + + + + + + + + + +SlicerTableUnLockColumnHeader.svg + + + + + + + + + + + + + + + + +SlicerTableMoveColumnLeft.svg + + + + + + + + + + + + + + + + +SlicerTableMoveRowUp.svg + + + + + + + + + + + + + + + + + +SlicerTableDeleteColumn.svg + + + + + + + + + + + + + + + + +SlicerTableMoveRowDown.svg + + + + + + + + + + + + + + + + + +SlicerTableAddRow.svg + + + + + + + + + + + + + + + + + +SlicerTableLockColumnHeader.svg +
+ + + + + + + + + + + + + + + + +SlicerTableAddColumn.svg + + + + + + + + + + + + + + + + + + + +SlicerTableLockRowHeader.svg + + + + + + + + + + + + + + + + + +SlicerTableDeleteRow.svg +
+

+

+ApplicationBasics/Toggles + + + + + + + + + + + +
+ + + + + + + + + +SlicerToggleOffLeft.svg + + + + + + + + + + + +SlicerToggleCheckBox.svg + + + + + + + + + + +SlicerToggleOnRight.svg + + + + + + + + + + + +SlicerToggleRadioButton.svg + + + + + + + + + + + + + + + + + + + + +SlicerToggleCheckBoxAll.svg + + + + + + + + + + + + + + +SlicerToggleVisibility.svg + + + + + + + + + + + + + + +SlicerToggleLock.svg + + + + + + + + + + + + +SlicerToggleLink.svg +
+

+

+ApplicationBasics/Transforms + + + + + + + + +
+ + + + + + + + + + + + + +SlicerLinearTransform.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerRemoveTransform.svg + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerTransform.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerAddTransform.svg + + + + + + + + + + + + + + +SlicerDeformableTransform.svg +
+

+

+ApplicationBasics/UIActions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +SlicerLock.svg + + + + + + + + +SlicerUnCheckBox.svg + + + + + + + + + + + + + + + +SlicerLink.svg + + + + + + + + + + + +SlicerCheckBoxAll.svg + + + + + + + + + + + + + + + + +SlicerTrash.svg + + + + + + + + + +SlicerMoreOptionsHoriz.svg + + + + + + + + + +SlicerSearch.svg + + + + + + + + + +SlicerBlock.svg + + + + + + + + +SlicerMinus.svg + + + + + + + + + + +SlicerCancel.svg +
+ + + + + + + + + + + + + + + + + + +SlicerUnPin.svg + + + + + + + + + + + +SlicerReset.svg + + + + + + + + + +SlicerDone.svg + + + + + + + + + +SlicerRemove.svg + + + + + + + + + + + + + +SlicerRestore.svg + + + + + + + + + +SlicerUndo.svg + + + + + + + + + +SlicerMoreOptionsVert.svg + + + + + + + + + +SlicerSelectColor.svg + + + + + + + + + + +SlicerUnCheckBoxAll.svg + + + + + + + + + +SlicerAdd.svg +
+ + + + + + + + + +SlicerUnlock.svg + + + + + + + + + + + + + + + + + + + +SlicerPin.svg + + + + + + + + + + + + + + + + + + + + +SlicerUnlink.svg + + + + + + + + + +SlicerCheckBox.svg + + + + + + + + + + + + + + + + + +SlicerUnHotLink.svg + + + + + + + + + + + + + + + +SlicerHotLink.svg + + + + + + + + + +SlicerRedo.svg +
+

+

+ApplicationBasics/Visibility + + + + + + +
+ + + + + + + + + + + + +SlicerInvisible.svg + + + + + + + + + + +SlicerVisible.svg + + + + + + + + + + + + + + + +SlicerSubsetVisibility.svg +
+

+

+Capture + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + +SlicerSceneViewCaptureV2.svg + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerSceneViewRestoreOrDeleteV2.svg + + + + + + + + + + + + + + + + + + + + +SlicerSceneViewCapture.svg + + + + + + + + + + + + + + + + + + + + + + + +SlicerSceneViewRestoreOrDelete.svg + + + + + 24dpSlicerIconTemplate + + Layer 1 + + + + + + + + + + + + + + + + 24dpSlicerIconTemplate + + + + +SlicerCapture.svg +
+

+

+DataIO + + + + + + + + + + + + +
+ + + + + + + + + + +SlicerDownload.svg + + + + + + + + + + + + + + + + +DICOMIO.svg + + + + + + + + + +SlicerLoadData.svg + + + + + + + +SlicerLoadFromCloud.svg + + + + + + + + + + + + +SlicerDownloadBundle.svg + + + + + + + +SlicerSaveToCloud.svg + + + + + + + + + +SlicerSaveData.svg + + + + + + + + + + + + + +SlicerUpload.svg + + + + + + + + + + + + +SlicerDownloadExtension.svg +
+

+

+ModuleNavigation + + + + + + + + +
+ + + + + + + + + + + + + + + +ModuleHistory.svg + + + + + + + + + + + + + + + +ModulePrevious.svg + + + + + + + + + + + + + + + +ModuleNext.svg + + + + + + + + + + +ModuleSearch.svg + + + + + + + + + + + + + + + + + +SelectModule.svg +
+

+

+Modules/CoreToolbarModules + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + +MarkupsModule.svg + + + + + + + + + + + + + + + + + + +TransformsModule.svg + + + + + + + + + + + + + + + + +SegmentEditorModule.svg + + + + + + + + + + + + + + + + + + + +WelcomeModule.svg + + + + + + + + + + + + + + + + +Annotations.svg + + + + + + + + + + + + + + + + + + + + + + + + +SegmentationsModule.svg + + + + + + + + + + + + + + + + + +DefaultModule.svg + + + + + + + + + + + + + + + + + + + +DataModule.svg + + + + + + + + + + + + + + + + + +ModelsModule.svg + + + + + 24dpSlicerIconTemplate + + + + 24dpSlicerIconTemplate + + + + + + + + + + + + + + + + + + + + + + + + +VolumesModule.svg +
+

+

+Modules/MarkupsModule + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +MarkupsAddLine.svg + + + + + + + + + + + + + + + + + + + +MarkupsAddPointList.svg + + + + + + + + + + + + + + + + + + + +MarkupsAddROI.svg + + + + + + + + + + + + + + + + + + + +MarkupsAddClosedCurve.svg + + + + + + + + + + + + + + + + + + + +MarkupsAddAngle.svg + + + + + + + + + + + + + + + + + + + +MarkupsAddPlane.svg + + + + + + + + + + + + + + + + + + + +MarkupsAddOpenCurve.svg + + + + + + + + + + + + + + + + + + + +MarkupsAddPoint.svg +
+

+

+Modules/SegmentEditorModule + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +Margins.svg + + + + + + + + + + + + + + + +Show3D.svg + + + + + + + + + + + + +FastMarching.svg + + + + + + + + + + + + + + + + +Threshold.svg + + + + + + + + + + + + +Draw.svg + + + + + + + + + + + + + + + + + + + + + + + + + +SplitVolume.svg + + + + + + + + + + + + + +Paint.svg + + + + + + + + + + + + + + + + +SlicerTrash.svg + + + + + + + + + + + + + + + + + + +FillBetweenSlices.svg + + + + + + + + + + + + + +FloodFill.svg +
+ + + + + + + + + + + + + + + +Engrave.svg + + + + + + + + + + + + + +SegmentTubes.svg + + + + + + + + + + + + + + + + + + + + +LogicalOperations.svg + + + + + + + + + + + + + + + + + + + + + + +LocalThreshold.svg + + + + + + + + + + + + + + + + + + +Hide3D.svg + + + + + + + + + + +Hollow.svg + + + + + + + + + +SlicerRemove.svg + + + + + + + + + + + + + + + + + + +Erase.svg + + + + + + + + + + + + + + + + +GrowFromSeed.svg + + + + + + + + + + + + + + + + + + +SurfaceCut.svg +
+ + + + + + + + + + + + + + + +Watershed.svg + + + + + + + + + + + + + + + + + + + + + + + + + + +GoToSegmentationsModule.svg + + + + + + + + + +SlicerUndo.svg + + + + + + + + + +Select.svg + + + + + + + + + +SlicerAdd.svg + + + + + + + + + + + + + + + + + + + + + + + +MaskVolume.svg + + + + + + + + + + +Smooth.svg + + + + + + + + + + + + + +LevelTrace.svg + + + + + + + + + + + + + + +Islands.svg + + + + + + + + + +SlicerRedo.svg +
+ + + + + + + + + + +ContentCut.svg +
+

+

+Modules/SlicerIHEviewer + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +SlicerIHE-Pan.svg + + + + + + + + + + + +SlicerIHE-CineStepForward.svg + + + + + + + + + + +SlicerIHE-SelectViewport.svg + + + + + + + + + + + + + +SlicerIHE-LayoutWithinViewport.svg + + + + + + + + + + +SlicerIHE-CinePlay.svg + + + + + + + + ? + + + +SlicerIHE-Help.svg + + + + + + + + + +SlicerIHE-FlipHorizontal.svg + + + + + + + + + + + + + +SlicerIHE-Crosshair.svg + + + + + + + + + + +SlicerIHE-CineGoToStart.svg + + + + + + + + + +SlicerIHE-PreviousFrame.svg +
+ + + + + + + + + + + + + + + + + +SlicerIHE-UnLink.svg + + + + + + + + + + + + + + + +SlicerIHE-Rotate90Clockwise.svg + + + + + + + + + +SlicerIHE-NextFrame.svg + + + + + + + + + + + + +SlicerIHE-ZoomIn.svg + + + + + + + + + +SlicerIHE-PrevioustStudy.svg + + + + + + + + + + + + + + +SlicerIHE-LocalizerLines.svg + + + + + + + + + + + + + +SlicerIHE-SelectPatient.svg + + + + + + + + + + + +SlicerIHE-Scroll.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Slicer-IHEWindowLevelColor.svg + + + + + + + + + + + + + +SlicerIHE-Print.svg +
+ + + + + + + + + + +SlicerIHE-CineStepBackward.svg + + + + + + + + + + + + + + +SlicerIHE-DisplayReset.svg + + + + + + + + + + + +SlicerIHE-AdvancedMoreOptions.svg + + + + + + + + + + + + + + + + + + + + + + + + +SlicerIHE-LayoutMultipleViewports.svg + + + + + + + + + + +SlicerIHE-CineGoToEnd.svg + + + + + + + + + + + + + + + + + + + + +SlicerIHE-RulerMeasure.svg + + + + + + + + + + + + + + + + +SlicerIHE-Link.svg + + + + + + + + + + +SlicerIHE-PreviousFrameSet.svg + + + + + + + + + + + + + + + + +SlicerIHE-ShowReport.svg + + + + + + + + + + + + + + + + + + + +SlicerIHE-CineTools.svg +
+ + + + + + + + + + + + + + + + + + + + +SlicerIHE-Annotation.svg + + + + + + + + + + +SlicerIHE-NextFrameSet.svg + + + + + + + + + + + + +SlicerIHE-InvertGreyscale.svg + + + + + + + + + +SlicerIHE-NextStudy.svg + + + + + + + + + +SlicerIHE-CineStop.svg + + + + + + + + + + + +SlicerIHE-ZoomOut.svg + + + + + + + + + + + + + + +SlicerIHE-AngleMeasure.svg +
+

+

+MouseModes + + + + + + +
+ + + + + + + + + + + + + + + + + + + + +SlicerMarkupsMenuToggle.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerWindowLevel.svg + + + + + + + + + + + + + +SlicerMouseModeTransformAdjust.svg +
+

+

+SliceViewers + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +BlendMode.svg + + + + + + + + + + + + + + + + + + +RulerDisplayOptions.svg + + + + + + + + + + + + + + + + + + + + + +RotateToVolumePlane.svg + + + + + + + + + + + + + + +OrientationMarker.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +LabelMapDisplayFill.svg + + + + + + + + + + + + + + + + + + + + + + +SlicePlanes-Sagittal.svg + + + + + + + + + + + + + + + + +LightboxView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +LabelMapDisplayOutline.svg + + + + + + + + + + + + + + + + + + + + + + + +SegmentationDisplayFillAndOutline.svg + + + + + + + + + + + + + + + + + + + + + + + + + + +ResetFieldOfView.svg +
+ + + + + + + + + + + + + + + + + + + + + +SegmentationLayer.svg + + + + + + + + + + + +ToggleOFF-MaximizeView.svg + + + + + + + + + + + +ToggleOFF-ReformatWidget.svg + + + + + + + + + + + + + + + + + + + + +ForegroundLayer.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +ThickSlabReconstruction.svg + + + + + + + + + + + + + + + + + + + + +BackgroundLayer.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + +SliceSpacing.svg + + + + + + + + + + + + + + + + + + + + + +SegmentationDisplayOutline.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +ToggleON-Interpolation.svg + + + + + + + + + + + + + + + + + + + + + + + + + +LabelMapLayer.svg +
+ + + + + + + + + + + + + + + + + +SlicePlanes-Coronal.svg + + + + + + + + + + + + + +EachSegmentVisibility.svg + + + + + + + + + + + + + + +ToggleON-ReformatWidget.svg + + + + + + + + + +ToggleON-MaximizeView.svg + + + + + + + + + + + + + + + + + + + + + + + +SegmentationDisplayFill.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +ToggleOFF-Interpolation.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +LabelMapDisplayFillAndOutline.svg + + + + + + + + + + + + + + + + + + + + + + +SlicePlanes-Axial.svg +
+

+

+SlicerCommonSymbols + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +SlicerInvisible.svg + + + + + + + + + + + +SlicerUpPointingSelect.svg + + + + + + + +SlicerCloud.svg + + + + + + + + + + + +SlicerRightPointingSelect.svg + + + + + + + + + + + + + + + + + + +SlicerSimple3DView.svg + + + + + + + + + +SlicerSearch.svg + + + + + + + + + + +SlicerVisible.svg + + + + + + + + + +SlicerGrid.svg + + + + + + + + + + + + + + + + + + + +Slicer3DView.svg + + + + + + + + + + + + +SlicerSliceView.svg +
+ + + + + + + + +SlicerSlashForNoNotOff.svg + + + + + + + +SlicerFolder.svg + + + + + + + + + +SlicerLeftPointingSelect.svg +
+

+

+SlicerDataSymbols + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +SlicerMarkupPoint.svg + + + + + + + + + + + + + + +SlicerTable.svg + + + + + + + + + + + + + + + + + + + + + +SlicerImage.svg + + + + + + + + + +SlicerAIsparkles.svg + + + + + + + + + + + + + + +SlicerDataBundle.svg + + + + + + + + + + + + +SlicerDatabase.svg + + + + + 24dpSlicerIconTemplate + + + + 24dpSlicerIconTemplate + + + + + + + + + + + + + + + +SlicerSourceVolume.svg + + + + + + + + + + + + + + + + + + +SlicerModel.svg + + + + + + + + + + + + + + + + + + + + + +SlicerLabelMapImage.svg + + + + + + + + + + + + + + + + + + + + + + + + +SlicerDataHierarchy.svg +
+ + + + + + + + + + + + + + + + + + + + + + + + +SlicerTransform.svg + + + + + + + + + + + + + + + + + + + + + + + + +SlicerSegmentation.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerScalarOverlayImage.svg + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerScalarVolume.svg +
+

+

+SpatialProbes + + + + + +
+ + + + + + + + + + + + + +SlicerSlicePlanesOptions.svg + + + + + + + + + + + + +SlicerCrosshair.svg +
+

+

+ViewerConfiguration + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +SlicerRedSliceOnlyView.svg + + + + + + + + + + + + +SlicerYellowSliceOnlyView.svg + + + + + + + + + + + + + + + + + + + + +SlicerGreenSliceLightbox.svg + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerDual3DView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerCompareGridView.svg + + + + + + + + + + + + + + + + + + + + + + + +SlicerFourUpPlotView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerThreeOverThreePlotView.svg + + + + + + + + + + + + + + + + + + + + + + + +SlicerFourByTwoSliceView.svg + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerTabbed3DView.svg + + + + + + + + + + + + + + + + + + + + +SlicerRedSliceLightbox.svg +
+ + + + + + + + + + + + + + + + + + + +SlicerYellowSliceLightbox.svg + + + + + + + + + + + + +SlicerGreenSliceOnlyView.svg + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerFourUpQuantitativeView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerFourByThreeSliceView.svg + + + + + + + + + + + + + + + + +SlicerCompareWidescreenView.svg + + + + + + + + + + + + + + + + + + + + +SlicerConventionalWidescreenView.svg + + + + + + + + + + + + + + + + + +SlicerPlotOnlyView.svg + + + + + + + + + + + + + + + +SlicerTwoOverTwoSliceView.svg + + + + + + + + + + + + + + + + + + + + + +SlicerFourUpTableView.svg + + + + + + + + + + + + + + + + + + + + +SlicerConventionalView.svg +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerConventionalPlotView.svg + + + + + + + + + + + + + + + + + + + +SlicerThreeOverThreeSliceView.svg + + + + + + + + + + + + + + + + + + +Slicer3DOnlyView.svg + + + + + + + + + + + + + + + + + + +SlicerSideBySideSliceView.svg + + + + + + + + + + + + + + + + + + + + +SlicerTabbedSliceView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerTriple3DView.svg + + + + + + + + + + + + + + + + + + + +SlicerFourUpView.svg + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerThreeByThreeSliceView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerFourOverFourView.svg + + + + + + + + + + + + + + + + +SlicerCompareView.svg +
+ + + + + + + + + + + + + + + + + + + + + + +Slicer3DTableView.svg +
+

+

+ViewerConfigurationV2 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +SlicerRedSliceOnlyView.svg + + + + + + + + + + + + +SlicerYellowSliceOnlyView.svg + + + + + + + + + + + + + + + + + + + + +SlicerGreenSliceLightbox.svg + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerDual3DView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerCompareGridView.svg + + + + + + + + + + + + + + + + + + + + + + + +SlicerFourUpPlotView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerThreeOverThreePlotView.svg + + + + + + + + + + + + + + + + + + + + + + + +SlicerFourByTwoSliceView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerTabbed3DView.svg + + + + + + + + + + + + + + + + + + + + +SlicerRedSliceLightbox.svg +
+ + + + + + + + + + + + + + + + + + + +SlicerYellowSliceLightbox.svg + + + + + + + + + + + + +SlicerGreenSliceOnlyView.svg + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerFourUpQuantitativeView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerFourByThreeSliceView.svg + + + + + + + + + + + + + + + + +SlicerCompareWidescreenView.svg + + + + + + + + + + + + + + + + + + + + + +SlicerConventionalWidescreenView.svg + + + + + + + + + + + + + + + + + +SlicerPlotOnlyView.svg + + + + + + + + + + + + + + + +SlicerTwoOverTwoSliceView.svg + + + + + + + + + + + + + + + + + + + + + + +SlicerFourUpTableView.svg + + + + + + + + + + + + + + + + + + + + + +SlicerConventionalView.svg +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerConventionalPlotView.svg + + + + + + + + + + + + + + + + + + + +SlicerThreeOverThreeSliceView.svg + + + + + + + + + + + + + + + + + + + +Slicer3DOnlyView.svg + + + + + + + + + + + + + + + + + + +SlicerSideBySideSliceView.svg + + + + + + + + + + + + + + + + + + + + +SlicerTabbedSliceView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerTriple3DView.svg + + + + + + + + + + + + + + + + + + + + +SlicerFourUpView.svg + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerThreeByThreeSliceView.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerFourOverFourView.svg + + + + + + + + + + + + + + + + +SlicerCompareView.svg +
+ + + + + + + + + + + + + + + + + + + + + + + +Slicer3DTableView.svg +
+

+

+VisualizationAndDisplayOptions + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerHideModelScalarOverlay.svg + + + + + + + + + +SlicerShowDeformationGrid.svg + + + + + + + + + + + + + + + + + + + + + + + + +SlicerShowVolumeScalarOverlay.svg + + + + + + + + + +SlicerShowGrid.svg + + + + + + + + + +SlicerHideDeformationGrid.svg + + + + + + + + + +SlicerHideSphereGlyphs.svg + + + + + + + + + +SlicerHideIsoContours.svg + + + + + + + + + +SlicerShowIsoContours.svg + + + + + + + + + +SlicerShowSphereGlyphs.svg + + + + + + + + + +SlicerShowConeGlyphs.svg +
+ + + + + + + + +SlicerHideArrowGlyphs.svg + + + + + + + + + +SlicerShowArrowGlyphs.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerHideVolumeScalarOverlay.svg + + + + + + + + + +SlicerShowIsoSurfaces.svg + + + + + + + + + +SlicerHideIsoSurfaces.svg + + + + + + + + + +SlicerHideConeGlyphs.svg + + + + + + + + + +SlicerSelectColor.svg + + + + + + + + + +SlicerHideGrid.svg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +SlicerShowModelScalarOverlay.svg +
+

+ + diff --git a/PW42_2025_GranCanaria/Projects/FinalizeSlicerIconSetUpdateInfrastructure/README.md b/PW42_2025_GranCanaria/Projects/FinalizeSlicerIconSetUpdateInfrastructure/README.md new file mode 100644 index 000000000..17b03712a --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/FinalizeSlicerIconSetUpdateInfrastructure/README.md @@ -0,0 +1,81 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Finalize Slicer Icon set update infrastructure +category: Infrastructure + +key_investigators: + +- name: Sam Horvath + affiliation: Kitware + country: Inc + +- name: Wendy Plesniak + affiliation: BWH + country: USA + +--- + +# Project Description + + + + +We will be continuing the work to integrate the new icon set into 3D Slicer. + + + +## Objective + + + + +1. Finalize icon switching logic initially developed in last project week + + + + +## Approach and Plan + + + + +1. Review icon switching approach from last PW +2. Finish remaining work items and create a PR on Slicer Core +3. Create dependent PR with the actual updated icons +4. Fix broken images in PW 41 page + + + + +## Progress and Next Steps + + + + +1. Reviewed final icon set from Wendy +2. 
Cleaned up PR - should be ready to go now + + + +# Illustrations + + + +[Light](https://projectweek.na-mic.org/PW42_2025_GranCanaria/Projects/FinalizeSlicerIconSetUpdateInfrastructure/LightThemeIconsIndex.html) +[Dark](https://projectweek.na-mic.org/PW42_2025_GranCanaria/Projects/FinalizeSlicerIconSetUpdateInfrastructure/DarkThemeIconsIndex.html) + +_No response_ + + + +# Background and References + + + + +- [PW 41 Project Page](https://projectweek.na-mic.org/PW41_2024_MIT/Projects/UpdatedIconsAndThemeSwitching/) diff --git a/PW42_2025_GranCanaria/Projects/ITKForAndroidProj/README.md b/PW42_2025_GranCanaria/Projects/ITKForAndroidProj/README.md new file mode 100644 index 000000000..855c52ab8 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/ITKForAndroidProj/README.md @@ -0,0 +1,169 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Compile ITK for Android ARM64 +category: Infrastructure + +key_investigators: +- name: Attila Nagy + affiliation: University of Szeged + country: Hungary + +- name: Attila Tanács + affiliation: University of Szeged + country: Hungary + +- name: Steve Pieper + affiliation: Isomics Inc. + country: USA +--- + +# Project Description + + +To investigate if and how ITK (later VTK, and sometime, maybe, in the distant future, Slicer) can be compiled and run on a mobile phone. Especially interesting is Samsung's DeX mode, with a desktop-like experience. + +## Objective + + + +1. Objective A. Set up a build environment +1. Objective B. See if and how ITK can be built +1. Objective C. Hopefully test it too + +## Approach and Plan + + + +1. Review Android tools, cross-compilation options +1. Set up the environment + +## Progress and Next Steps + + + +1. Environment setup done +1. Compilation done, at 100%! :) + +After setting up the software and the environment variables, the build was done on a freshly installed Debian 12.
+Only cmake was installed from a repo. + +Several problems were encountered: + +- ITK’s CMake configuration tries to determine whether libc++ (LLVM C++ standard library) is available when cross-compiling for Android, but the test fails due to the lack of a runtime execution environment. Since Android cross-compilation cannot execute tests on the build machine, we need to manually provide the expected results. +It is fixed by adding +```cmake + -D_libcxx_run_result=0 \ + -D_libcxx_run_result__TRYRUN_OUTPUT="" \ +``` +to the configure line. + +- ld complained about ZLIB, likely because the build system tried to use a different zlib, so we have to force ITK to use the Android NDK zlib. +Example (several similar others popped up too): +``` +ld.lld: error: version script assignment of 'ZLIB_1.2.0' to symbol 'compressBound' failed: symbol not defined +``` +It is fixed by: +```cmake + -DITK_USE_SYSTEM_ZLIB=ON +``` + +- Android does not provide iconv, so it had to be built, as GDCM needs it. +After some play, it was easy, and it could be compiled, and later linked into ITK by using the following ENV vars and configure line: +``` +export ANDROID_NDK_HOME=/home/attila/Android/Sdk/ndk/28.0.12916984 +export TOOLCHAIN=$ANDROID_NDK_HOME/toolchains/llvm/prebuilt/linux-x86_64 +export TARGET=aarch64-linux-android +export API=21 # Adjust API level as needed +export AR=$TOOLCHAIN/bin/llvm-ar +export AS=$TOOLCHAIN/bin/llvm-as +export CC=$TOOLCHAIN/bin/$TARGET$API-clang +export CXX=$TOOLCHAIN/bin/$TARGET$API-clang++ +export LD=$TOOLCHAIN/bin/ld +export RANLIB=$TOOLCHAIN/bin/llvm-ranlib +export STRIP=$TOOLCHAIN/bin/llvm-strip +``` +``` +./configure --host=$TARGET --prefix=$PWD/android-build --disable-static --enable-shared +``` +The built iconv of course had to be set in the cmake configure stage: +```cmake + -DGDCM_USE_SYSTEM_ICONV=ON \ + -DCMAKE_C_FLAGS:STRING="-I/home/attila/libiconv-1.18/android-build/include -Dfar=far_nifti" \ + -DCMAKE_CXX_FLAGS:STRING="-Dfar=far_nifti" \ +
-DCMAKE_EXE_LINKER_FLAGS="-L/home/attila/libiconv-1.18/android-build/lib /home/attila/libiconv-1.18/android-build/lib/libiconv.so" \ + -DCMAKE_SHARED_LINKER_FLAGS="-L/home/attila/libiconv-1.18/android-build/lib /home/attila/libiconv-1.18/android-build/lib/libiconv.so" + -DGDCM_ICONV_INCLUDE_DIR=/home/attila/libiconv-1.18/android-build/include +``` + +There was another problem: +On Android, far is sometimes defined as a macro (especially in or ) for compatibility with legacy 16-bit platforms. +This means that the compiler throws errors in: +``` +/home/attila/ITK/Modules/ThirdParty/NIFTI/src/nifti/niftilib/nifti1_io.c:5022:20: error: expected identifier or '(' + 5022 | float *far = (float *)dataptr ; size_t jj,nj ; + | ^ +/home/attila/ITK/Modules/ThirdParty/NIFTI/src/nifti/niftilib/nifti1_io.c:5025:34: error: expected expression + 5025 | if( !IS_GOOD_FLOAT(far[jj]) ){ + | ^ +/home/attila/ITK/Modules/ThirdParty/NIFTI/src/nifti/niftilib/nifti1_io.c:5026:18: error: expected expression + 5026 | far[jj] = 0 ; + | ^ +/home/attila/ITK/Modules/ThirdParty/NIFTI/src/nifti/niftilib/nifti1_io.c:5034:21: error: expected identifier or '(' + 5034 | double *far = (double *)dataptr ; size_t jj,nj ; + | ^ +/home/attila/ITK/Modules/ThirdParty/NIFTI/src/nifti/niftilib/nifti1_io.c:5037:34: error: expected expression + 5037 | if( !IS_GOOD_FLOAT(far[jj]) ){ + | ^ +/home/attila/ITK/Modules/ThirdParty/NIFTI/src/nifti/niftilib/nifti1_io.c:5038:18: error: expected expression + 5038 | far[jj] = 0 ; + | ^ +``` + +This could be overcome by forcing CMake to override the far macro by adding this flag to the configure line: +``` + -DCMAKE_C_FLAGS="-Dfar=far_nifti" \ +``` + +So the final configure line was this: +```cmake +cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_HOME/ndk/28.0.12916984/build/cmake/android.toolchain.cmake \ + -S /home/attila/ITK \ + -B /home/attila/ITK/itk-build \ + -DANDROID_ABI=arm64-v8a \ + -DANDROID_PLATFORM=android-21 \ + -DCMAKE_BUILD_TYPE=Release \ + -DBUILD_SHARED_LIBS=ON \ + 
-DITK_BUILD_DEFAULT_MODULES=ON \ + -DITK_WRAP_PYTHON=OFF \ + -DCMAKE_CROSSCOMPILING_EMULATOR="" \ + -DHAVE_CLOCK_GETTIME_RUN=1 \ + -DITK_USE_SYSTEM_ZLIB=ON \ + -D_libcxx_run_result=0 \ + -D_libcxx_run_result__TRYRUN_OUTPUT="" \ + -DGDCM_USE_SYSTEM_ICONV=ON \ + -DCMAKE_C_FLAGS:STRING="-I/home/attila/libiconv-1.18/android-build/include -Dfar=far_nifti" \ + -DCMAKE_CXX_FLAGS:STRING="-Dfar=far_nifti" \ + -DCMAKE_EXE_LINKER_FLAGS="-L/home/attila/libiconv-1.18/android-build/lib /home/attila/libiconv-1.18/android-build/lib/libiconv.so" \ + -DCMAKE_SHARED_LINKER_FLAGS="-L/home/attila/libiconv-1.18/android-build/lib /home/attila/libiconv-1.18/android-build/lib/libiconv.so" + -DGDCM_ICONV_INCLUDE_DIR=/home/attila/libiconv-1.18/android-build/include +``` + +A few shortcomings: +- Python bindings were not built +- No deployment has yet been made to test it on device + +# Illustrations + + +[Screenshot of a 100% build](VirtualBox_Slicer_Android_30_01_2025_23_40_43.png) + + +# Background and References + +This project relies on the ["Builds of Slicer for ARM-based systems Mac and Linux"] project (https://projectweek.na-mic.org/PW42_2025_GranCanaria/Projects/BuildsOfSlicerForArmBasedSystemsMacAndLinux/) diff --git a/PW42_2025_GranCanaria/Projects/ITKForAndroidProj/VirtualBox_Slicer_Android_30_01_2025_23_40_43.png b/PW42_2025_GranCanaria/Projects/ITKForAndroidProj/VirtualBox_Slicer_Android_30_01_2025_23_40_43.png new file mode 100644 index 000000000..839f094f6 Binary files /dev/null and b/PW42_2025_GranCanaria/Projects/ITKForAndroidProj/VirtualBox_Slicer_Android_30_01_2025_23_40_43.png differ diff --git a/PW42_2025_GranCanaria/Projects/ImprovedAutomatedSegmentationOfDentalCbctImagesWithAuto3Dseg/README.md b/PW42_2025_GranCanaria/Projects/ImprovedAutomatedSegmentationOfDentalCbctImagesWithAuto3Dseg/README.md new file mode 100644 index 000000000..ad08a0c62 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/ImprovedAutomatedSegmentationOfDentalCbctImagesWithAuto3Dseg/README.md @@ 
-0,0 +1,98 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Improved automated segmentation of dental CBCT images with Auto3DSeg +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Csaba Pinter + affiliation: EBATINCA + country: Spain + +- name: Daniel Palkovics + affiliation: Semmelweis University + country: Hungary + +- name: Andres Diaz-Pinto + affiliation: NVIDIA + country: UK + +--- + +# Project Description + + + + +Majority of currently available deep learning (DL) cone-beam computed tomography (CBCT) segmentation models were trained on data of healthy, completely dentated patients. These models might not produce accurate segmentations of datasets with dentoalveolar hard tissue defects. Our group has previously developed a Deep Learning-based model for the automatic segmentation of dental cone-beam computed tomography (CBCT) scans which was trained on CBCT images with dentoalveolar pathological processes [1][2]. The current model uses a two-staged SegResNet-based architecture from MONAILabel. Despite the relatively low sample training data it produced sufficient accuracy (93% compared to semi-automatic segmentation). However, the model's robustness has to be improved. Using the MONAI Auto3DSeg framework and an enlarged training database the project aims to develop an improved model for the automatic segmentation of dental CBCT scans present with dentoalveolar pathological processes. + + + +## Objective + + + + +We have previously trained a two-stage SegResNet-based model for the automatic segmentation of dental CBCT scans. The project was initiated at the [36th project week](https://projectweek.na-mic.org/PW36_2022_Virtual/Projects/AutomaticSegmentationofTeethandAlveolarBone/). +The goal is to re-train the model including the new training data and the latest DL tools. + + + + +## Approach and Plan + + + + +1. Established and enlarged training database with uniformly annotated CBCT data. +2.
Decide for an adequate network framework and architecture (MONAI Auto3DSeg?) +3. Come up with an initial configuration of the chosen architecture (stages, options, pre- and post-processing) +4. Perform preliminary training on the available data + + + +## Progress and Next Steps + + + +* Discussion with Andres: + * The existing teeth models were trained using MONAILabel using a two-stage approach + * Stage 1 did a single label teeth segmentation mainly to determine the narrower ROI for the next stage + * Stage 2 cropped the image to the spine label of stage 1's ROI and ran the multi-stage inference + * Auto3DSeg does not inherently support a multi-stage approach, but it can be done by successively running two individual models + * Ebatinca developed a labelmap to labelmap model, which worked great and required little training data for vertebra posterior element removal, maybe we could leverage this approach +* The single stage approach for bone (and nerve) segmentation can be kept. Training using Auto3DSeg is very similar to training using MONAILabel +* Possible approaches for the teeth + 1. Try single stage segmentation; it is possible that Auto3DSeg has an superior performance even with a single stage (it automatically predicts the best hyperparameters) + 2. Reproduce the same two-stage approach as before. One model to get the ROI, and a subsequent step segments the individual teeth + 3. Different two-stage approach, where the first stage segments all teeth as a single label, and a second stage separates them to individual teeth (see vertebra body segmentation above) + 4. 
Implement all the above and vote + + +# Illustrations + + + + + +![Fig1 copy](https://github.com/user-attachments/assets/f681cb64-609c-47dc-8f33-08205141bd6a) +Two-stage SegResNet architecture + + +![Preop](https://github.com/user-attachments/assets/fb5ac395-64c0-4eb2-ad31-7a70a0a65769) +A: semi-automatic segmentation, B: deep learning segmentation + + + +# Background and References + + + + +1. Hegyi, A., Somodi, K., Pintér, C., Molnár, B., Windisch, P., García-Mato, D., Diaz-Pinto, A., & Palkovics, D. (2024). Mesterséges intelligencia alkalmazása fogászati cone-beam számítógépes tomográfiás felvételek automatikus szegmentációjára [Automatic segmentation of dental cone-beam computed tomography scans using a deep learning framework]. Orvosi hetilap, 165(32), 1242–1251. [https://doi.org/10.1556/650.2024.33098](https://doi.org/10.1556/650.2024.33098) +2. Palkovics, D., Hegyi, A., Molnar, B., Frater, M., Pinter, C., García-Mato, D., Diaz-Pinto, A., & Windisch, P. (2025). Assessment of hard tissue changes after horizontal guided bone regeneration with the aid of deep learning CBCT segmentation. Clinical oral investigations, 29(1), 59. 
[https://doi.org/10.1007/s00784-024-06136-w](https://doi.org/10.1007/s00784-024-06136-w) diff --git a/PW42_2025_GranCanaria/Projects/InfrastructureForCustomTerminologyAndColorTablesInSlicer/README.md b/PW42_2025_GranCanaria/Projects/InfrastructureForCustomTerminologyAndColorTablesInSlicer/README.md new file mode 100644 index 000000000..35fbb2a8b --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/InfrastructureForCustomTerminologyAndColorTablesInSlicer/README.md @@ -0,0 +1,98 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Infrastructure for custom terminology and color tables in Slicer +category: Infrastructure + +key_investigators: + +- name: Csaba Pinter + affiliation: Ebatinca + country: Spain + +- name: Andras Lasso + affiliation: Queens University + country: Canada + +- name: Murat Maga + affiliation: University of Washington + country: USA + +- name: Steve Pieper + affiliation: Isomics + country: USA + +- name: Andriy Fedorov + affiliation: BWH + country: USA + +- name: David Clunie + affiliation: PixelMed + country: USA + +--- + +# Project Description + + + + +Terminologies module are meant to give a structure around using pre-determined set of anatomical and developmental terms for segmentation tasks. This is meant to avoid potential typos for people not familiar with anatomical terminology (e.g., humerus vs humorous, sagittal vs saggital) and give a consistent look and feel (e.g., assign consistent colors to a segmentation across multiple datasets). + +But the existing structure is too rigid, and often is missing terms. We need a flexible structure for people to create and use their own terms when the existing terminiologies are insufficient + + + +## Objective + + + + +1. To resolve this we have created an [issue page](https://github.com/Slicer/Slicer/issues/6975) +2. and working towards resolving the [identified issues](https://github.com/Slicer/Slicer/pull/8112) +3. 
Discuss how to [design and implement an infrastructure to share user-generated color tables (as well as custom terminologies, volume rendering presets, etc).](https://github.com/Slicer/Slicer/issues/6975#issuecomment-2581121209) + +## Approach and Plan + + + + +1. We are looking into using custom color tables and importing them as terminologies as a solution for flexibility and consistency. +2. Discuss the current design considering all known use cases in a breakout session + + + +## Progress and Next Steps + + + + +1. There is a [PR](https://github.com/Slicer/Slicer/pull/8112) that addresses some of the issues. +2. We had a breakout session on Tuesday with all the key participants. Takeaways: + * The proposed design generally looks acceptable + * Need a validator to be able to ensure compatibility with main ontologies + * Need a proper documentation of the new features and their interactions with all data types and use cases + * Fix bugs, finalize PR, allow people to test in 5.9 +3.
Progress with the PR: small fixes and outstanding issues + +# Illustrations + + + +![image](https://github.com/user-attachments/assets/7deac812-bc55-4e88-8640-247f89429944) + + + +# Background and References + + + + +* [Pull request](https://github.com/Slicer/Slicer/pull/8112) +* Issue [#7593](https://github.com/Slicer/Slicer/issues/7593) +* Issue [#6975](https://github.com/Slicer/Slicer/issues/6975) diff --git a/PW42_2025_GranCanaria/Projects/IntegrationOfJLichBrainAtlasTo3DSlicerEpistimProject/README.md b/PW42_2025_GranCanaria/Projects/IntegrationOfJLichBrainAtlasTo3DSlicerEpistimProject/README.md new file mode 100644 index 000000000..9c3d17858 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/IntegrationOfJLichBrainAtlasTo3DSlicerEpistimProject/README.md @@ -0,0 +1,116 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Integration of Jülich Brain Atlas to 3D Slicer EpiSTIM project +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Sara Fernandez Vidal + affiliation: Paris Brain Institute + country: France + +- name: Valerio Frazzini + affiliation: Paris Brain Institute - APHP + country: France + +--- + +# Project Description + + + + +We intend to integrate the Julich Brain Atlas into our 3D Slicer Extension dedicated to the SEEG procedure, called EpiSTIM. + +SEEG, or Stereo-Electroencephalography, is a medical procedure used to study epilepsy. It involves placing electrodes inside the brain to record its electrical activity. Neurologists use SEEG to find the exact areas of the brain causing seizures. It helps to decide the best treatment, like resection surgery or other therapies. + +EpiSTIM is a software project developed to assist neurosurgeons, neurologists and researchers in image processing tasks related to SEEG surgical procedures, from surgical stereotaxic planning to postoperative studies. + + +The Julich-Brain Atlas (Amunts et al.
Science 2020) contains cytoarchitectonic maps of more than 200 areas of the human brain including cortical areas and subcortical nuclei. This atlas is widely used in the epileptology community both in SEEG planning and postoperatively to localize intracranial activity recorded during clinical cognitive tasks or other types of tasks. + +The Julich-Brain is the foundation of the [Multilevel Human Brain Atlas](https://atlases.ebrains.eu/viewer/#/a:juelich:iav:atlas:v1.0.0:1/t:minds:core:referencespace:v1.0.0:dafcffc5-4826-4bf1-8ff6-46b8a31ff8e2/p:minds:core:parcellationatlas:v1.0.0:94c1125b-b87e-45e4-901c-00daee7f2579-290/@:0.0.0.-W000.._eCwg.2-FUe3._-s_W.2_evlu..7LIx..0.0.0..1LSm), which integrates neuroanatomical features with complementary maps of the molecular architecture, function and connectivity across multiple scales and is openly available to the research community via the Human Brain Project’s research infrastructure EBRAINS. + + + + +## Objective + + + + +1. Objective A. Describe **what you plan to achieve** in 1-2 sentences. + + + + +## Approach and Plan + + + + +1. Download the two versions of Julich Brain Atlas (on fsaverage and MNI templates) and visualize in 3D Slicer atlas. +2. Add Julich atlas terminology to the slicer +3. Add Julich data and terminology in EpiSTIM resources. +4. Map the Julich on Subject natif space for the planning module of the SEEG procedure +5. Add the Julich maps on the MNI visualisation of the postoperative reconstruction of the SEEG procedure + + +## Progress and Next Steps + + + + + + +I spent the most time exploring the latest Julich dataset published on EBRAIN and adapting the formats of certain annotations, colormaps and ontologies (thanks to Murat Maga) to 3D Slicer. In the first figure you can see one of the labelsmap in the MNI template. +And in the second one the Julich anotations in the Freesurfer Fsaverage template in the pial and inflate surfaces. 
+ +#### Next Steps + +I think I will prepare a 3D Slicer module to easily visualize and navigate all components, especially probability maps. + +And integrate the Julich in our SEEG toolbox. + +# Illustrations + + + + +#### Postoperative SEEG reconstruction EpiSTIM module + + +Image + +#### Jülich Brain Atlas + +![Image](https://github.com/user-attachments/assets/aa403d4b-34ce-4207-863b-be3847cdb9b6) + +![Image](https://github.com/user-attachments/assets/a66cfb45-207a-455e-bdc5-23a22634a3a4) + + +#### Jülich Brain Atlas in 3D Slicer + +Capture d’écran 2025-01-31 à 10 17 35 + + +Capture d’écran 2025-01-31 à 10 49 16 + +Capture d’écran 2025-01-31 à 10 50 37 + + +# Background and References + + + + + +[Jülich Atlas 👍](https://www.fz-juelich.de/de/inm/inm-1/aktuelles/meldungen/complete-data-package-of-julich-brain-atlas-released) + + +_No response_ diff --git a/PW42_2025_GranCanaria/Projects/JSONbasedscenefileformat/README.md b/PW42_2025_GranCanaria/Projects/JSONbasedscenefileformat/README.md new file mode 100644 index 000000000..e7eaa7375 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/JSONbasedscenefileformat/README.md @@ -0,0 +1,134 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: JSON based scene file format +category: Infrastructure + +key_investigators: + +- name: Davide Punzo + affiliation: freelancer, DNA-HIVE + country: France + +- name: Andras Lasso + affiliation: Queens University + country: Canada + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware + country: USA + +- name: Andres Diaz-Pinto + affiliation: NVIDIA + country: UK +--- + +# Project Description + + + +#### **Overview** + +The primary objective of this feature is to introduce support for **JSON format** to enable writing and reading the full MRML scene file and its nodes. This serves as preparatory work for: + +1. Supporting a more structured format for the MRML file with explicit datatypes. +2. 
Providing APIs in Slicer to facilitate collaborative features, such as retrieving and updating the status of individual nodes. + +With this setup, we would start a first step for future adoption and compatibility with standards like OpenUSD and real-time collaborative toolkits (e.g. Omniverse). For example, having MRML structured in JSON will make it much easier to convert nodes to OpenUSD. + +--- + +#### **Implemented Features** + +- **Node Status Printing**: + Nodes can now output their state in JSON format using the `WriteJSONToString` method. For example, in the Python console: + + ```python + crosshairNode.WriteJSONToString() + ``` + Output: + ```json + { + "Crosshair": { + "id": "vtkMRMLCrosshairNodedefault", + "name": "Crosshair", + "hideFromEditors": true, + "selectable": true, + "selected": false, + "singletonTag": "default", + "crosshairMode": "NoCrosshair", + "crosshairBehavior": "OffsetJumpSlice", + "crosshairThickness": "Fine", + "crosshairRAS": [0.0, 0.0, 0.0] + } + } + ``` + +- **Node State Reading/Updating**: + Nodes can now read or update their state (either fully or partially) from a JSON string. For example: + + ```python + a.ReadJSONFromString('{"Crosshair":{"crosshairRAS":[100.0,100.0,100.0]}}') + ``` + +- **Scene-Level JSON Format Support**: + The format for the entire MRML scene is controlled by a macro: + + ```cpp + vtkMRMLScene::SetUseJSONFormat(true); + ``` + + When enabled, the scene file will be output in JSON format. For example, the attached [sample file](https://github.com/user-attachments/files/18457210/2025-01-17-Scene.zip) demonstrates the current output structure. + +## Objective + +1. Get feedback on the current preliminary implementation [PR](https://github.com/Slicer/Slicer/pull/8141) and work a final design for the JSON based node status/scene file format. +2. Discuss real-time collaboration toolkits for medical application (e.g. Omniverse) + + +## Approach and Plan + +1. 
Have a meeting/demo with people interested for collecting feedback. +1. Work on the final design of the JSON based node status/scene file format. + + +## Progress and Next Steps + +### Progress +- Draft [PR](https://github.com/Slicer/Slicer/pull/8141) is already functional. The primary advantage of using JSON is that arrays are printed in a standardized format, unlike the XML format, which uses a Slicer-specific structure that can vary between arrays, subject hierarchy items attributes, etc. Below is an example comparing the scene in XML and JSON formats: + +| XML | JSON | +|--- | ---| +| | | + +- Testing reading/writing performance for the **Scene-Level** use case with 100 markup lines: + - XML: write 0.009117 ± 0.000674 sec, read 0.137960 ± 0.038905 sec + - JSON: write 0.046738 ± 0.020891 sec, read 0.180462 ± 0.013206 sec + - Relative performance factors are ~5.13x slower for writing and ~1.31x slower for reading when using JSON compared to XML. + - Scene writing could be optimized further, although the time for processing 100 markup lines is < 0.05 seconds. Further investigation is needed to estimate performance on very large scenes. +- Meeting done on Tuesday. Key notes taken by JC: +https://github.com/Slicer/Slicer/pull/8141#issuecomment-2618876551 + - Supporting partial updates to the scene is an interesting direction, particularly with Node Status printing/updating to enable partial node modifications. + - Introducing `macros` for automatic schema generation would be beneficial. + - Exploring `GraphQL` support could enable batched updates through mutations. Integration could leverage libraries such as `cppgraphqlgen`, as `libgraphqlparser` appears unmaintained. + - Investigate VTK serialization capabilities in recent versions, which might complement this work.
+- The discussion with NVIDIA will be further explored to assess Slicer support and interoperability with OpenUSD/Omniverse for a medical real-time collaboration tool within Omniverse. + + +### Next Steps +1. When calling `WriteJSONToString` for nodes with a `storageNode`, we need to stringify certain parts of the node state information (for the single **Node Status - real-time collaboration** use case). + - **Markups** use `vtkMRMLMarkupsJsonStorageNode`, which already utilizes the JSON format. However, the current infrastructure only allows saving this information to a file. We need to refactor `vtkMRMLMarkupsNode` and `vtkMRMLMarkupsJsonStorageNode` to use `vtkMRMLMarkupsJsonWriter` for stringifying to a stream instead of a file, enabling access to its methods from Python. + - **Transforms** use `vtkMRMLTransformStorageNode` -> `itk::TransformFileWriter` has the the same issue of the Markups writer. We would need to refactor at the `vtkMRMLNode` level to be able to switch between writing to file and to a stream. + - **Volumes/Segmentations/Models**: For now, storing the file location should suffice, but in the future, we may need to pass `imageData` as a blob. + +1. Add automated tests to cover all MRML nodes in Slicer core/modules. +1. Investigate each feedback point gathered during the Tuesday meeting. 
+ +# Background and References + + + +[PR](https://github.com/Slicer/Slicer/pull/8141) diff --git a/PW42_2025_GranCanaria/Projects/KidneyAndTumorSegmentationForSurgeryPlanning/README.md b/PW42_2025_GranCanaria/Projects/KidneyAndTumorSegmentationForSurgeryPlanning/README.md new file mode 100644 index 000000000..aacdaf091 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/KidneyAndTumorSegmentationForSurgeryPlanning/README.md @@ -0,0 +1,105 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Kidney and tumor segmentation for surgery planning +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Steve Pieper + affiliation: Isomics + country: USA + +- name: Sylvia Ladstatter + affiliation: Children's National + country: USA + +- name: Kevin Cleary + affiliation: Children's National + country: USA + +- name: Andres Diaz-Pinto + affiliation: NVIDIA + country: UK + +- name: Mauro I. Dominguez + affiliation: Independent + country: Argentina +--- + +# Project Description + + + + +Our [overall project](https://arpa-h.gov/research-and-funding/mission-office-iso/awardees#:~:text=SARRTS%3A%20Supervised%20Autonomous%20Robotic%20Renal%20Tumor%20Surgery) aims to help automate kidney surgery and requires a fast and accurate way to make detailed segmentations of renal structures. Currently we can do this in 3D Slicer in a few hours using existing segmentation techniques. + +We would like to test improved methods for this task, and also define a good terminology for it. + + + +## Objective + + + + +1. Have a supervised segmentation method that works well with standard pre-op clinical images (typically diagnostic CTs with contrast enhancement at 1mm or smaller pixel size). +2. 
The method should segment the following structures: + * Aorta + * Vena cava + * Renal cortex + * Renal artery (including accessories, inside and outside the kidneys) + * Renal vein + * Renal pyramids / medulla + * Renal pelvis + * Ureters + * Tumors / masses + 3. Define a [good terminology](https://projectweek.na-mic.org/PW42_2025_GranCanaria/Projects/InfrastructureForCustomTerminologyAndColorTablesInSlicer/) and map it to SNOMED terms. + 4. The method should work well on a wide range of clinically realistic cases, such as noisy images and anatomical variants. + 5. Ideally a method should also work on non-contrast CT and MR as well + + + +## Approach and Plan + + + + +1. Use test data from IDC ([KiTS data](https://kits-challenge.org/kits23/)) as a testbed. See if there are other datasets we could use for testing. +2. Meet with experts to discuss state-of-the-art approaches and find out about any existing kidney segmentation models we can try +3. Experiment with [ScribblePrompt, MultiverSeg](https://projectweek.na-mic.org/PW42_2025_GranCanaria/Projects/DeployingScribblepromptAndMultiversegForInteractiveSegmentationAsA3DSlicerExtension/), [VISTA-3D](https://projectweek.na-mic.org/PW42_2025_GranCanaria/Projects/Vista3D-NIM/), and [Radiology Copilot](https://projectweek.na-mic.org/PW42_2025_GranCanaria/Projects/3Dand2DRadiologyCopilotIntegrationin3DSlicer/) for this task +4. Get input from the IDC team and others on terminologies for this task + + + +## Progress and Next Steps + + +* Worked with Andres on testing [Auto3DSeg challeng-winning models using KiTS model](https://arxiv.org/pdf/2310.04110). [Ready for merging to SlicerMONAIAuto3DSeg](https://github.com/lassoan/SlicerMONAIAuto3DSeg/pull/93). 
+* Mauro tested the [Renal Structures from Contrast Enhanced CT](https://monai.io/model-zoo.html#:~:text=Renalstructures%20cect%20segmentation,enhanced%20CT%20image) model and [developed instructions](https://gist.github.com/mauigna06/133567c0e8c9134920bf737d6d2608bb) for use. +* Shared ideas and common requirements with [nephrostomy training project](https://projectweek.na-mic.org/PW42_2025_GranCanaria/Projects/ProjectNephrostomyTutorLowCostTrainingSystemForPercutaneousNephrostomy/) + +image + +# Illustrations + + + +![image](https://github.com/user-attachments/assets/5ce6d0cb-dfe4-473e-a75a-8ad96c339975) + + +![Image](https://github.com/user-attachments/assets/e64fc125-1cc5-4cd8-8e92-b27d5f070a3a) + +![Image](https://github.com/user-attachments/assets/cb5f60f5-60ab-46c7-8e77-a3072e135934) + +![Image](https://github.com/user-attachments/assets/28bad2d7-3476-4247-a899-563c18561099) + + + +# Background and References + +Example KiTS case from IDC: +[https://viewer.imaging.datacommons.cancer.gov/viewer/1.3.6.1.4.1.14519.5.2.1.6919.4624.135173370342136417423953641748](https://viewer.imaging.datacommons.cancer.gov/viewer/1.3.6.1.4.1.14519.5.2.1.6919.4624.135173370342136417423953641748) diff --git a/PW42_2025_GranCanaria/Projects/LightingProblemsWithLatestSlicervr/OpenVR_NoShadow.png b/PW42_2025_GranCanaria/Projects/LightingProblemsWithLatestSlicervr/OpenVR_NoShadow.png new file mode 100644 index 000000000..64ee9547e Binary files /dev/null and b/PW42_2025_GranCanaria/Projects/LightingProblemsWithLatestSlicervr/OpenVR_NoShadow.png differ diff --git a/PW42_2025_GranCanaria/Projects/LightingProblemsWithLatestSlicervr/OpenVR_Shadow.png b/PW42_2025_GranCanaria/Projects/LightingProblemsWithLatestSlicervr/OpenVR_Shadow.png new file mode 100644 index 000000000..88dcf4612 Binary files /dev/null and b/PW42_2025_GranCanaria/Projects/LightingProblemsWithLatestSlicervr/OpenVR_Shadow.png differ diff --git 
a/PW42_2025_GranCanaria/Projects/LightingProblemsWithLatestSlicervr/OpenXR_NoShadow.png b/PW42_2025_GranCanaria/Projects/LightingProblemsWithLatestSlicervr/OpenXR_NoShadow.png new file mode 100644 index 000000000..01a15a048 Binary files /dev/null and b/PW42_2025_GranCanaria/Projects/LightingProblemsWithLatestSlicervr/OpenXR_NoShadow.png differ diff --git a/PW42_2025_GranCanaria/Projects/LightingProblemsWithLatestSlicervr/OpenXR_Shadow.png b/PW42_2025_GranCanaria/Projects/LightingProblemsWithLatestSlicervr/OpenXR_Shadow.png new file mode 100644 index 000000000..e145c783f Binary files /dev/null and b/PW42_2025_GranCanaria/Projects/LightingProblemsWithLatestSlicervr/OpenXR_Shadow.png differ diff --git a/PW42_2025_GranCanaria/Projects/LightingProblemsWithLatestSlicervr/README.md b/PW42_2025_GranCanaria/Projects/LightingProblemsWithLatestSlicervr/README.md new file mode 100644 index 000000000..46f173f39 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/LightingProblemsWithLatestSlicervr/README.md @@ -0,0 +1,111 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Lighting problems with latest SlicerVR +category: VR/AR and Rendering + +key_investigators: + +- name: Csaba Pinter + affiliation: EBATINCA + country: Spain + +- name: Andras Lasso + affiliation: PerkLab, Queen's University + country: Canada + +- name: Matt Jolley + affiliation: CHOP + country: USA + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware + +--- + +# Project Description + + + + +There is a regression with how latest SlicerVR lights the scene, both with the old VR and the new XR backend. +- OpenVR: Default lighting looks as expected, but now SSAO and Lights module options are not applied on the VR view (even if the view node IDs are explicitly selected) +- OpenXR: Default lighting looks washed out + + + +## Objective + + + + +1. 
Make both backends of SlicerVR work like before the regression + * "Normal" lighting by default + * Lights module changes have effect on VR view as well + + + +## Approach and Plan + + + + +1. Investigate the problem with the help of people directly involved in the OpenXR integration +2. If we find the root cause of either issues, try to address them + + + +## Progress and Next Steps + + + + +1. The Kitware team looked into the issue with OpenXR + * The issue seems to be the SRGB conversion. There is already a workaround possible, with a proper solution suggested ([see comment](https://github.com/KitwareMedical/SlicerVirtualReality/issues/182#issuecomment-2624608363)) +2. OpenVR support is being removed from Windows, so probably not worth trying to address the issue + + + +# Illustrations + + + + +The following screenshots demonstrate shadows vs no shadows in OpenVR: + +![OpenVR no shadows](OpenVR_NoShadow.png) +Left: Slicer view using OpenVR without shadows +Right: VR view (with back lights / without two sided lighting) + +![OpenVR_Shadow](OpenVR_Shadow.png) +Left: Slicer view using OpenVR with shadows +Right: VR view (with back lights / without two sided lighting) + + +__________________________________________________________________________________________________________ + +The following screenshots demonstrate different lighting options in OpenXR: + +![OpenXR_NoShadow](OpenXR_NoShadow.png) +Left: Slicer view using OpenXR without shadows +Right: VR view (with back lights / without two sided lighting) + +![OpenXR_Shadow](OpenXR_Shadow.png) +Left: Slicer view using OpenXR without shadows +Right: VR view (without back lights / without two sided lighting) + + + + +# Background and References + +* [SlicerVirtualReality issue](https://github.com/KitwareMedical/SlicerVirtualReality/issues/182) + + + + +For those who have access to SlicerHeart internals, this is the link to the issue: 
[https://github.com/JolleyLab/Internal/issues/205#event-14879920416](https://github.com/JolleyLab/Internal/issues/205#event-14879920416) diff --git a/PW42_2025_GranCanaria/Projects/MRunner2/README.md b/PW42_2025_GranCanaria/Projects/MRunner2/README.md new file mode 100644 index 000000000..bd84a2eb0 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/MRunner2/README.md @@ -0,0 +1,100 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: MHubRunner - MHub.ai for 3D Slicer (v2) +category: Infrastructure + +key_investigators: +- name: Leonard Nürnberg + affiliation: Brigham and Women’s Hospital, Harvard Medical Schools, Maastricht University + country: The Netherlands + +- name: Andrey Fedorov + affiliation: Brigham and Women’s Hospital, Harvard Medical Schools + country: USA + +- name: Hugo Aerts + affiliation: AIM Lab, Brigham and Women’s Hospital, Harvard Medical Schools + country: USA + +--- + +# Project Description + +MHub.ai provides a collection of dockerized, DICOM compatible AI models. +We will provide a new extension for 3D Slicer that allows to run models from MHub.ai directly from within 3D Slicer. + +## Objective + +This project aims to provide a 3D Slicer extension to run arbitrary MHub.ai models (segmentation) directly from Slicer by running the standard Dicom to Dicom workflow on a Dicom image and automatically importing the generated mask into Dicom memory without the need for any model-specific setup. + +## Approach and Plan + +Our first version of the plugin ran a slicer specific nrrd-to-nrrd workflow. While this allowed to run any model directly on a loaded node, the generated results were not stored automatically and a slicer specific nrrd-to-nrrd workflow was required to support a specific MHub.ai model. In this iteration of the extension, we aim to improve on these constrains and add some useful features like GPU selection and image management. 
+ +## Progress and Next Steps + +Roadmap: +- [x] Run the default workflow (dicom-dicom) of MHub.ai models +- [x] Add generated DICOMSEG files to the 3D Slicer DICOM store +- [x] Conenct to the MHub.ai API to provide a list of available models +- [x] Detect available GPUs and provide GPU selection +- [x] Display a list of available mhubai images +- [x] ~~Provide alternative backends (e.g., udocker)~~ +- [x] ~~Run models on a remote server (via ssh)~~ +- [x] Display image information (~~version~~, disk space, ..) +- [x] Update and delete images +- [x] Update the selected node in 3D Slicer (red, green, yellow) upon selection +- [ ] Automatically display generated results after computation -> bug? +- [ ] ~~Store run information~~ +- [x] Implement a simple text-based model search +- [x] Include output descriptors and model description in model search +- [x] Provide additional model information +- [x] Disable models incompatible with extension (only segmentation models and single-input models aas of now) +- [x] Show async raw output stream of MHub run command +- [x] Display JSON and CSV files in Slicer Table view +- [x] Maintain a run history of generated output files +- [x] Organize all extension files into a single temp / user folder: input, output, logs, runs + +Ideas to be discussed: +- [ ] Docker SDK (Python) vs. Docker CLI (Subprocess) +- [ ] Modularize extension such that it can be extended with specific configuration +- [x] Support for non-segmentation models (using the 3D Slicer table view or DICOM SR) +- [ ] Open Slicer and a specific model in MHubRunner from a link via the mhub.ai website (possible?) +- [x] Explore: Multi-Select (now possible with new History view) and modality check (for dynamic model compatibility) + +# Illustrations + +Updated UI and Model Search. + +![Bildschirmfoto 2025-01-14 um 09 09 50](https://github.com/user-attachments/assets/5d277996-d491-4452-bf38-faed63b027ad) + +Updated Model Information and Details. 
+ +Bildschirmfoto 2025-01-27 um 11 11 28 +Bildschirmfoto 2025-01-30 um 15 02 04 + +Search for models by segmentation ROI + +Bildschirmfoto 2025-01-30 um 15 02 18 + +View and search for model modalities + +Bildschirmfoto 2025-01-30 um 15 02 27 + +Run prediction models and display as table view + +Bildschirmfoto 2025-01-30 um 15 03 22 + +Manage local images + +Bildschirmfoto 2025-01-30 um 15 03 39 + +# Background and References + +- The [MHub.ai model repository](https://mhub.ai/models) +- The MHub.ai [Documentation](https://github.com/MHubAI/documentation) +- Guides for [model contributions](https://mhub.ai/contribute). +- The [first version of the MRunner extension](https://github.com/MHubAI/SlicerMRunner). diff --git a/PW42_2025_GranCanaria/Projects/MorphoDepot/README.md b/PW42_2025_GranCanaria/Projects/MorphoDepot/README.md new file mode 100644 index 000000000..575cdc79d --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/MorphoDepot/README.md @@ -0,0 +1,98 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: 'MorphoDepot: Collaborative segmentation projects ' +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Murat Maga + affiliation: Seattle Children's + country: USA + +- name: Steve Pieper + affiliation: Isomics Inc. + country: USA + +- name: Andrey Fedorov + affiliation: Brigham and Women's Hospital + country: USA + +--- + +# Project Description + + + +We are developing tools for segmentation of biological specimens (e.g. 3D microCT of fish or snakes). The idea is that a Lab Director will define a project, such as what scan to segment, what anatomical structures to segment, the terminologies to use, etc. Students or lab members would be assigned to segment subsets of the data. We want to leverage existing data management tools, such as github for organizing issues and contributions, and jetstream2 for hosting data and computation. 
+ +There is a presentation about the ideas here: [https://morphocloud.github.io/MorphoDepotDocs/](https://morphocloud.github.io/MorphoDepotDocs/) + +And there is an existing extension here: [https://github.com/MorphoCloud/SlicerMorphoDepot](https://github.com/MorphoCloud/SlicerMorphoDepot) + +We are interested in facilitating collaborative segmentation, including dividing a whole project into tasks, managing allocation to tasks to segmenters, managing/merging contributions, etc. + +If you are interested in similar topics, please join our project! + + +## Objective + + + + +1. Networking: we would like to know how this fits with anyone else's projects and possibly collaborate +2. Talk with developers who are improving the terminologies and color modules about how to better structure our segmentations +3. Harden the infrastructure for Mac/Windows (install gh cli for users). +4. Work on MorphoDepotAccession module to make it easier to create segmentation task repositories (design ideas are here: [https://github.com/MorphoCloud/SlicerMorphoDepot/issues/10](https://github.com/MorphoCloud/SlicerMorphoDepot/issues/10)) +5. Explore how to improve the GH tasks efficiency as querying through tags to find repos, issues and PRs can be slow and may not scale. +6. Bonus: if time brainstorm tools for comparing segmentations / reviewing and merging segmentations from team members more effectively. +7. Extra Bonus: Come up with plans on how to use collaboratively segmented datasets to train AI models (and iteratively refine them) + +## Approach and Plan + + + +1. Define/create a JSON schema for the mandatory metadata for each MorphoDepot archive. These should include specimen metadata, and some key imaging metadata. +2. Explore how to create a staging area for data donors to upload their scenes (volume + segmentation, if exists) to be reviewed by MorphoDepot team (quality control, metadata check, etc). 
+ + +## Progress and Next Steps +1. Extension is fully functional and has been used in classroom with multiple users for basic segmentation tasks. (e.g., go to [https://github.com/muratmaga/pinecone/issues](https://github.com/muratmaga/pinecone/issues) and open an issue for yourself to test). +2. Implement the MorphoDepotAccession + +### During Project Week +* Chance to discuss MorpoDepot with the wider community and meet with our research group (Murat, Steve, and Jc) +* Agreement that Accession module will include the following technologies: + * A Slicer module that relies on the `gh` github.amrom.workers.devmand line tool to simplify accessioning data + * Use a JSON Schema to define accession metadata and create UI form: [https://pieper.github.io/sites/schemaform/](https://pieper.github.io/sites/schemaform/) + * Use the new colortable with coded concepts infrastructure to define the segmentation to be performed on the specimen + * Create a service that will allow uploading of volume data of the specimen that is already in Slicer to an s3 bucket in zarr format by allocating a [presigned URL](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-presigned-url.html) only to people whose github account has been previously approved as a MorphoDepot contributor. + * Then create a github repo for the specimen that includes the color table and the URL to the zarr bucket for use with the MorphoDepot system. The repo will be based on a repository template that has the correct setting, like the MorpoDepot label and other properties. + + +# Illustrations + + +![image](https://github.com/user-attachments/assets/09f94c3d-9d7c-4688-b9b4-f4d7b70a8e65) + +MorphoDepot module lists pending issues assigned to this user and allows you to load/segment/commit them and then request review. 
+ +![image](https://github.com/user-attachments/assets/2d81e4f3-8d8b-49e4-97f4-f906053d375f) + +MorphoDepotReview module lists pending pull requests and allows PI to accept edits or request changes. + +![image](https://github.com/user-attachments/assets/9481ce0f-dc37-4900-9cdc-14bb0922df59) + +# Background and References + + + +Previous Project Week work: +* [https://projectweek.na-mic.org/PW41_2024_MIT/Projects/MorphodepotCollaborativeSegmentationProjects/](https://projectweek.na-mic.org/PW41_2024_MIT/Projects/MorphodepotCollaborativeSegmentationProjects/) diff --git a/PW42_2025_GranCanaria/Projects/NewSlicerModuleForVisualAssessmentOfPulmonaryCongestionFromUltrasound/README.md b/PW42_2025_GranCanaria/Projects/NewSlicerModuleForVisualAssessmentOfPulmonaryCongestionFromUltrasound/README.md new file mode 100644 index 000000000..5d6c03742 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/NewSlicerModuleForVisualAssessmentOfPulmonaryCongestionFromUltrasound/README.md @@ -0,0 +1,86 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: New Slicer Module for Visual Assessment of Pulmonary Congestion from Ultrasound +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Mike Jin + affiliation: Centaur Labs, Brigham and Women's Hospital + country: USA + +- name: Tamas Ungi + affiliation: Queens's University + country: Canada + +- name: Fahimeh Fooladgar + affiliation: University of British Columbia + country: Canada + +- name: Tina Kapur + affiliation: Brigham and Women's Hospital + country: USA + +--- + +# Project Description + + + + +This work is part of an NIH Trailblazer R21 grant to our team to develop and validate computational methods for quantifying pulmonary congestion using B-lines in heart failure patients from bedside lung ultrasound in emergency settings. 
Tools for automated quantification could help emergency department physicians more rapidly and frequently examine patients to assess progress and adjust treatment, resulting in improved care and patient outcomes. + +![grid_Case001_0](https://github.com/user-attachments/assets/87bd5a3e-9601-45fb-a1c1-c0cdc99a665f) + + +## Objective + + + + +1. Add a new public module for annotation of pulmonary congestion in ultrasound +2. Add new feature to existing public Anonymizer module: AI-assisted detection of image fan boundaries in ultrasound to streamline anonymization, followed by OCR in output which produces warning if any text is detected in image + + + +## Approach and Plan + + + + +We will spend Project Week developing the software to support these features and hopefully release the modules publicly. + + + +## Progress and Next Steps + + + +1. We have added a new AnnotateUltrasound module to the Ultrasound extension that allows for easy annotation of sectors representing B-lines (indicating pulmonary congestion). +2. As part of the public Anonymizer module, we have added a new button that uses an AI model to auto-detect the boundaries of the ultrasound image fan. + + Next steps: we will work on adding OCR text detection to add an additional check that anonymized images don't contain any remaining PHI text prior to export. + + +# Illustrations + + + +![](https://github.com/user-attachments/assets/80e53527-a968-4319-a9b0-c5b75f6bc8c0) + + +![image001](https://github.com/user-attachments/assets/90aeaef1-8d07-4efa-b434-830adacfc671) + + + +# Background and References + + + + +1. Asgari-Targhi et al. (2024). Can Crowdsourced Annotations Improve AI-based Congestion Scoring For Bedside Lung Ultrasound? MICCAI 2024. 
([link](https://papers.miccai.org/miccai-2024/paper/3582_paper.pdf) to paper) diff --git a/PW42_2025_GranCanaria/Projects/NousnavFuturePlansAndGrantBrainstorming/README.md b/PW42_2025_GranCanaria/Projects/NousnavFuturePlansAndGrantBrainstorming/README.md new file mode 100644 index 000000000..287e3624f --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/NousnavFuturePlansAndGrantBrainstorming/README.md @@ -0,0 +1,112 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: NousNav future plans and grant brainstorming +category: IGT and Training + +key_investigators: + +- name: Sam Horvath + affiliation: Kitware + country: USA + +- name: Rebecca Hisey + affiliation: Queen's University + country: Canada + +- name: Gabriella d'Albenzio + affiliation: Queen's University + country: Canada + +- name: Steve Pieper + affiliation: Isomics + country: USA + +- name: Alex Golby + affiliation: BWH + country: USA + +- name: Tina Kapur + affiliation: BWH + country: USA + +--- + +# Project Description + + + + +The NousNav project team will meet and discuss future goals for the project. + + + +## Objective + + + +We plan to do a demo of the current system, and discuss future plans. 
+ + +## Approach and Plan + + + + +### Tasks: +- Do a demo of the hardware / software available + - Should be full system with camera +- Deliver system to team traveling to Dakar + - Make sure they have the latest NousNav software + - Hand off hardware + +### Discussion Items +- Funding sources +- Regulatory approval approaches + - Design history file elements + + + +## Progress and Next Steps + + + + +- Tested the hardware, ready to go to Dakar +- Meeting with regulatory consultant to discuss initial steps toward FDA and CE approval + - We have worked out a good approach for the US approval + - Gathered initial templates for the design history file + + + +# Illustrations + + + +![1000008250](https://github.com/user-attachments/assets/cfbd9ee0-1e4d-443e-a64d-6f13f7fe820d) + +![1000008251](https://github.com/user-attachments/assets/5a1fbe00-3417-49e6-a73b-44bc5328dcf4) + +![Screenshot 2025-01-27 05 52 43](https://github.com/user-attachments/assets/bf800ad3-ba49-49ba-bb6f-9a9b0539d945) + + +_No response_ + + + +# Background and References + + + + +Previous Project Week Pages: +- [PW 35](https://projectweek.na-mic.org/PW35_2021_Virtual/Projects/NousNav/) +- [PW 36](https://projectweek.na-mic.org/PW36_2022_Virtual/Projects/NousNav/) +- [PW 36 Skin Segmentation](https://projectweek.na-mic.org/PW36_2022_Virtual/Projects/SkinSegmentation/) +- [PW 39](https://projectweek.na-mic.org/PW39_2023_Montreal/Projects/NousNavRelease/) +- [PW 39 Tracked Ultrasound](https://projectweek.na-mic.org/PW39_2023_Montreal/Projects/TrackedUltrasoundIntegrationIntoNousnavALowCostNeuronavigationSystem/) +- [PW 41 Skin Segmentation](https://projectweek.na-mic.org/PW41_2024_MIT/Projects/SkinSurfaceSegmentationForNousnav/) diff --git a/PW42_2025_GranCanaria/Projects/OHIF-fusion Scroll Tool/README.md b/PW42_2025_GranCanaria/Projects/OHIF-fusion Scroll Tool/README.md new file mode 100644 index 000000000..db40361ee --- /dev/null +++ 
b/PW42_2025_GranCanaria/Projects/OHIF-fusion Scroll Tool/README.md @@ -0,0 +1,83 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: 'OHIF Tools: StackScroll for fusion viewport, 4D interaction' +category: Infrastructure + +key_investigators: + +- name: Joost van Griethuysen + affiliation: The Netherlands Cancer Institute + country: The Netherlands + +--- + +# Project Description + + + +OHIF viewer is a much used and much developed open source project exposing a fully functional DICOM viewer in React.js. +However some features, such as support for 4D images and fusion may be further improved. + +## Objective + + + +Create or update the StackScroll tool for fusion windows. +Horizontal scrolling should update the opacity of the fusion. + +Review window-levelling tools for PET, to allow window-level with +fixed minimum. + +Create overlay tools for working with 4D images. +- Selection of split-tag +- Selection of time point to display + +## Approach and Plan + + + +Create local test environment with OHIF viewer, Cornerstone3D tools and data (DONE). + +Overlay controls for 4D images were already created in the XNAT-viewer project. +Investigate whether these can be easily implemented in the main project. + + +## Progress and Next Steps + + + +- Identified & reported bugs in Viewport colorbar + - [#4743](https://github.com/OHIF/Viewers/issues/4743): Mouse action is exactly opposite for PET data, compared to window-level tool in the viewport. + - [#4744](https://github.com/OHIF/Viewers/issues/4744): Changing colormap of fusion volume applies change to background volume. +- Create PR proposing fix for the 2nd bug ([#4746](https://github.com/OHIF/Viewers/pull/4746)). +- Create Cornerstone3D tool: [ReferenceProbe](https://github.com/JoostJM/cornerstone3D/tree/feat/reference-probe) + - Using click & dragging in a viewport, show corresponding location in other viewports (jumping slices as necessary). 
+- Create FusionStackScroll tool: [FusionStackScroll](https://github.com/JoostJM/cornerstone3D/tree/feat/fusion-stack-scroll) + - Extends StackScroll tool: delta X adjusts opacity of overlay volume in fusion viewport. + +### Next Steps: +- Adjust StackScroll to allow switching timepoints in dynamic volume in delta X direction. + - In case of Dynamic volume, determine directin (X or Y) of largest change --> Only adjust in this direction (i.e. either stack position or timepoint) +- Investigate option of combining multiple tools on the mouse buttons by adding modifiers (Ctrl, Alt). + - Change cursor on hover + modifier to provide tooltip indicating function. + +# Illustrations + + + +## Reference Probe +![Screenshot from 2025-01-31 11-52-14](https://github.com/user-attachments/assets/a79ddaa6-e201-43cf-831d-8b0b4490f388) + + + +# Background and References + + + +- [Ohif main repo](https://github.com/OHIF/Viewers) +- [Cornerstone3D repo](https://github.com/cornerstonejs/cornerstone3D) diff --git a/PW42_2025_GranCanaria/Projects/OpenModelForAnatomySegmentationInComputerTomography/README.md b/PW42_2025_GranCanaria/Projects/OpenModelForAnatomySegmentationInComputerTomography/README.md new file mode 100644 index 000000000..f70d12818 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/OpenModelForAnatomySegmentationInComputerTomography/README.md @@ -0,0 +1,110 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Open Model for Anatomy Segmentation in Computer Tomography +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Murong Xu + affiliation: University of Zurich + country: Switzerland + +- name: Tamaz Amiranashvili + affiliation: University of Zurich + country: Switzerland + +- name: Bjoern Menze + affiliation: University of Zurich + country: Switzerland + +- name: Andrey Fedorov + affiliation: Brigham and Women's Hospital + country: USA + +--- + +# Project 
Description + + + + +We have developed a state-of-the-art automated segmentation model capable of identifying 167 anatomical structures in volumetric CT scans. This model has been trained on a combined dataset of more than 22,000 diverse, partially-annotated CT scans, setting a new benchmark in medical imaging. Our goal is to integrate this model into a 3D Slicer extension, making it widely available to the community. + + + +## Objective + + + + +1. Improve general user experience of the Slicer extension and finalize the development. +2. Prepare for performing large-scale inference on the IDC database. + + + +## Approach and Plan + + + + +1. Enhance user experience of our current prototype of the Slicer extension + a. explore options for faster CPU-only inference + b. add DICOM support + c. incorporate SNOMED naming conventions +2. Finalize extension development (test extension on various OSs, writing tests) +3. Benchmark inference performance and prepare for large-scale inference on the NLST/IDC databases + + + +## Progress and Next Steps + + + + +Current Achievements: + +- finalized Slicer extension UI and added DICOM support +- added support for SNOMED-CT naming conventions +- evaluated hardware requirements for inference on laptops + - limited memory/CPU only: Trained smaller models + +- initial process for working with IDC data: image retrieve, DICOM nifti conversion, restore + +Next Steps: +- In progress: test on different OSs + + + + +# Illustrations + + + + +![334442645-dfbe0cbf-0341-4dfc-991d-bdcf2c621c2d](https://github.com/user-attachments/assets/de0d6f1d-8389-4cde-b597-683e84bb60ea) + + +![](https://github.com/user-attachments/assets/5cfcf858-b2ac-4c95-99e8-b83307426e58) + + + + + + + +# Background and References + + + + +* Earlier PW41 project on this topic: [OMAS CT: Open Model for Anatomy Segmentation in Computer 
Tomography](https://projectweek.na-mic.org/PW41_2024_MIT/Projects/OmasCtOpenModelForAnatomySegmentationInComputerTomography/) +* Earlier work on mapping OMAS labels to SNOMED-CT: [spreadsheet](https://docs.google.com/spreadsheets/d/1pBicNskjMDJBnD3w4yAQroj8SGSAhDfA_TUK24dLEyc/edit?gid=1390863317#gid=1390863317) diff --git a/PW42_2025_GranCanaria/Projects/OptimizeDicomwebAccessToIdcData/README.md b/PW42_2025_GranCanaria/Projects/OptimizeDicomwebAccessToIdcData/README.md new file mode 100644 index 000000000..613fb79cb --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/OptimizeDicomwebAccessToIdcData/README.md @@ -0,0 +1,99 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: 'Optimize DICOMweb access to IDC data ' +category: DICOM + +key_investigators: + +- name: Daniela Schacherer + affiliation: Fraunhofer MEVIS + +- name: David Clunie + affiliation: PixelMed + country: USA + +- name: Steve Pieper + affiliation: Isomics Inc. + country: USA + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +- name: Joël Spaltenstein + affiliation: Agora Care SA + country: Switzerland + +--- + +# Project Description + + + + +As the DICOM standard is increasingly used in digital pathology imaging, conversion of available datasets from proprietary formats into DICOM format can make the data more FAIR and improve transparency and reproducibility of research conducted with these data. For this reason, the NCI Imaging Data Commons (IDC) hosts all its data in DICOM format. + +DICOMweb™ is a set of RESTful services that allows search, access and storage of DICOM objects via network. IDC data are also available (currently outdated, but soon to be updated) from a [DICOM store](https://cloud.google.com/healthcare-api/docs/resources/public-datasets/idc). 
This project aims to specifically try to optimize access to digital pathology IDC data via DICOMweb capable libraries ([wsidicom](https://github.com/imi-bigpicture/wsidicom), [dicomslide](https://github.com/ImagingDataCommons/dicomslide), [ez-wsi-dicomweb](https://github.com/GoogleCloudPlatform/EZ-WSI-DICOMweb)) based on prior works on that issue. + + + +## Objective + + + + +1. **Objective A**: Have a code snippet for randomly and efficiently selecting tiles of arbitrary size from the IDC-DICOM store. +2. **Objective B (if time permits)**: Have an efficiency comparison for different libraries used for DICOMweb access. + + + +## Approach and Plan + + + + +**Objective A**: +1. Summarize current state. +2. Implement DICOMweb access (problems have previously been observed wrt authorization). + +**Objective B**: +1. Create scripts for efficiency comparison. +2. Summarize results. + + +## Progress and Next Steps + + + +**Objective A**: +1. Current state summarized in document (will be made public following PW). +2. Created scripts for access with dicomslide, wsidicom, ez-wsi-dicom. + +**Objective B**: +1. Time didn't permit further efficiency comparison. But based on previous results and discussions, it became clear that ez-wsi-dicom is currently the most efficient tool probably due to the use of multiple ways of parallelizing requests and caching. +2. The main downside of ez-wsi-dicom is that it only works with Google DICOMweb stores. + +# Illustrations + + + +![Conceptual overview of DICOMweb](./dicomweb.png) \ +*Conceptual overview of DICOMweb. Taken from: https://www.dicomstandard.org/using/dicomweb/capabilities.* + + +# Background and References + + + + +Background reading: +- [https://www.dicomstandard.org/using/dicomweb](https://www.dicomstandard.org/using/dicomweb) + +Further resources: +- Related earlier project from Project Week 40: [Current state of DICOMweb for pathology](https://projectweek.na-mic.org/PW40_2024_GranCanaria/Projects/CurrentStateOfDicomwebForPathology/). 
+- Extensive tutorial notebook of ez-wsi_dicomweb: [Getting Started with EZ-WSI DICOMweb](https://colab.research.google.com/github/GoogleCloudPlatform/EZ-WSI-DICOMweb/blob/main/ez_wsi_demo.ipynb#scrollTo=pK9lTJaN9tuk) diff --git a/PW42_2025_GranCanaria/Projects/OptimizeDicomwebAccessToIdcData/dicomweb.png b/PW42_2025_GranCanaria/Projects/OptimizeDicomwebAccessToIdcData/dicomweb.png new file mode 100644 index 000000000..3e96ac352 Binary files /dev/null and b/PW42_2025_GranCanaria/Projects/OptimizeDicomwebAccessToIdcData/dicomweb.png differ diff --git a/PW42_2025_GranCanaria/Projects/ProjectDicomMetadataDatabases/README.md b/PW42_2025_GranCanaria/Projects/ProjectDicomMetadataDatabases/README.md new file mode 100644 index 000000000..a543bf1b6 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/ProjectDicomMetadataDatabases/README.md @@ -0,0 +1,291 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: 'DICOM metadata databases' +category: DICOM + +key_investigators: + +- name: Marco Nolden + affiliation: German Cancer Research Center + country: Germany + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +- name: Steve Pieper + affiliation: Isomics Inc. + country: USA + +--- + +# Project Description + + + + + +Medical imaging applications and systems which manage large collections of DICOM images usually need some kind of database to allow for browsing and selecting images or image collections, to support curation and control of ML training tasks, batch analysis etc. +Goal of the project is to investigate existing and new approaches to handle the metadata of large image collections for different purposes, create experimental setups, and report on results. + +- DICOM objects contain rich metadata + +- depending on the use case, record linkage to non-imaging data might be an additional requirement + +- extracted metadata can be represented in different JSON styles, stored in document databases like CouchDB, Apache OpenSearch etc.. 
+ +- there is a FHIR imaging study ([https://www.hl7.org/fhir/imagingstudy.html](https://www.hl7.org/fhir/imagingstudy.html)), FHIR data could be stored in FHIR stores, or regular SQL databases … + +- custom approaches, like the CTK DICOM database, or IDC's representation in BigQuery; one has also observed flattened FHIR in SQL databases, combined with object stores etc. + +- DICOM to JSON could be done according to the DICOM JSON model ([https://dicom.nema.org/medical/dicom/current/output/chtml/part18/chapter\_F.html](https://dicom.nema.org/medical/dicom/current/output/chtml/part18/chapter_F.html)) , e.g. using DCMTK, or custom approaches, but also generic metadata extractors like Apache Tika could be an option + + + + +## Objective + + + + +1. Objective A. A report on the experiments and their results. + + + + +## Approach and Plan + + + + +1. put DICOM JSON in JSON columns of sqlite or postgres, test jsonpath and similar +2. create FHIR imaging studies, put to FHIR endpoint or other databases +3. Connect with out-of-the-box visualization solutions for e.g. json documents + + + + +## Progress and Next Steps + + + +1. Generate DICOM JSON representation of some TCIA datasets using pydicom +2. Read about JSON and JSONB columns in SQLite +3. Try different queries + +> [!CAUTION] +> SQLite's [arrow operator ->](https://sqlite.org/json1.html#jptr) seems to behave differently if the key consists solely of digits. This can lead to surprises when working with DICOM tags whose hexadecimal representation sometimes contains letters, but sometimes only digits ... + +### Count images per modality + +```SQL +SELECT COUNT(*) AS [Number of records], json_extract(jsonb_data, "$.00080060.Value") +FROM dicom_files +GROUP BY json_extract(jsonb_data, "$.00080060.Value"); +``` +It's also possible to create an index on fields, e.g. 
for modality + +```SQL +CREATE INDEX modality ON dicom_files (json_extract(jsonb_data, "$.00080060.Value") ); +``` + +### Count number of series per modality + +```SQL +-- Count number of series per modality +-- SQLite +SELECT + json_extract(jsonb_data, "$.00080060.Value"), + COUNT(DISTINCT(json_extract(jsonb_data, "$.0020000D.Value"))) AS num_series +FROM + dicom_files +GROUP BY + json_extract(jsonb_data, "$.00080060.Value") +ORDER BY + num_series desc +``` + +```SQL +-- Count number of series per modality +-- IDC BigQuery +SELECT + Modality, + COUNT(DISTINCT(SeriesInstanceUID)) AS num_series +FROM + `bigquery-public-data.idc_current.dicom_all` +GROUP BY + Modality +ORDER BY + num_series desc +``` + +### Find all series which contain a LOCALIZER ImageType + +```SQL +-- Find all series which contain a LOCALIZER ImageType +-- SQLite JSON +SELECT DISTINCT + json_extract(jsonb_data, "$.0020000D.Value") +FROM + dicom_files, json_each (json_extract(jsonb_data, "$.00080008.Value") ) + WHERE +json_each.value IS 'LOCALIZER' +``` + + +```SQL +-- Find all series which contain a LOCALIZER ImageType +-- IDC BigQuery +WITH + ImageTypeAgg AS ( + SELECT + ARRAY_TO_STRING(ImageType,'/') AS image_type_str, + SeriesInstanceUID + FROM + `bigquery-public-data.idc_current.dicom_all` ) +SELECT + SeriesInstanceUID, + image_type_str +FROM + ImageTypeAgg +WHERE + image_type_str LIKE "%LOCALIZER%" +``` + +### Findings and future experiments + +- putting the standard JSON representation of DICOM files in SQLite works +- json functions of SQLite can be used to extract elements +- queries seem to be reasonably fast, database indices can be created on elements to speed this up, e.g. 
~5x for modality + +Ideas for future experiments: +- validate results by comparing them to IDC BigQuery results for selected collections +- measure performance in a systematic way +- replace numeric tag values with names, makes queries more readable but might also increase database size +- compare performance and size with PostgreSQL +- use alternative JSON structure from the proposed [DICOM Supplement 219](https://dicom.nema.org/medical/dicom/Supps/Frozen/sup219_fz_14_JSONSR.pdf) , [Presentation](https://dicom.nema.org/medical/dicom/Supps/Frozen/sup219_fz_JSONSR_TrialUse_Slides_20200116.pptx) . There is also a trial implementation by @dclunie which could be evaluated for this. +- +- + + + + + + + +Sample code to do so could look like this: + +```Python +#!/usr/bin/env python + +"""Populate SQLite with DICOM metadata. + +This code recursively crawls a directory and populates a SQLite database with JSON representations of +the DICOM files found. + +Warning: highly experimental code, use at your own risk! 
+ +""" + +import pydicom +import os +import sys +import sqlite3 +import json + +def bulk_data_handler(data_element): + # we are not interested in any bulk data + return None + +crawl_directory = sys.argv[2] +db_path = sys.argv[1] + +# Connect to the database +conn = sqlite3.connect(db_path) +cursor = conn.cursor() + +# Create the table if it doesn't exist +cursor.execute(''' +CREATE TABLE IF NOT EXISTS dicom_files ( + filename TEXT PRIMARY KEY NOT NULL, + jsonb_data BLOB +) +''') + +conn.commit() + +# iterate over crawl directory, and insert json dumps of all files to the database +for root,dirs,files in os.walk(crawl_directory): + for file_name in files: + dicom_file_path = os.path.join(root,file_name) + json_data = "" + + # check if file is already in the database + cursor.execute("SELECT filename FROM dicom_files WHERE filename = ?", (dicom_file_path,)) + data=cursor.fetchone() + + if data: + continue + + try: + ds = pydicom.dcmread(dicom_file_path) + json_data = ds.to_json_dict(bulk_data_element_handler=bulk_data_handler) + + # print(json_data) + + except pydicom.errors.InvalidDicomError: + print("Skipped %s", dicom_file_path) + continue + + cursor.execute(''' + INSERT INTO dicom_files (filename, jsonb_data ) + VALUES ( ? , jsonb( ? 
) ) + ''', (dicom_file_path, json.dumps(json_data) ) ) + + conn.commit() + + print(f"Inserted DICOM file: {dicom_file_path}") + +conn.close() +``` + +https://github.com/nolden/namic-pw24/ + + +# Illustrations + + + +![Image](https://github.com/user-attachments/assets/45f6c2d3-4ad6-44f2-a88b-749c0a815333) + +![Image](https://github.com/user-attachments/assets/83248ec0-1531-477c-9792-da0be11cb502) + +![Image](https://github.com/user-attachments/assets/66e5c440-b634-4e14-9fd0-e457de6eab34) + +_No response_ + + + +# Background and References + + + + + +- Opensearch dashboards +- [https://www.metabase.com/](https://www.metabase.com/) +- [https://superset.apache.org/](https://superset.apache.org/) +- [https://python.langchain.com/v0.1/docs/modules/model\_io/output\_parsers/types/json/](https://python.langchain.com/v0.1/docs/modules/model_io/output_parsers/types/json/) +- [https://echarts.apache.org/examples/en/index.html\#chart-type-line](https://echarts.apache.org/examples/en/index.html#chart-type-line) +- [https://projectweek.na-mic.org/PW41\_2024\_MIT/Projects/Dcm2Parquet/](https://projectweek.na-mic.org/PW41_2024_MIT/Projects/Dcm2Parquet/) +- [\[2407.09064\] Multi-Modal Dataset Creation for Federated Learning with DICOM Structured Reports](https://arxiv.org/abs/2407.09064) +- [https://learn.microsoft.com/en-us/industry/healthcare/healthcare-data-solutions/dicom-data-transformation-mapping](https://learn.microsoft.com/en-us/industry/healthcare/healthcare-data-solutions/dicom-data-transformation-mapping) +- [https://github.com/pydicom/pydicom/issues/2187](https://github.com/pydicom/pydicom/issues/2187) +- Potential example queries for experiments: 
[https://docs.google.com/document/d/1qC5\_qUFBQ2HmEjfYQa9WaH1Y-erMlfis00bbU8UPnRs/edit?tab%3Dt.0%23heading%3Dh.ti11twc8h457\&sa=D](https://www.google.com/url?q=https://docs.google.com/document/d/1qC5_qUFBQ2HmEjfYQa9WaH1Y-erMlfis00bbU8UPnRs/edit?tab%3Dt.0%23heading%3Dh.ti11twc8h457&sa=D&source=docs&ust=1737743914441474&usg=AOvVaw0MsdEgNMafuMPSajRC6WNP) +- [https://learn.canceridc.dev/cookbook/bigquery](https://learn.canceridc.dev/cookbook/bigquery) +- [https://github.com/bebbi/dcm-organize](https://github.com/bebbi/dcm-organize) diff --git a/PW42_2025_GranCanaria/Projects/ProjectNephrostomyTutorLowCostTrainingSystemForPercutaneousNephrostomy/README.md b/PW42_2025_GranCanaria/Projects/ProjectNephrostomyTutorLowCostTrainingSystemForPercutaneousNephrostomy/README.md new file mode 100644 index 000000000..593960cc4 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/ProjectNephrostomyTutorLowCostTrainingSystemForPercutaneousNephrostomy/README.md @@ -0,0 +1,107 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: 'Nephrostomy tutor low cost training system for percutaneous nephrostomy' +category: IGT and Training + +key_investigators: + +- name: Rebecca Hisey + affiliation: Queen's University + country: Canada + +- name: Gabriella d'Albenzio + affiliation: Queen's University + country: Canada + +- name: Kyle Sunderland + affiliation: Queen's University + country: Canada + +- name: Mamadou Camara + affiliation: ESP + country: Senegal + +- name: Gabor Fichtinger + affiliation: Queen's University + country: Canada + +--- + +# Project Description + + + + +Working to integrate recent developments on AI-based volume reconstruction from US into the existing Nephrostomy tutor system to improve guidance for trainees. + + + +## Objective + + + + +1. Integrate AI-based volume reconstruction from US segmentations +2. Test updated system on a low-cost phantom + + + + + +## Approach and Plan + + + + +1. 
AI Model Integration + +- Review & Select AI Model: Identify and evaluate recent AI-based models for US segmentation that meet the needs of the Nephrostomy tutor system. +- Model Integration: Develop an interface to integrate the selected AI model with the existing tutor system, ensuring seamless data flow from ultrasound segmentations to the volume reconstruction process. + +2. System Update and Refinement + +- System Architecture Modification: Modify the Nephrostomy tutor system architecture to incorporate real-time volume reconstruction from the AI model. +- User Interface Adjustments: Update the user interface to display reconstructed volumes alongside the current guidance information for trainees. + + + + +## Progress and Next Steps + + + + +1. Low-cost phantom created. +2. Integrated real-time UNet predictions into Nephrostomy tutor +3. Updated visualizations to show predictions +4. Initial ultrasound segmentations in progress. + +Next steps: +1. Collect and segment more ultrasound kidney scans +2. Train an effective model + + +# Illustrations + + + + + + +https://github.com/user-attachments/assets/74945fc2-c866-4739-8a3b-3a295c87484c + + + + + +# Background and References + + + + +[Feasibility of video‐based skill assessment for percutaneous nephrostomy training in Senegal](https://pmc.ncbi.nlm.nih.gov/articles/PMC11665799/) diff --git a/PW42_2025_GranCanaria/Projects/Pyradiomics/README.md b/PW42_2025_GranCanaria/Projects/Pyradiomics/README.md new file mode 100644 index 000000000..a10de2b48 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/Pyradiomics/README.md @@ -0,0 +1,103 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: 'Update PyRadiomics and SlicerRadiomics build and CI' +category: Quantification and Computation + +key_investigators: + +- name: Joost van Griethuysen + affiliation: The Netherlands Cancer Institute + country: The Netherlands + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware Inc. 
+ country: USA + +--- + +# Project Description + + + + +PyRadiomics was first introduced in 2017 and remains a very popular python package +to compute radiomics features. It is integrated into 3D slicer via the SlicerRadiomics module. +However, both projects are not heavily maintained anymore, causing the CI chain and build tools +to be out of date. + + +## Objective + + + + +Update build tools and CI pipelines for both PyRadiomics and SlicerRadiomics to be up to date +with the latest python versions and build tools + + + +## Approach and Plan + + + +- Scikit-ci is deprecated and should be replaced +- Investigate option of using `cibuildwheel` +- Update metadata to adhere to new standards. + + + +## Progress and Next Steps + + + + +- Update `pyproject.toml` and remove deprecated files (e.g. `setup.py`, `setup.cfg`, `requirements*.txt`, `MANIFEST`) +- Run cookiecutter from [Scientific Python Library Development Guide](https://learn.scientific-python.org/development/) separately and use it to update pyradiomics + structure. + - Change build system to scikit-build-core + - Build C-extensions using CMake + - Moves CI/CD from CircleCI/AppVeyor to GitHub Actions. +- Create [PR #203](https://github.com/Grokzen/pykwalify/pull/203) in pykwalify to deal with deprecation warning. Ignore + this warning in the build process of pyradiomics for the time being. +- Disable pre-commit part of CI pending the correct setup of CI/CD. +- Local build & test now passing. +- [CI passing](https://github.com/JoostJM/pyradiomics/actions/runs/13038103168) on pull request branch + +ToDo: +- [x] Update CI configuration to match supported python versions enabled in CircleCI/AppVeyor builds. [PyRadiomics PR #898](https://github.com/AIM-Harvard/pyradiomics/pull/898) +- [x] Currently, CI is failing on METADATA mismatch in wheel generation. 
This is due to a mismatch in generated + version string (a version parsed from a metadata dir doesn't contain the date suffix, whereas the one in the generated + wheel does). Fix the version string error to allow CI to pass. +- [x] Add exceptions for style errors in pre-commit to allow it to pass initially. [PyRadiomics issue #899](https://github.com/AIM-Harvard/pyradiomics/issues/899) +- [ ] Later, apply style changes to make pyradiomics adhere to the configured style. +- [ ] Review readthedocs documentation generation. +- [ ] Review CD configuration to publish releases on tags pushed onto the master branch in the main repo. + + +# Illustrations + + + +![image](https://github.com/user-attachments/assets/39564879-e69a-42fd-85ed-d11d12b37b66) + + + +# Background and References + + + +Documentation +- [https://pyradiomics.readthedocs.io](https://pyradiomics.readthedocs.io) +- [https://cibuildwheel.pypa.io](https://cibuildwheel.pypa.io) + +Source code +- [https://github.com/AIM-harvard/pyradiomics](https://github.com/AIM-harvard/pyradiomics) +- [https://github.com/AIM-harvard/slicerradiomics](https://github.com/AIM-harvard/slicerradiomics) + +Misc +- [Scientific Python Library Development Guide](https://learn.scientific-python.org/development/) diff --git a/PW42_2025_GranCanaria/Projects/README.md b/PW42_2025_GranCanaria/Projects/README.md new file mode 100644 index 000000000..8c3f694da --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/README.md @@ -0,0 +1,18 @@ +# How to create a new project + +- Post questions about the project idea and team on the [Project Week forum][forum], our communication mechanism as of PW42. +- When you are ready, add a new entry in the list of **Projects** by creating a new `README.md` file in a subfolder of the `Projects` folder, and copying contents of [project description template][project-description-template] file into it. 
Step-by-step instructions for this are: + +1. Open [project description template][project-description-template] and copy its full content to the clipboard + * If the link does not work (https issues) please try [here](https://github.com/NA-MIC/ProjectWeek/blob/master/PW42_2025_GranCanaria/Projects/Template/README.md) +3. Go back to [Projects](https://github.com/NA-MIC/ProjectWeek/tree/master/PW42_2025_GranCanaria/Projects) folder on GitHub +4. Click on "Create new file" button +5. Type `YourProjectName/README.md` + - When naming the file, **please ensure there are no spaces/special characters in the folder or file name** +6. Paste the previously copied content of project template page into your new `README.md` +7. Update at least your project's __title, category, key investigators, location, and project description sections__ +8. Create a [pull request](https://help.github.com/articles/creating-a-pull-request/) with the new page + + +[forum]: https://discourse.slicer.org/c/community/project-week +[project-description-template]: https://raw.githubusercontent.com/NA-MIC/ProjectWeek/master/PW42_2025_GranCanaria/Projects/Template/README.md diff --git a/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/README.md b/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/README.md new file mode 100644 index 000000000..082336c3f --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/README.md @@ -0,0 +1,173 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Review of segmentation results quality across various multi-organ segmentation models +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +- name: Ron Kikinis + affiliation: BWH + country: USA + +- 
name: David Clunie + affiliation: Pixelmed Publishing + country: USA + +- name: Steve Pieper + affiliation: Isomics Inc + country: USA + +- name: Andres Diaz-Pinto + affiliation: NVIDIA + country: UK + +- name: Tamaz Amiranashvili + affiliation: University of Zurich + country: Switzerland + +- name: Murong Xu + affiliation: University of Zurich + country: Switzerland + +- name: Klaus Maier-Hein + affiliation: DKFZ + country: Germany + +- name: Bjoern Menze + affiliation: University of Zurich + country: Switzerland + +--- + +# Project Description + + + + +When initially released, TotalSegmentator was perceived to produce superior results, in comparison to the state-of-the-art at the time, anyway. + +Over time, some of the deficiencies in the segmentations produced by TotalSegmentator have been identified. Further, new multi-organ segmentation models have been introduced. + + + +## Objective + + + + +1. Review segmentation results for a sample of images from IDC NLST collection, documenting the problems, across the publicly available multi-organ segmentation models. + + + + + +## Approach and Plan + + + +1. Identify set of cases to review. +2. Collect results from various methods (currently have for MOOSE and TotalSegmentator). +3. Review cases with Ron and David (using [SegmentationVerificationModule](https://projectweek.na-mic.org/PW41_2024_MIT/Projects/SegmentationVerificationModuleForFinalizingMultiLabelAiSegmentations/)) +4. Identify more cases with failures, prepare interface/instructions how to help in identifying those. +5. Summarize the results of the review in a publicly available document. + +## Progress and Next Steps + +1. 
Instructions for downloading initial sample of images: + +`pip install --upgrade idc-index` + +```python +test_series = \ +['1.2.840.113654.2.55.195946682403058845904768502826466194287', \ + '1.2.840.113654.2.55.221581533879834196356530174246594024639', \ + '1.2.840.113654.2.55.71263399928421039572326605504649736531', \ + '1.2.840.113654.2.55.79318439085250760439172236218713769408', \ + '1.2.840.113654.2.55.191661316001774647835097522264785668378', \ + '1.2.840.113654.2.55.304075689731327662774315497031574106725', \ + '1.2.840.113654.2.55.283399418711252976131557177419186072875', \ + '1.2.840.113654.2.55.21461438679308812574178613217680405233', \ + '1.2.840.113654.2.55.97114726565566537928831413367474015470', \ + '1.2.840.113654.2.55.122344168497038128022524906545138736420', \ + '1.2.840.113654.2.55.229650531101716203536241646069123704792', \ + '1.2.840.113654.2.55.257926562693607663865369179341285235858', \ + '1.3.6.1.4.1.14519.5.2.1.7009.9004.135383252566920035150987356231', \ + '1.3.6.1.4.1.14519.5.2.1.7009.9004.315696884435641630605419115484', \ + '1.3.6.1.4.1.14519.5.2.1.7009.9004.230644512623268816899910856967', \ + '1.3.6.1.4.1.14519.5.2.1.7009.9004.330739122093904668699523188451', \ + '1.3.6.1.4.1.14519.5.2.1.7009.9004.690272753571338193252806012518', \ + '1.3.6.1.4.1.14519.5.2.1.7009.9004.310718458447911706151879406927'] + +from idc_index import IDCClient + +c= IDCClient() + +c.download_from_selection(downloadDir=".",seriesInstanceUID=test_series) +``` + +2. 1 session with Ron looking at the initial result for just one case. Issues identified: 1) unrealistic anatomy and large gap between ribs and and [erector spinae](https://www.kenhub.com/en/library/anatomy/erector-spinae-muscles); 2) incorrect segmentation of some vertebrae; 3) large gaps between the structure; 4) segmentation is too coarse. +3. Collected results from Auto3DSeg and OMAS, in addition to TotalSegmentator v1 and MOOSE for the test sample. +4. 
Finished [SNOMED mapping for OMAS](https://docs.google.com/spreadsheets/d/1pBicNskjMDJBnD3w4yAQroj8SGSAhDfA_TUK24dLEyc/edit?gid=1390863317#gid=1390863317). +5. Harmonized mapping from model-specific labels to SNOMED-CT codes and consistent colors for Auto3DSeg and MOOSE (see CSV files [here](https://github.com/NA-MIC/ProjectWeek/tree/master/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels)) - [Google Sheet with current mapping](https://docs.google.com/spreadsheets/d/10VNy3kjaeXOgCRSgInCKH014134ZbD5Ezju7zZvVCRY/edit?gid=0#gid=0). Didn't finish this for OMAS! Notebook for this harmonization [here](https://colab.research.google.com/drive/1Ag4FwScTFC6CYgtDtmaC6GeBnAkpnKOw?usp=sharing). +6. Explored Slicer capabilities for joint visualization of the segmentation results, identified relevant features thanks to Steve! +7. Received Multitalent model segmentation results from Klaus Maier-Hein. +8. Co-authored with Perplexity a Google Apps script for coloring Google Sheets cells based on the RGB string - helpful for quickly evaluating color selection. +9. Brainstormed ideas for how to approach and scale up this comparison with Bjoern. 
+ +![](ts_a3ds_m.jpg) +![](ts_gaps.jpg) +![](t8.gif) +![](broken_ribs.jpg) + + +```js +function colorCellsFromRGB() { + var sheet = SpreadsheetApp.getActiveSpreadsheet().getActiveSheet(); + var dataRange = sheet.getDataRange(); + var values = dataRange.getValues(); + + for (var i = 0; i < values.length; i++) { + for (var j = 0; j < values[i].length; j++) { + var cell = values[i][j]; + if (typeof cell === 'string' && cell.match(/^\[\d{1,3}, \d{1,3}, \d{1,3}\]$/)) { + var rgb = JSON.parse(cell); + var color = rgbToHex(rgb[0], rgb[1], rgb[2]); + sheet.getRange(i + 1, j + 1).setBackground(color); + } + } + } +} + +function rgbToHex(r, g, b) { + return "#" + ((1 << 24) + (r << 16) + (g << 8) + b).toString(16).slice(1); +} +``` + +Feedback/features: +* identified some issues in SegmentationReview module - Andrey will try to submit a PR +* identified issues related to visualization of segmentations in Slicer - issue submitted https://github.com/Slicer/Slicer/issues/8190 +* would be really helpful to be able to show what structure cursor points to in 3d View (Ron seconds this!) +* review of multiple segmentations is currently difficult - perhaps opportunity for improving SegmentationsReview module + + +# Illustrations + + + +![Image](https://github.com/user-attachments/assets/69b7b3dc-44f6-4d53-8ea9-4c2b62eb0023) + +_No response_ + +# Background and References + + +* Krishnaswamy, D., Thiriveedhi, V. K., Ciausu, C., Clunie, D., Pieper, S., Kikinis, R. & Fedorov, A. Rule-based outlier detection of AI-generated anatomy segmentations. arXiv [eess.IV] (2024). 
at [http://arxiv.org/abs/2406.14486](http://arxiv.org/abs/2406.14486) +* HuggingFace exploration dashboard: [https://huggingface.co/spaces/ImagingDataCommons/CloudSegmentatorResults](https://huggingface.co/spaces/ImagingDataCommons/CloudSegmentatorResults) diff --git a/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/auto3dseg_with_colors.csv b/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/auto3dseg_with_colors.csv new file mode 100644 index 000000000..7e117d7b4 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/auto3dseg_with_colors.csv @@ -0,0 +1,118 @@ +,label_name,SegmentedPropertyCategoryCodeSequence.CodingSchemeDesignator,SegmentedPropertyCategoryCodeSequence.CodeValue,SegmentedPropertyCategoryCodeSequence.CodeMeaning,SegmentedPropertyTypeCodeSequence.CodingSchemeDesignator,SegmentedPropertyTypeCodeSequence.CodeValue,SegmentedPropertyTypeCodeSequence.CodeMeaning,SegmentedPropertyTypeModifierCodeSequence.CodingSchemeDesignator,SegmentedPropertyTypeModifierCodeSequence.CodeValue,SegmentedPropertyTypeModifierCodeSequence.CodeMeaning,AnatomicRegionSequence.CodingSchemeDesignator,AnatomicRegionSequence.CodeValue,AnatomicRegionSequence.CodeMeaning,AnatomicRegionModifierSequence.CodingSchemeDesignator_x,AnatomicRegionModifierSequence.CodeValue_x,AnatomicRegionModifierSequence.CodeMeaning_x,recommendedDisplayRGBValue,recommendedDisplayRGBValue.R,recommendedDisplayRGBValue.G,recommendedDisplayRGBValue.B,label_id,AnatomicRegionModifierSequence.CodeMeaning_y,AnatomicRegionModifierSequence.CodeValue_y,AnatomicRegionModifierSequence.CodingSchemeDesignator_y,AnatomicRegionModifierSequence.CodeMeaning_y,AnatomicRegionModifierSequence.CodeValue_y,AnatomicRegionModifierSequence.CodingSchemeDesignator_y +0,spleen,SCT,123037004,Anatomical Structure,SCT,78961009,Spleen,,,,,,,,,,"[157, 
108, 162]",157.0,108.0,162.0,1,,,,,, +1,kidney_right,SCT,123037004,Anatomical Structure,SCT,64033007,Kidney,SCT,24028007.0,Right,,,,,,,"[212, 126, 151]",212.0,126.0,151.0,2,,,,,, +2,kidney_left,SCT,123037004,Anatomical Structure,SCT,64033007,Kidney,SCT,7771000.0,Left,,,,,,,"[212, 126, 151]",212.0,126.0,151.0,3,,,,,, +3,gallbladder,SCT,123037004,Anatomical Structure,SCT,28231008,Gallbladder,,,,,,,,,,"[139, 150, 98]",139.0,150.0,98.0,4,,,,,, +4,liver,SCT,123037004,Anatomical Structure,SCT,10200004,Liver,,,,,,,,,,"[221, 130, 101]",221.0,130.0,101.0,5,,,,,, +5,stomach,SCT,123037004,Anatomical Structure,SCT,69695003,Stomach,,,,,,,,,,"[160, 139, 76]",160.0,139.0,76.0,6,,,,,, +6,pancreas,SCT,123037004,Anatomical Structure,SCT,15776009,Pancreas,,,,,,,,,,"[249, 180, 111]",249.0,180.0,111.0,7,,,,,, +7,adrenal_gland_right,SCT,123037004,Anatomical Structure,SCT,23451007,Adrenal gland,SCT,24028007.0,Right,,,,,,,"[199, 27, 27]",199.0,27.0,27.0,8,,,,,, +8,adrenal_gland_left,SCT,123037004,Anatomical Structure,SCT,23451007,Adrenal gland,SCT,7771000.0,Left,,,,,,,"[199, 27, 27]",199.0,27.0,27.0,9,,,,,, +9,lung_upper_lobe_left,SCT,123037004,Anatomical Structure,SCT,45653009,Upper lobe of lung,SCT,7771000.0,Left,,,,,,,"[112, 162, 95]",112.0,162.0,95.0,10,,,,,, +10,lung_lower_lobe_left,SCT,123037004,Anatomical Structure,SCT,90572001,Lower lobe of lung,SCT,7771000.0,Left,,,,,,,"[242, 150, 232]",242.0,150.0,232.0,11,,,,,, +11,lung_upper_lobe_right,SCT,123037004,Anatomical Structure,SCT,45653009,Upper lobe of lung,SCT,24028007.0,Right,,,,,,,"[173, 69, 44]",173.0,69.0,44.0,12,,,,,, +12,lung_middle_lobe_right,SCT,123037004,Anatomical Structure,SCT,72481006,Middle lobe of right lung,,,,,,,,,,"[202, 164, 140]",202.0,164.0,140.0,13,,,,,, +13,lung_lower_lobe_right,SCT,123037004,Anatomical Structure,SCT,90572001,Lower lobe of lung,SCT,24028007.0,Right,,,,,,,"[212, 191, 32]",212.0,191.0,32.0,14,,,,,, +14,esophagus,SCT,123037004,Anatomical Structure,SCT,32849002,Esophagus,,,,,,,,,,"[211, 171, 
143]",211.0,171.0,143.0,15,,,,,, +15,trachea,SCT,123037004,Anatomical Structure,SCT,44567001,Trachea,,,,,,,,,,"[182, 228, 255]",182.0,228.0,255.0,16,,,,,, +16,thyroid_gland,SCT,123037004,Anatomical Structure,SCT,69748006,Thyroid gland,,,,,,,,,,"[45, 82, 160]",,,,17,,,,,, +17,small_bowel,SCT,123037004,Anatomical Structure,SCT,30315005,Small Intestine,,,,,,,,,,"[205, 167, 142]",205.0,167.0,142.0,18,,,,,, +18,duodenum,SCT,123037004,Anatomical Structure,SCT,38848004,Duodenum,,,,,,,,,,"[255, 253, 229]",255.0,253.0,229.0,19,,,,,, +19,colon,SCT,123037004,Anatomical Structure,SCT,71854001,Colon,,,,,,,,,,"[204, 168, 143]",204.0,168.0,143.0,20,,,,,, +20,urinary_bladder,SCT,123037004,Anatomical Structure,SCT,89837001,Urinary bladder,,,,,,,,,,"[222, 154, 132]",222.0,154.0,132.0,21,,,,,, +21,prostate,SCT,123037004,Anatomical Structure,SCT,41216001,Prostate,,,,,,,,,,"[230, 57, 70]",,,,22,,,,,, +22,kidney_cyst_left,SCT,49755003,Morphologically Altered Structure,SCT,367643001,Cyst,,,,SCT,64033007.0,Kidney,SCT,7771000.0,Left,"[255, 209, 102]",,,,23,Left,7771000.0,SCT,Left,7771000.0,SCT +23,kidney_cyst_right,SCT,49755003,Morphologically Altered Structure,SCT,367643001,Cyst,,,,SCT,64033007.0,Kidney,SCT,24028007.0,Right,"[6, 214, 160]",,,,24,Right,24028007.0,SCT,Right,24028007.0,SCT +24,sacrum,SCT,123037004,Anatomical Structure,SCT,54735007,Sacrum,,,,,,,,,,"[163, 140, 140]",163.0,140.0,140.0,25,,,,,, +25,vertebrae_S1,SCT,123037004,Anatomical Structure,SCT,65985001,S1 vertebra,,,,,,,,,,"[17, 138, 178]",,,,26,,,,,, +26,vertebrae_L5,SCT,123037004,Anatomical Structure,SCT,49668003,L5 vertebra,,,,,,,,,,"[117, 78, 55]",117.0,78.0,55.0,27,,,,,, +27,vertebrae_L4,SCT,123037004,Anatomical Structure,SCT,11994002,L4 vertebra,,,,,,,,,,"[255, 255, 128]",255.0,255.0,128.0,28,,,,,, +28,vertebrae_L3,SCT,123037004,Anatomical Structure,SCT,36470004,L3 vertebra,,,,,,,,,,"[157, 126, 0]",157.0,126.0,0.0,29,,,,,, +29,vertebrae_L2,SCT,123037004,Anatomical Structure,SCT,14293000,L2 vertebra,,,,,,,,,,"[255, 
144, 32]",255.0,144.0,32.0,30,,,,,, +30,vertebrae_L1,SCT,123037004,Anatomical Structure,SCT,66794005,L1 vertebra,,,,,,,,,,"[255, 255, 1]",255.0,255.0,1.0,31,,,,,, +31,vertebrae_T12,SCT,123037004,Anatomical Structure,SCT,23215003,T12 vertebra,,,,,,,,,,"[166, 89, 255]",166.0,89.0,255.0,32,,,,,, +32,vertebrae_T11,SCT,123037004,Anatomical Structure,SCT,12989004,T11 vertebra,,,,,,,,,,"[16, 164, 144]",16.0,164.0,144.0,33,,,,,, +33,vertebrae_T10,SCT,123037004,Anatomical Structure,SCT,7610001,T10 vertebra,,,,,,,,,,"[26, 97, 228]",26.0,97.0,228.0,34,,,,,, +34,vertebrae_T9,SCT,123037004,Anatomical Structure,SCT,82687006,T9 vertebra,,,,,,,,,,"[96, 188, 62]",96.0,188.0,62.0,35,,,,,, +35,vertebrae_T8,SCT,123037004,Anatomical Structure,SCT,11068009,T8 vertebra,,,,,,,,,,"[164, 216, 199]",164.0,216.0,199.0,36,,,,,, +36,vertebrae_T7,SCT,123037004,Anatomical Structure,SCT,62487009,T7 vertebra,,,,,,,,,,"[109, 0, 145]",109.0,0.0,145.0,37,,,,,, +37,vertebrae_T6,SCT,123037004,Anatomical Structure,SCT,45296009,T6 vertebra,,,,,,,,,,"[88, 119, 255]",88.0,119.0,255.0,38,,,,,, +38,vertebrae_T5,SCT,123037004,Anatomical Structure,SCT,56401006,T5 vertebra,,,,,,,,,,"[8, 82, 73]",8.0,82.0,73.0,39,,,,,, +39,vertebrae_T4,SCT,123037004,Anatomical Structure,SCT,73071006,T4 vertebra,,,,,,,,,,"[73, 228, 202]",73.0,228.0,202.0,40,,,,,, +40,vertebrae_T3,SCT,123037004,Anatomical Structure,SCT,1626008,T3 vertebra,,,,,,,,,,"[30, 128, 188]",30.0,128.0,188.0,41,,,,,, +41,vertebrae_T2,SCT,123037004,Anatomical Structure,SCT,53733008,T2 vertebra,,,,,,,,,,"[141, 186, 216]",141.0,186.0,216.0,42,,,,,, +42,vertebrae_T1,SCT,123037004,Anatomical Structure,SCT,64864005,T1 vertebra,,,,,,,,,,"[0, 2, 145]",0.0,2.0,145.0,43,,,,,, +43,vertebrae_C7,SCT,123037004,Anatomical Structure,SCT,87391001,C7 vertebra,,,,,,,,,,"[255, 0, 0]",255.0,0.0,0.0,44,,,,,, +44,vertebrae_C6,SCT,123037004,Anatomical Structure,SCT,36054005,C6 vertebra,,,,,,,,,,"[253, 134, 125]",253.0,134.0,125.0,45,,,,,, +45,vertebrae_C5,SCT,123037004,Anatomical 
Structure,SCT,36978003,C5 vertebra,,,,,,,,,,"[118, 40, 9]",118.0,40.0,9.0,46,,,,,, +46,vertebrae_C4,SCT,123037004,Anatomical Structure,SCT,5329002,C4 vertebra,,,,,,,,,,"[250, 14, 116]",250.0,14.0,116.0,47,,,,,, +47,vertebrae_C3,SCT,123037004,Anatomical Structure,SCT,113205007,C3 vertebra,,,,,,,,,,"[167, 117, 152]",167.0,117.0,152.0,48,,,,,, +48,vertebrae_C2,SCT,123037004,Anatomical Structure,SCT,39976000,C2 vertebra,,,,,,,,,,"[193, 99, 36]",193.0,99.0,36.0,49,,,,,, +49,vertebrae_C1,SCT,123037004,Anatomical Structure,SCT,14806007,C1 vertebra,,,,,,,,,,"[255, 186, 107]",255.0,186.0,107.0,50,,,,,, +50,heart,SCT,123037004,Anatomical Structure,SCT,80891009,Heart,,,,,,,,,,"[239, 71, 111]",,,,51,,,,,, +51,aorta,SCT,123037004,Anatomical Structure,SCT,15825003,Aorta,,,,,,,,,,"[224, 97, 76]",224.0,97.0,76.0,52,,,,,, +52,pulmonary_vein,SCT,123037004,Anatomical Structure,SCT,122972007,Pulmonary vein,,,,,,,,,,"[7, 59, 76]",,,,53,,,,,, +53,brachiocephalic_trunk,SCT,123037004,Anatomical Structure,SCT,12691009,Brachiocephalic artery,,,,,,,,,,"[255, 141, 0]",,,,54,,,,,, +54,subclavian_artery_right,SCT,123037004,Anatomical Structure,SCT,29700009,Subclavian artery,SCT,24028007.0,Right,,,,,,,"[0, 168, 120]",,,,55,,,,,, +55,subclavian_artery_left,SCT,123037004,Anatomical Structure,SCT,29700009,Subclavian artery,SCT,7771000.0,Left,,,,,,,"[142, 68, 173]",,,,56,,,,,, +56,common_carotid_artery_right,SCT,123037004,Anatomical Structure,SCT,32062004,Common carotid artery,SCT,24028007.0,Right,,,,,,,"[255, 87, 51]",,,,57,,,,,, +57,common_carotid_artery_left,SCT,123037004,Anatomical Structure,SCT,32062004,Common carotid artery,SCT,7771000.0,Left,,,,,,,"[0, 121, 140]",,,,58,,,,,, +58,brachiocephalic_vein_left,SCT,123037004,Anatomical Structure,SCT,8887007,Brachiocephalic vein,SCT,7771000.0,Left,,,,,,,"[255, 195, 0]",,,,59,,,,,, +59,brachiocephalic_vein_right,SCT,123037004,Anatomical Structure,SCT,8887007,Brachiocephalic vein,SCT,24028007.0,Right,,,,,,,"[75, 0, 130]",,,,60,,,,,, 
+60,atrial_appendage_left,SCT,123037004,Anatomical Structure,SCT,68786006,Auricular appendage,SCT,7771000.0,Left,,,,,,,"[255, 68, 204]",,,,61,,,,,, +61,superior_vena_cava,SCT,123037004,Anatomical Structure,SCT,48345005,Superior vena cava,,,,,,,,,,"[0, 200, 83]",,,,62,,,,,, +62,inferior_vena_cava,SCT,123037004,Anatomical Structure,SCT,64131007,Inferior vena cava,,,,,,,,,,"[110, 165, 54]",110.0,165.0,54.0,63,,,,,, +63,portal_vein_and_splenic_vein,SCT,123037004,Anatomical Structure,SCT,110765007,Portal vein and splenic vein,,,,,,,,,,"[0, 151, 206]",0.0,151.0,206.0,64,,,,,, +64,iliac_artery_left,SCT,123037004,Anatomical Structure,SCT,73634005,Common iliac artery,SCT,7771000.0,Left,,,,,,,"[217, 162, 48]",217.0,162.0,48.0,65,,,,,, +65,iliac_artery_right,SCT,123037004,Anatomical Structure,SCT,73634005,Common iliac artery,SCT,24028007.0,Right,,,,,,,"[217, 162, 48]",217.0,162.0,48.0,66,,,,,, +66,iliac_vena_left,SCT,123037004,Anatomical Structure,SCT,46027005,Common iliac vein,SCT,7771000.0,Left,,,,,,,"[197, 174, 37]",197.0,174.0,37.0,67,,,,,, +67,iliac_vena_right,SCT,123037004,Anatomical Structure,SCT,46027005,Common iliac vein,SCT,24028007.0,Right,,,,,,,"[197, 174, 37]",197.0,174.0,37.0,68,,,,,, +68,humerus_left,SCT,123037004,Anatomical Structure,SCT,85050009,Humerus,SCT,7771000.0,Left,,,,,,,"[95, 229, 185]",95.0,229.0,185.0,69,,,,,, +69,humerus_right,SCT,123037004,Anatomical Structure,SCT,85050009,Humerus,SCT,24028007.0,Right,,,,,,,"[95, 229, 185]",95.0,229.0,185.0,70,,,,,, +70,scapula_left,SCT,123037004,Anatomical Structure,SCT,79601000,Scapula,SCT,7771000.0,Left,,,,,,,"[199, 204, 131]",199.0,204.0,131.0,71,,,,,, +71,scapula_right,SCT,123037004,Anatomical Structure,SCT,79601000,Scapula,SCT,24028007.0,Right,,,,,,,"[199, 204, 131]",199.0,204.0,131.0,72,,,,,, +72,clavicula_left,SCT,123037004,Anatomical Structure,SCT,51299004,Clavicle,SCT,7771000.0,Left,,,,,,,"[241, 188, 38]",241.0,188.0,38.0,73,,,,,, +73,clavicula_right,SCT,123037004,Anatomical 
Structure,SCT,51299004,Clavicle,SCT,24028007.0,Right,,,,,,,"[241, 188, 38]",241.0,188.0,38.0,74,,,,,, +74,femur_left,SCT,123037004,Anatomical Structure,SCT,71341001,Femur,SCT,7771000.0,Left,,,,,,,"[130, 222, 207]",130.0,222.0,207.0,75,,,,,, +75,femur_right,SCT,123037004,Anatomical Structure,SCT,71341001,Femur,SCT,24028007.0,Right,,,,,,,"[130, 222, 207]",130.0,222.0,207.0,76,,,,,, +76,hip_left,SCT,123037004,Anatomical Structure,SCT,29836001,Hip,SCT,7771000.0,Left,,,,,,,"[241, 214, 145]",241.0,214.0,145.0,77,,,,,, +77,hip_right,SCT,123037004,Anatomical Structure,SCT,29836001,Hip,SCT,24028007.0,Right,,,,,,,"[241, 214, 145]",241.0,214.0,145.0,78,,,,,, +78,spinal_cord,SCT,123037004,Anatomical Structure,SCT,2748008,Spinal cord,,,,,,,,,,"[131, 56, 236]",,,,79,,,,,, +79,gluteus_maximus_left,SCT,123037004,Anatomical Structure,SCT,181674001,Gluteus maximus muscle,SCT,7771000.0,Left,,,,,,,"[192, 104, 88]",192.0,104.0,88.0,80,,,,,, +80,gluteus_maximus_right,SCT,123037004,Anatomical Structure,SCT,181674001,Gluteus maximus muscle,SCT,24028007.0,Right,,,,,,,"[192, 104, 88]",192.0,104.0,88.0,81,,,,,, +81,gluteus_medius_left,SCT,123037004,Anatomical Structure,SCT,78333006,Gluteus medius muscle,SCT,7771000.0,Left,,,,,,,"[192, 104, 88]",192.0,104.0,88.0,82,,,,,, +82,gluteus_medius_right,SCT,123037004,Anatomical Structure,SCT,78333006,Gluteus medius muscle,SCT,24028007.0,Right,,,,,,,"[192, 104, 88]",192.0,104.0,88.0,83,,,,,, +83,gluteus_minimus_left,SCT,123037004,Anatomical Structure,SCT,75297007,Gluteus minimus muscle,SCT,7771000.0,Left,,,,,,,"[192, 104, 88]",192.0,104.0,88.0,84,,,,,, +84,gluteus_minimus_right,SCT,123037004,Anatomical Structure,SCT,75297007,Gluteus minimus muscle,SCT,24028007.0,Right,,,,,,,"[192, 104, 88]",192.0,104.0,88.0,85,,,,,, +85,autochthon_left,SCT,123037004,Anatomical Structure,SCT,244849004,Deep muscle of back,SCT,7771000.0,Left,,,,,,,"[192, 104, 88]",192.0,104.0,88.0,86,,,,,, +86,autochthon_right,SCT,123037004,Anatomical Structure,SCT,244849004,Deep muscle of
back,SCT,24028007.0,Right,,,,,,,"[192, 104, 88]",192.0,104.0,88.0,87,,,,,, +87,iliopsoas_left,SCT,123037004,Anatomical Structure,SCT,68455001,Iliopsoas muscle,SCT,7771000.0,Left,,,,,,,"[192, 104, 88]",192.0,104.0,88.0,88,,,,,, +88,iliopsoas_right,SCT,123037004,Anatomical Structure,SCT,68455001,Iliopsoas muscle,SCT,24028007.0,Right,,,,,,,"[192, 104, 88]",192.0,104.0,88.0,89,,,,,, +89,brain,SCT,123037004,Anatomical Structure,SCT,12738006,Brain,,,,,,,,,,"[250, 250, 225]",250.0,250.0,225.0,90,,,,,, +90,skull,SCT,123037004,Anatomical Structure,SCT,89546000,Skull,,,,,,,,,,"[255, 111, 97]",,,,91,,,,,, +91,rib_right_4,SCT,123037004,Anatomical Structure,SCT,25523003,Fourth rib,SCT,24028007.0,Right,,,,,,,"[73, 228, 202]",73.0,228.0,202.0,92,,,,,, +92,rib_right_3,SCT,123037004,Anatomical Structure,SCT,25888004,Third rib,SCT,24028007.0,Right,,,,,,,"[30, 128, 188]",30.0,128.0,188.0,93,,,,,, +93,rib_left_1,SCT,123037004,Anatomical Structure,SCT,48535007,First rib,SCT,7771000.0,Left,,,,,,,"[0, 2, 145]",0.0,2.0,145.0,94,,,,,, +94,rib_left_2,SCT,123037004,Anatomical Structure,SCT,78247007,Second rib,SCT,7771000.0,Left,,,,,,,"[141, 186, 216]",141.0,186.0,216.0,95,,,,,, +95,rib_left_3,SCT,123037004,Anatomical Structure,SCT,25888004,Third rib,SCT,7771000.0,Left,,,,,,,"[30, 128, 188]",30.0,128.0,188.0,96,,,,,, +96,rib_left_4,SCT,123037004,Anatomical Structure,SCT,25523003,Fourth rib,SCT,7771000.0,Left,,,,,,,"[73, 228, 202]",73.0,228.0,202.0,97,,,,,, +97,rib_left_5,SCT,123037004,Anatomical Structure,SCT,15339008,Fifth rib,SCT,7771000.0,Left,,,,,,,"[8, 82, 73]",8.0,82.0,73.0,98,,,,,, +98,rib_left_6,SCT,123037004,Anatomical Structure,SCT,59558009,Sixth rib,SCT,7771000.0,Left,,,,,,,"[88, 119, 255]",88.0,119.0,255.0,99,,,,,, +99,rib_left_7,SCT,123037004,Anatomical Structure,SCT,24915002,Seventh rib,SCT,7771000.0,Left,,,,,,,"[109, 0, 145]",109.0,0.0,145.0,100,,,,,, +100,rib_left_8,SCT,123037004,Anatomical Structure,SCT,5953002,Eighth rib,SCT,7771000.0,Left,,,,,,,"[164, 216, 
199]",164.0,216.0,199.0,101,,,,,, +101,rib_left_9,SCT,123037004,Anatomical Structure,SCT,22565002,Ninth rib,SCT,7771000.0,Left,,,,,,,"[96, 188, 62]",96.0,188.0,62.0,102,,,,,, +102,rib_left_10,SCT,123037004,Anatomical Structure,SCT,77644006,Tenth rib,SCT,7771000.0,Left,,,,,,,"[26, 97, 228]",26.0,97.0,228.0,103,,,,,, +103,rib_left_11,SCT,123037004,Anatomical Structure,SCT,58830002,Eleventh rib,SCT,7771000.0,Left,,,,,,,"[16, 164, 144]",16.0,164.0,144.0,104,,,,,, +104,rib_left_12,SCT,123037004,Anatomical Structure,SCT,43993008,Twelfth rib,SCT,7771000.0,Left,,,,,,,"[166, 89, 255]",166.0,89.0,255.0,105,,,,,, +105,rib_right_1,SCT,123037004,Anatomical Structure,SCT,48535007,First rib,SCT,24028007.0,Right,,,,,,,"[0, 2, 145]",0.0,2.0,145.0,106,,,,,, +106,rib_right_2,SCT,123037004,Anatomical Structure,SCT,78247007,Second rib,SCT,24028007.0,Right,,,,,,,"[141, 186, 216]",141.0,186.0,216.0,107,,,,,, +107,rib_right_5,SCT,123037004,Anatomical Structure,SCT,15339008,Fifth rib,SCT,24028007.0,Right,,,,,,,"[8, 82, 73]",8.0,82.0,73.0,108,,,,,, +108,rib_right_6,SCT,123037004,Anatomical Structure,SCT,59558009,Sixth rib,SCT,24028007.0,Right,,,,,,,"[88, 119, 255]",88.0,119.0,255.0,109,,,,,, +109,rib_right_7,SCT,123037004,Anatomical Structure,SCT,24915002,Seventh rib,SCT,24028007.0,Right,,,,,,,"[109, 0, 145]",109.0,0.0,145.0,110,,,,,, +110,rib_right_8,SCT,123037004,Anatomical Structure,SCT,5953002,Eighth rib,SCT,24028007.0,Right,,,,,,,"[164, 216, 199]",164.0,216.0,199.0,111,,,,,, +111,rib_right_9,SCT,123037004,Anatomical Structure,SCT,22565002,Ninth rib,SCT,24028007.0,Right,,,,,,,"[96, 188, 62]",96.0,188.0,62.0,112,,,,,, +112,rib_right_10,SCT,123037004,Anatomical Structure,SCT,77644006,Tenth rib,SCT,24028007.0,Right,,,,,,,"[26, 97, 228]",26.0,97.0,228.0,113,,,,,, +113,rib_right_11,SCT,123037004,Anatomical Structure,SCT,58830002,Eleventh rib,SCT,24028007.0,Right,,,,,,,"[16, 164, 144]",16.0,164.0,144.0,114,,,,,, +114,rib_right_12,SCT,123037004,Anatomical Structure,SCT,43993008,Twelfth 
rib,SCT,24028007.0,Right,,,,,,,"[166, 89, 255]",166.0,89.0,255.0,115,,,,,, +115,sternum,SCT,123037004,Anatomical Structure,SCT,56873002,Sternum,,,,,,,,,,"[0, 230, 118]",,,,116,,,,,, +116,costal_cartilages,SCT,123037004,Anatomical Structure,SCT,50016007,Costal cartilage,,,,,,,,,,"[138, 43, 226]",,,,117,,,,,, diff --git a/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/broken_ribs.jpg b/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/broken_ribs.jpg new file mode 100644 index 000000000..fe7050f15 Binary files /dev/null and b/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/broken_ribs.jpg differ diff --git a/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/moose_with_colors.csv b/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/moose_with_colors.csv new file mode 100644 index 000000000..b87805631 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/moose_with_colors.csv @@ -0,0 +1,129 @@ +,label_name,recommendedDisplayRGBValue,Model,Structure 
JSON,label_id,SegmentedPropertyCategoryCodeSequence.CodingSchemeDesignator,SegmentedPropertyCategoryCodeSequence.CodeValue,SegmentedPropertyCategoryCodeSequence.CodeMeaning,SegmentedPropertyTypeCodeSequence.CodingSchemeDesignator,SegmentedPropertyTypeCodeSequence.CodeValue,SegmentedPropertyTypeCodeSequence.CodeMeaning,SegmentedPropertyTypeModifierCodeSequence.CodingSchemeDesignator,SegmentedPropertyTypeModifierCodeSequence.CodeValue,SegmentedPropertyTypeModifierCodeSequence.CodeMeaning,AnatomicRegionSequence.CodingSchemeDesignator,AnatomicRegionSequence.CodeValue,AnatomicRegionSequence.CodeMeaning,AnatomicRegionModifierSequence.CodingSchemeDesignator,AnatomicRegionModifierSequence.CodeValue,AnatomicRegionModifierSequence.CodeMeaning +0,rib_left_1,"[0, 2, 145]",ribs,"rib_left_1: ""1"",",1,SCT,123037004,Anatomical Structure,SCT,48535007,First rib,SCT,7771000.0,Left,,,,,, +1,rib_left_2,"[141, 186, 216]",ribs,"rib_left_2: ""2"",",2,SCT,123037004,Anatomical Structure,SCT,78247007,Second rib,SCT,7771000.0,Left,,,,,, +2,rib_left_3,"[30, 128, 188]",ribs,"rib_left_3: ""3"",",3,SCT,123037004,Anatomical Structure,SCT,25888004,Third rib,SCT,7771000.0,Left,,,,,, +3,rib_left_4,"[73, 228, 202]",ribs,"rib_left_4: ""4"",",4,SCT,123037004,Anatomical Structure,SCT,25523003,Fourth rib,SCT,7771000.0,Left,,,,,, +4,rib_left_5,"[8, 82, 73]",ribs,"rib_left_5: ""5"",",5,SCT,123037004,Anatomical Structure,SCT,15339008,Fifth rib,SCT,7771000.0,Left,,,,,, +5,rib_left_6,"[88, 119, 255]",ribs,"rib_left_6: ""6"",",6,SCT,123037004,Anatomical Structure,SCT,59558009,Sixth rib,SCT,7771000.0,Left,,,,,, +6,rib_left_7,"[109, 0, 145]",ribs,"rib_left_7: ""7"",",7,SCT,123037004,Anatomical Structure,SCT,24915002,Seventh rib,SCT,7771000.0,Left,,,,,, +7,rib_left_8,"[164, 216, 199]",ribs,"rib_left_8: ""8"",",8,SCT,123037004,Anatomical Structure,SCT,5953002,Eighth rib,SCT,7771000.0,Left,,,,,, +8,rib_left_9,"[96, 188, 62]",ribs,"rib_left_9: ""9"",",9,SCT,123037004,Anatomical Structure,SCT,22565002,Ninth 
rib,SCT,7771000.0,Left,,,,,, +9,rib_left_10,"[26, 97, 228]",ribs,"rib_left_10: ""10"",",10,SCT,123037004,Anatomical Structure,SCT,77644006,Tenth rib,SCT,7771000.0,Left,,,,,, +10,rib_left_11,"[16, 164, 144]",ribs,"rib_left_11: ""11"",",11,SCT,123037004,Anatomical Structure,SCT,58830002,Eleventh rib,SCT,7771000.0,Left,,,,,, +11,rib_left_12,"[166, 89, 255]",ribs,"rib_left_12: ""12"",",12,SCT,123037004,Anatomical Structure,SCT,43993008,Twelfth rib,SCT,7771000.0,Left,,,,,, +12,rib_left_13,,ribs,"rib_left_13: ""13"",",13,SCT,123037004,Anatomical Structure,SCT,1193560003,Thirteenth rib,SCT,7771000.0,Left,,,,,, +13,rib_right_1,"[0, 2, 145]",ribs,"rib_right_1: ""14"",",14,SCT,123037004,Anatomical Structure,SCT,48535007,First rib,SCT,24028007.0,Right,,,,,, +14,rib_right_2,"[141, 186, 216]",ribs,"rib_right_2: ""15"",",15,SCT,123037004,Anatomical Structure,SCT,78247007,Second rib,SCT,24028007.0,Right,,,,,, +15,rib_right_3,"[30, 128, 188]",ribs,"rib_right_3: ""16"",",16,SCT,123037004,Anatomical Structure,SCT,25888004,Third rib,SCT,24028007.0,Right,,,,,, +16,rib_right_4,"[73, 228, 202]",ribs,"rib_right_4: ""17"",",17,SCT,123037004,Anatomical Structure,SCT,25523003,Fourth rib,SCT,24028007.0,Right,,,,,, +17,rib_right_5,"[8, 82, 73]",ribs,"rib_right_5: ""18"",",18,SCT,123037004,Anatomical Structure,SCT,15339008,Fifth rib,SCT,24028007.0,Right,,,,,, +18,rib_right_6,"[88, 119, 255]",ribs,"rib_right_6: ""19"",",19,SCT,123037004,Anatomical Structure,SCT,59558009,Sixth rib,SCT,24028007.0,Right,,,,,, +19,rib_right_7,"[109, 0, 145]",ribs,"rib_right_7: ""20"",",20,SCT,123037004,Anatomical Structure,SCT,24915002,Seventh rib,SCT,24028007.0,Right,,,,,, +20,rib_right_8,"[164, 216, 199]",ribs,"rib_right_8: ""21"",",21,SCT,123037004,Anatomical Structure,SCT,5953002,Eighth rib,SCT,24028007.0,Right,,,,,, +21,rib_right_9,"[96, 188, 62]",ribs,"rib_right_9: ""22"",",22,SCT,123037004,Anatomical Structure,SCT,22565002,Ninth rib,SCT,24028007.0,Right,,,,,, +22,rib_right_10,"[26, 97, 
228]",ribs,"rib_right_10: ""23"",",23,SCT,123037004,Anatomical Structure,SCT,77644006,Tenth rib,SCT,24028007.0,Right,,,,,, +23,rib_right_11,"[16, 164, 144]",ribs,"rib_right_11: ""24"",",24,SCT,123037004,Anatomical Structure,SCT,58830002,Eleventh rib,SCT,24028007.0,Right,,,,,, +24,rib_right_12,"[166, 89, 255]",ribs,"rib_right_12: ""25"",",25,SCT,123037004,Anatomical Structure,SCT,43993008,Twelfth rib,SCT,24028007.0,Right,,,,,, +25,rib_right_13,,ribs,"rib_right_13: ""26"",",26,SCT,123037004,Anatomical Structure,SCT,1193560003,Thirteenth rib,SCT,24028007.0,Right,,,,,, +26,sternum,"[0, 230, 118]",ribs,"sternum: ""27""",27,SCT,123037004,Anatomical Structure,SCT,56873002,Sternum,,,,,,,,, +27,heart_myocardium,"[192, 104, 88]",cardiac,"heart_myocardium: ""1"",",1,SCT,123037004,Anatomical Structure,SCT,74281007,Myocardium,,,,,,,,, +28,heart_atrium_left,"[221, 129, 37]",cardiac,"heart_atrium_left: ""2"",",2,SCT,123037004,Anatomical Structure,SCT,82471001,Left atrium,,,,,,,,, +29,heart_atrium_right,"[221, 129, 37]",cardiac,"heart_atrium_right: ""3"",",3,SCT,123037004,Anatomical Structure,SCT,73829009,Right atrium,,,,,,,,, +30,heart_ventricle_left,"[152, 55, 13]",cardiac,"heart_ventricle_left: ""4"",",4,SCT,123037004,Anatomical Structure,SCT,87878005,Left ventricle of heart,,,,,,,,, +31,heart_ventricle_right,"[181, 85, 57]",cardiac,"heart_ventricle_right: ""5"",",5,SCT,123037004,Anatomical Structure,SCT,53085002,Right ventricle of heart,,,,,,,,, +32,aorta,"[224, 97, 76]",cardiac,"aorta: ""6"",",6,SCT,123037004,Anatomical Structure,SCT,15825003,Aorta,,,,,,,,, +33,iliac_artery_left,"[217, 162, 48]",cardiac,"iliac_artery_left: ""7"",",7,SCT,123037004,Anatomical Structure,SCT,73634005,Common iliac artery,SCT,7771000.0,Left,,,,,, +34,iliac_artery_right,"[217, 162, 48]",cardiac,"iliac_artery_right: ""8"",",8,SCT,123037004,Anatomical Structure,SCT,73634005,Common iliac artery,SCT,24028007.0,Right,,,,,, +35,iliac_vena_left,"[197, 174, 37]",cardiac,"iliac_vena_left: 
""9"",",9,SCT,123037004,Anatomical Structure,SCT,46027005,Common iliac vein,SCT,7771000.0,Left,,,,,, +36,iliac_vena_right,"[197, 174, 37]",cardiac,"iliac_vena_right: ""10"",",10,SCT,123037004,Anatomical Structure,SCT,46027005,Common iliac vein,SCT,24028007.0,Right,,,,,, +37,inferior_vena_cava,"[110, 165, 54]",cardiac,"inferior_vena_cava: ""11"",",11,SCT,123037004,Anatomical Structure,SCT,64131007,Inferior vena cava,,,,,,,,, +38,portal_splenic_vein,,cardiac,"portal_splenic_vein: ""12"",",12,SCT,123037004,Anatomical Structure,SCT,110765007,Portal vein and splenic vein,,,,,,,,, +39,pulmonary_artery,"[0, 122, 171]",cardiac,"pulmonary_artery: ""13""",13,SCT,123037004,Anatomical Structure,SCT,81040000,Pulmonary artery,,,,,,,,, +40,adrenal_gland_left,"[199, 27, 27]",organs,"adrenal_gland_left: ""1"",",1,SCT,123037004,Anatomical Structure,SCT,23451007,Adrenal gland,SCT,7771000.0,Left,,,,,, +41,adrenal_gland_right,"[199, 27, 27]",organs,"adrenal_gland_right: ""2"",",2,SCT,123037004,Anatomical Structure,SCT,23451007,Adrenal gland,SCT,24028007.0,Right,,,,,, +42,bladder,,organs,"bladder: ""3"",",3,SCT,123037004,Anatomical Structure,SCT,89837001,Urinary bladder,,,,,,,,, +43,brain,"[250, 250, 225]",organs,"brain: ""4"",",4,SCT,123037004,Anatomical Structure,SCT,12738006,Brain,,,,,,,,, +44,gallbladder,"[139, 150, 98]",organs,"gallbladder: ""5"",",5,SCT,123037004,Anatomical Structure,SCT,28231008,Gallbladder,,,,,,,,, +45,kidney_left,"[212, 126, 151]",organs,"kidney_left: ""6"",",6,SCT,123037004,Anatomical Structure,SCT,64033007,Kidney,SCT,7771000.0,Left,,,,,, +46,kidney_right,"[212, 126, 151]",organs,"kidney_right: ""7"",",7,SCT,123037004,Anatomical Structure,SCT,64033007,Kidney,SCT,24028007.0,Right,,,,,, +47,liver,"[221, 130, 101]",organs,"liver: ""8"",",8,SCT,123037004,Anatomical Structure,SCT,10200004,Liver,,,,,,,,, +48,lung_lower_lobe_left,"[242, 150, 232]",organs,"lung_lower_lobe_left: ""9"",",9,SCT,123037004,Anatomical Structure,SCT,90572001,Lower lobe of 
lung,SCT,7771000.0,Left,,,,,, +49,lung_lower_lobe_right,"[212, 191, 32]",organs,"lung_lower_lobe_right: ""10"",",10,SCT,123037004,Anatomical Structure,SCT,90572001,Lower lobe of lung,SCT,24028007.0,Right,,,,,, +50,lung_middle_lobe_right,"[202, 164, 140]",organs,"lung_middle_lobe_right: ""11"",",11,SCT,123037004,Anatomical Structure,SCT,72481006,Middle lobe of right lung,,,,,,,,, +51,lung_upper_lobe_left,"[112, 162, 95]",organs,"lung_upper_lobe_left: ""12"",",12,SCT,123037004,Anatomical Structure,SCT,45653009,Upper lobe of lung,SCT,7771000.0,Left,,,,,, +52,lung_upper_lobe_right,"[173, 69, 44]",organs,"lung_upper_lobe_right: ""13"",",13,SCT,123037004,Anatomical Structure,SCT,45653009,Upper lobe of lung,SCT,24028007.0,Right,,,,,, +53,pancreas,"[249, 180, 111]",organs,"pancreas: ""14"",",14,SCT,123037004,Anatomical Structure,SCT,15776009,Pancreas,,,,,,,,, +54,spleen,"[157, 108, 162]",organs,"spleen: ""15"",",15,SCT,123037004,Anatomical Structure,SCT,78961009,Spleen,,,,,,,,, +55,stomach,"[160, 139, 76]",organs,"stomach: ""16"",",16,SCT,123037004,Anatomical Structure,SCT,69695003,Stomach,,,,,,,,, +56,thyroid_left,,organs,"thyroid_left: ""17"",",17,SCT,123037004,Anatomical Structure,SCT,69748006,Thyroid gland,SCT,7771000.0,Left,,,,,, +57,thyroid_right,,organs,"thyroid_right: ""18"",",18,SCT,123037004,Anatomical Structure,SCT,69748006,Thyroid gland,SCT,24028007.0,Right,,,,,, +58,trachea,"[182, 228, 255]",organs,"trachea: ""19""",19,SCT,123037004,Anatomical Structure,SCT,44567001,Trachea,,,,,,,,, +59,vertebrae_C1,"[255, 186, 107]",vertebrae,"vertebra_C1: ""1"",",1,SCT,123037004,Anatomical Structure,SCT,14806007,C1 vertebra,,,,,,,,, +60,vertebrae_C2,"[193, 99, 36]",vertebrae,"vertebra_C2: ""2"",",2,SCT,123037004,Anatomical Structure,SCT,39976000,C2 vertebra,,,,,,,,, +61,vertebrae_C3,"[167, 117, 152]",vertebrae,"vertebra_C3: ""3"",",3,SCT,123037004,Anatomical Structure,SCT,113205007,C3 vertebra,,,,,,,,, +62,vertebrae_C4,"[250, 14, 116]",vertebrae,"vertebra_C4: 
""4"",",4,SCT,123037004,Anatomical Structure,SCT,5329002,C4 vertebra,,,,,,,,, +63,vertebrae_C5,"[118, 40, 9]",vertebrae,"vertebra_C5: ""5"",",5,SCT,123037004,Anatomical Structure,SCT,36978003,C5 vertebra,,,,,,,,, +64,vertebrae_C6,"[253, 134, 125]",vertebrae,"vertebra_C6: ""6"",",6,SCT,123037004,Anatomical Structure,SCT,36054005,C6 vertebra,,,,,,,,, +65,vertebrae_C7,"[255, 0, 0]",vertebrae,"vertebra_C7: ""7"",",7,SCT,123037004,Anatomical Structure,SCT,87391001,C7 vertebra,,,,,,,,, +66,vertebrae_T1,"[0, 2, 145]",vertebrae,"vertebra_T1: ""8"",",8,SCT,123037004,Anatomical Structure,SCT,64864005,T1 vertebra,,,,,,,,, +67,vertebrae_T2,"[141, 186, 216]",vertebrae,"vertebra_T2: ""9"",",9,SCT,123037004,Anatomical Structure,SCT,53733008,T2 vertebra,,,,,,,,, +68,vertebrae_T3,"[30, 128, 188]",vertebrae,"vertebra_T3: ""10"",",10,SCT,123037004,Anatomical Structure,SCT,1626008,T3 vertebra,,,,,,,,, +69,vertebrae_T4,"[73, 228, 202]",vertebrae,"vertebra_T4: ""11"",",11,SCT,123037004,Anatomical Structure,SCT,73071006,T4 vertebra,,,,,,,,, +70,vertebrae_T5,"[8, 82, 73]",vertebrae,"vertebra_T5: ""12"",",12,SCT,123037004,Anatomical Structure,SCT,56401006,T5 vertebra,,,,,,,,, +71,vertebrae_T6,"[88, 119, 255]",vertebrae,"vertebra_T6: ""13"",",13,SCT,123037004,Anatomical Structure,SCT,45296009,T6 vertebra,,,,,,,,, +72,vertebrae_T7,"[109, 0, 145]",vertebrae,"vertebra_T7: ""14"",",14,SCT,123037004,Anatomical Structure,SCT,62487009,T7 vertebra,,,,,,,,, +73,vertebrae_T8,"[164, 216, 199]",vertebrae,"vertebra_T8: ""15"",",15,SCT,123037004,Anatomical Structure,SCT,11068009,T8 vertebra,,,,,,,,, +74,vertebrae_T9,"[96, 188, 62]",vertebrae,"vertebra_T9: ""16"",",16,SCT,123037004,Anatomical Structure,SCT,82687006,T9 vertebra,,,,,,,,, +75,vertebrae_T10,"[26, 97, 228]",vertebrae,"vertebra_T10: ""17"",",17,SCT,123037004,Anatomical Structure,SCT,7610001,T10 vertebra,,,,,,,,, +76,vertebrae_T11,"[16, 164, 144]",vertebrae,"vertebra_T11: ""18"",",18,SCT,123037004,Anatomical Structure,SCT,12989004,T11 
vertebra,,,,,,,,, +77,vertebrae_T12,"[166, 89, 255]",vertebrae,"vertebra_T12: ""19"",",19,SCT,123037004,Anatomical Structure,SCT,23215003,T12 vertebra,,,,,,,,, +78,vertebrae_L1,"[255, 255, 1]",vertebrae,"vertebra_L1: ""20"",",20,SCT,123037004,Anatomical Structure,SCT,66794005,L1 vertebra,,,,,,,,, +79,vertebrae_L2,"[255, 144, 32]",vertebrae,"vertebra_L2: ""21"",",21,SCT,123037004,Anatomical Structure,SCT,14293000,L2 vertebra,,,,,,,,, +80,vertebrae_L3,"[157, 126, 0]",vertebrae,"vertebra_L3: ""22"",",22,SCT,123037004,Anatomical Structure,SCT,36470004,L3 vertebra,,,,,,,,, +81,vertebrae_L4,"[255, 255, 128]",vertebrae,"vertebra_L4: ""23"",",23,SCT,123037004,Anatomical Structure,SCT,11994002,L4 vertebra,,,,,,,,, +82,vertebrae_L5,"[117, 78, 55]",vertebrae,"vertebra_L5: ""24"",",24,SCT,123037004,Anatomical Structure,SCT,49668003,L5 vertebra,,,,,,,,, +83,vertebrae_L6,,vertebrae,"vertebra_L6: ""25"",",25,SCT,123037004,Anatomical Structure,SCT,1156198008,L6 vertebra,,,,,,,,, +84,hip_left,"[241, 214, 145]",vertebrae,"hip_left: ""26"",",26,SCT,123037004,Anatomical Structure,SCT,29836001,Hip,SCT,7771000.0,Left,,,,,, +85,hip_right,"[241, 214, 145]",vertebrae,"hip_right: ""27"",",27,SCT,123037004,Anatomical Structure,SCT,29836001,Hip,SCT,24028007.0,Right,,,,,, +86,sacrum,"[163, 140, 140]",vertebrae,"sacrum: ""28""",28,SCT,123037004,Anatomical Structure,SCT,54735007,Sacrum,,,,,,,,, +87,carpal_left,,peripheral_bones,"carpal_left: ""1"",",1,SCT,123037004,Anatomical Structure,SCT,83936004,Carpus,SCT,7771000.0,Left,,,,,, +88,carpal_right,,peripheral_bones,"carpal_right: ""2"",",2,SCT,123037004,Anatomical Structure,SCT,83936004,Carpus,SCT,24028007.0,Right,,,,,, +89,clavicle_left,,peripheral_bones,"clavicle_left: ""3"",",3,SCT,123037004,Anatomical Structure,SCT,51299004,Clavicle,SCT,7771000.0,Left,,,,,, +90,clavicle_right,,peripheral_bones,"clavicle_right: ""4"",",4,SCT,123037004,Anatomical Structure,SCT,51299004,Clavicle,SCT,24028007.0,Right,,,,,, +91,femur_left,"[130, 222, 
207]",peripheral_bones,"femur_left: ""5"",",5,SCT,123037004,Anatomical Structure,SCT,71341001,Femur,SCT,7771000.0,Left,,,,,, +92,femur_right,"[130, 222, 207]",peripheral_bones,"femur_right: ""6"",",6,SCT,123037004,Anatomical Structure,SCT,71341001,Femur,SCT,24028007.0,Right,,,,,, +93,fibula_left,,peripheral_bones,"fibula_left: ""7"",",7,SCT,123037004,Anatomical Structure,SCT,87342007,Fibula,SCT,7771000.0,Left,,,,,, +94,fibula_right,,peripheral_bones,"fibula_right: ""8"",",8,SCT,123037004,Anatomical Structure,SCT,87342007,Fibula,SCT,24028007.0,Right,,,,,, +95,fingers_left,,peripheral_bones,"fingers_left: ""9"",",9,SCT,123037004,Anatomical Structure,SCT,7569003,Fingers,SCT,7771000.0,Left,,,,,, +96,fingers_right,,peripheral_bones,"fingers_right: ""10"",",10,SCT,123037004,Anatomical Structure,SCT,7569003,Fingers,SCT,24028007.0,Right,,,,,, +97,humerus_left,"[95, 229, 185]",peripheral_bones,"humerus_left: ""11"",",11,SCT,123037004,Anatomical Structure,SCT,85050009,Humerus,SCT,7771000.0,Left,,,,,, +98,humerus_right,"[95, 229, 185]",peripheral_bones,"humerus_right: ""12"",",12,SCT,123037004,Anatomical Structure,SCT,85050009,Humerus,SCT,24028007.0,Right,,,,,, +99,metacarpal_left,,peripheral_bones,"metacarpal_left: ""13"",",13,SCT,123037004,Anatomical Structure,SCT,36455000,Metacarpal bones,SCT,7771000.0,Left,,,,,, +100,metacarpal_right,,peripheral_bones,"metacarpal_right: ""14"",",14,SCT,123037004,Anatomical Structure,SCT,36455000,Metacarpal bones,SCT,24028007.0,Right,,,,,, +101,metatarsal_left,,peripheral_bones,"metatarsal_left: ""15"",",15,SCT,123037004,Anatomical Structure,SCT,53884002,Metatarsal,SCT,7771000.0,Left,,,,,, +102,metatarsal_right,,peripheral_bones,"metatarsal_right: ""16"",",16,SCT,123037004,Anatomical Structure,SCT,53884002,Metatarsal,SCT,24028007.0,Right,,,,,, +103,patella_left,,peripheral_bones,"patella_left: ""17"",",17,SCT,123037004,Anatomical Structure,SCT,64234005,Patella,SCT,7771000.0,Left,,,,,, +104,patella_right,,peripheral_bones,"patella_right: 
""18"",",18,SCT,123037004,Anatomical Structure,SCT,64234005,Patella,SCT,24028007.0,Right,,,,,, +105,radius_left,,peripheral_bones,"radius_left: ""19"",",19,SCT,123037004,Anatomical Structure,SCT,62413002,Radius,SCT,7771000.0,Left,,,,,, +106,radius_right,,peripheral_bones,"radius_right: ""20"",",20,SCT,123037004,Anatomical Structure,SCT,62413002,Radius,SCT,24028007.0,Right,,,,,, +107,scapula_left,"[199, 204, 131]",peripheral_bones,"scapula_left: ""21"",",21,SCT,123037004,Anatomical Structure,SCT,79601000,Scapula,SCT,7771000.0,Left,,,,,, +108,scapula_right,"[199, 204, 131]",peripheral_bones,"scapula_right: ""22"",",22,SCT,123037004,Anatomical Structure,SCT,79601000,Scapula,SCT,24028007.0,Right,,,,,, +109,skull,"[255, 111, 97]",peripheral_bones,"skull: ""23"",",23,SCT,123037004,Anatomical Structure,SCT,89546000,Skull,,,,,,,,, +110,tarsal_left,,peripheral_bones,"tarsal_left: ""24"",",24,SCT,123037004,Anatomical Structure,SCT,108371006,Tarsal bones,SCT,7771000.0,Left,,,,,, +111,tarsal_right,,peripheral_bones,"tarsal_right: ""25"",",25,SCT,123037004,Anatomical Structure,SCT,108371006,Tarsal bones,SCT,24028007.0,Right,,,,,, +112,tibia_left,,peripheral_bones,"tibia_left: ""26"",",26,SCT,123037004,Anatomical Structure,SCT,12611008,Tibia,SCT,7771000.0,Left,,,,,, +113,tibia_right,,peripheral_bones,"tibia_right: ""27"",",27,SCT,123037004,Anatomical Structure,SCT,12611008,Tibia,SCT,24028007.0,Right,,,,,, +114,toes_left,,peripheral_bones,"toes_left: ""28"",",28,SCT,123037004,Anatomical Structure,SCT,785708006,Toes of left foot,,,,,,,,, +115,toes_right,,peripheral_bones,"toes_right: ""29"",",29,SCT,123037004,Anatomical Structure,SCT,785709003,Toes of right foot,,,,,,,,, +116,ulna_left,,peripheral_bones,"ulna_left: ""30"",",30,SCT,123037004,Anatomical Structure,SCT,23416004,Ulna,SCT,7771000.0,Left,,,,,, +117,ulna_right,,peripheral_bones,"ulna_right: ""31""",31,SCT,123037004,Anatomical Structure,SCT,23416004,Ulna,SCT,24028007.0,Right,,,,,, +118,autochthon_left,"[192, 104, 
88]",muscles,"autochthon_left: ""1"",",1,SCT,123037004,Anatomical Structure,SCT,244849004,Deep muscle of back,SCT,7771000.0,Left,,,,,, +119,autochthon_right,"[192, 104, 88]",muscles,"autochthon_right: ""2"",",2,SCT,123037004,Anatomical Structure,SCT,244849004,Deep muscle of back,SCT,24028007.0,Right,,,,,, +120,gluteus_maximus_left,"[192, 104, 88]",muscles,"gluteus_maximus_left: ""3"",",3,SCT,123037004,Anatomical Structure,SCT,181674001,Gluteus maximus muscle,SCT,7771000.0,Left,,,,,, +121,gluteus_maximus_right,"[192, 104, 88]",muscles,"gluteus_maximus_right: ""4"",",4,SCT,123037004,Anatomical Structure,SCT,181674001,Gluteus maximus muscle,SCT,24028007.0,Right,,,,,, +122,gluteus_medius_left,"[192, 104, 88]",muscles,"gluteus_medius_left: ""5"",",5,SCT,123037004,Anatomical Structure,SCT,78333006,Gluteus medius muscle,SCT,7771000.0,Left,,,,,, +123,gluteus_medius_right,"[192, 104, 88]",muscles,"gluteus_medius_right: ""6"",",6,SCT,123037004,Anatomical Structure,SCT,78333006,Gluteus medius muscle,SCT,24028007.0,Right,,,,,, +124,gluteus_minimus_left,"[192, 104, 88]",muscles,"gluteus_minimus_left: ""7"",",7,SCT,123037004,Anatomical Structure,SCT,78333006,Gluteus medius muscle,SCT,7771000.0,Left,,,,,, +125,gluteus_minimus_right,"[192, 104, 88]",muscles,"gluteus_minimus_right: ""8"",",8,SCT,123037004,Anatomical Structure,SCT,78333006,Gluteus medius muscle,SCT,24028007.0,Right,,,,,, +126,iliopsoas_left,"[192, 104, 88]",muscles,"iliopsoas_left: ""9"",",9,SCT,123037004,Anatomical Structure,SCT,68455001,Iliopsoas muscle,SCT,7771000.0,Left,,,,,, +127,iliopsoas_right,"[192, 104, 88]",muscles,"iliopsoas_right: ""10""",10,SCT,123037004,Anatomical Structure,SCT,68455001,Iliopsoas muscle,SCT,24028007.0,Right,,,,,, diff --git a/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/t8.gif b/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/t8.gif new file mode 100644 index 
000000000..b2ad350da Binary files /dev/null and b/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/t8.gif differ diff --git a/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/ts_a3ds_m.jpg b/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/ts_a3ds_m.jpg new file mode 100644 index 000000000..863c74341 Binary files /dev/null and b/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/ts_a3ds_m.jpg differ diff --git a/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/ts_gaps.jpg b/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/ts_gaps.jpg new file mode 100644 index 000000000..a08d60034 Binary files /dev/null and b/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/ts_gaps.jpg differ diff --git a/PW42_2025_GranCanaria/Projects/RobustBooleanOperationsLibraryForVtkSlicer/README.md b/PW42_2025_GranCanaria/Projects/RobustBooleanOperationsLibraryForVtkSlicer/README.md new file mode 100644 index 000000000..f977cfa86 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/RobustBooleanOperationsLibraryForVtkSlicer/README.md @@ -0,0 +1,100 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Robust boolean operations library for VTK/Slicer +category: Infrastructure + +key_investigators: + +- name: Mauro I. Dominguez + affiliation: Independent + country: Argentina + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +- name: Steve Pieper + affiliation: Isomics Inc. + country: USA + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware Inc. 
+ country: USA + +--- + +# Project Description + + + + +Support robust boolean operations for 2D triangle meshes inside VTK or Slicer through an external library. This would allow an alternative to current boolean operations engine, `vtkbool` (from Sandbox's extension CombineModels module), which is better than the default one of vtk but still unstable for complex meshes such as the ones used in the biomedical domain + + + +## Objective + + + + +1. Expose a robust boolean operations library (e.g. Geogram) in Slicer's logic and GUI +2. Compile a robust boolean operations library (e.g. Geogram) as a vtk external module + + + +## Approach and Plan + + + + +1. Add a CLI module for Slicer that encapsulates a robust boolean operation command line as part of Sandbox extension. +2. Create a draft PR to the upstream repo with the code changes that incorporate the feature and documentation on how to use it from GUI and a code snippet on how to use it from the python interpreter +3. If there is enough time, evaluate creating a boolean operations DynamicModeler tool + + + +## Progress and Next Steps + + + + +1. Table that compares most "popular" boolean operations libraries from the [awesome geometry processing](https://github.com/zishun/awesome-geometry-processing#general-libraries) projects list: +[https://docs.google.com/spreadsheets/d/1tWbNuMz7vTIB2efFZE7YdT4vBmRb45DdQlqy_kda32k/edit?usp=sharing](https://docs.google.com/spreadsheets/d/1tWbNuMz7vTIB2efFZE7YdT4vBmRb45DdQlqy_kda32k/edit?usp=sharing) +2. Tested `geogram` successfully with mesh that made `vtkbool` fail +3. Edited the CombineModels module to allow other boolean operations backend, `manifold` through `trimesh` python module +4. Discussed with the team how to expose geogram library to Slicer, mostly CMake related issues. +5. Achieved creation of a CLI module to execute geogram boolean operations that will be later added to Sandbox extensions +6. 
Next steps are to make all these alternative backends available on CombineModels module as a PRs. This will involve modifying Sandbox extension to be a superbuild extension + + +# Illustrations + + + +## Boolean operation example + +![Boolean operation example](boolean_operation_inputs.png) + +![Boolean operation result](boolean_operation_results.png) + +## Working CLI module exposing geogram + +![geogram_CLI_module](https://github.com/user-attachments/assets/2e0846bd-bfd1-4884-be77-91dbb6a80741) + + + + +# Background and References + + + + +- [Geogram's boolean operations](https://github.com/BrunoLevy/geogram/wiki/BooleanOps) +- [Manifold's boolean operations](https://github.com/elalish/manifold/wiki/Manifold-Library#mesh-boolean) +- [Trimesh (exposes manifold)](https://github.com/mikedh/trimesh) + diff --git a/PW42_2025_GranCanaria/Projects/RobustBooleanOperationsLibraryForVtkSlicer/boolean_operation_inputs.png b/PW42_2025_GranCanaria/Projects/RobustBooleanOperationsLibraryForVtkSlicer/boolean_operation_inputs.png new file mode 100644 index 000000000..6d364332b Binary files /dev/null and b/PW42_2025_GranCanaria/Projects/RobustBooleanOperationsLibraryForVtkSlicer/boolean_operation_inputs.png differ diff --git a/PW42_2025_GranCanaria/Projects/RobustBooleanOperationsLibraryForVtkSlicer/boolean_operation_results.png b/PW42_2025_GranCanaria/Projects/RobustBooleanOperationsLibraryForVtkSlicer/boolean_operation_results.png new file mode 100644 index 000000000..6d0d303eb Binary files /dev/null and b/PW42_2025_GranCanaria/Projects/RobustBooleanOperationsLibraryForVtkSlicer/boolean_operation_results.png differ diff --git a/PW42_2025_GranCanaria/Projects/SimulationOfSpineCurveCorrectionThrough3DReconstructionTechniquesAndParameterMeasurement/README.md b/PW42_2025_GranCanaria/Projects/SimulationOfSpineCurveCorrectionThrough3DReconstructionTechniquesAndParameterMeasurement/README.md new file mode 100644 index 
000000000..8bd3d5ff5 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/SimulationOfSpineCurveCorrectionThrough3DReconstructionTechniquesAndParameterMeasurement/README.md @@ -0,0 +1,117 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Simulation of Spine Curve Correction Through 3D Reconstruction Techniques and Parameter + Measurement +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Cristina Soriano + affiliation: Digital Anatomics SL + country: Spain + +- name: Maria Ordieres + affiliation: Digital Anatomics SL + country: Spain + +- name: Javier Pascau + affiliation: Universidad Carlos III de Madrid + country: Spain + +--- + +# Project Description + + + + +Digital Anatomics is a company focused on the development of solutions in the field of personalized medical surgeries, especially in the area of the spine. In this project we would like to explore the reconstruction of personalized spine models from 2D imaging. We are currently developing a neural network for this reconstruction task. +Using 3D Slicer as a tool for modeling and parameter measurement, we aim to apply the obtained information to perform precise patient curve measurements, simulate curve correction procedures, design detailed surgical plans, and assess post-operative outcomes. This approach will provide valuable insights to improve decision-making and enhance the precision of spinal surgeries. + + + + + + +## Objective + + + + +1. Obtain DRR projections from CT scans to obtain paired training data +2. Obtain precision measurements for the 3D reconstructions (especially focused on the pedicle region) +3. 
Automate landmark identification on the 2D image for the Cobb angle measurement and study the correlation with the reconstructed vertebrae curvature + + + + + + +## Approach and Plan + + +We would like to generate a Slicer extension that allows us to visualize and study the correlation between the 2D and 3D images and automatically obtain spine curve measurements. + + + + + + +## Progress and Next Steps + + + +1. Generate a custom layout for the joint visualization of: + + a. Lateral XRay plus sagittal CT slices + + ![image](https://github.com/user-attachments/assets/4a94d739-baae-4e0f-b2ef-08d231fd27b4) + + + b. AP XRay plus coronal CT slices + ![image](https://github.com/user-attachments/assets/ec6dc6aa-eb9f-4d86-a75d-cb8a06fb59c1) + + +2. Automatically compute the landmarks for each vertebral segmentation + + ![image](https://github.com/user-attachments/assets/ca8b7702-8cd1-4ed8-b6ce-a505e49333ab) +3. Compute the curvature measurements and show them on the images: we have implemented the computation of 2D angles (Cobb angle & Kyphosis/Lordosis curvature) + +![image](https://github.com/user-attachments/assets/1e5bad4d-957c-4e4a-beae-cee2c337ef90) + +![image](https://github.com/user-attachments/assets/be8dbc1a-2a2b-47a0-900d-15d31227847f) + +Next steps: + · Compute the 3D curvature by interpolating the vertebral centers of mass and study the correlation with the 2D curvatures + + + +# Illustrations + + + + + + +![Image](https://github.com/user-attachments/assets/0a21a2ac-2506-4410-9938-ddb537aa870f) +**3D reconstruction from 2D X-rays** + + +![Image](https://github.com/user-attachments/assets/b292a716-6661-47fd-aaaa-c36d0deb0b2a) +**Landmark detection and Cobb angle measurement** + + +![image](https://github.com/user-attachments/assets/bf310de0-a58d-414e-8222-bec0a382281f) +**Curve correction simulation** + +# Background and References + + + + +_No 
response_ diff --git a/PW42_2025_GranCanaria/Projects/SlicerSofaIntegrationOfSofaWith3DSlicerForAdvancedMedicalSimulations/README.md b/PW42_2025_GranCanaria/Projects/SlicerSofaIntegrationOfSofaWith3DSlicerForAdvancedMedicalSimulations/README.md new file mode 100644 index 000000000..2ef70239c --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/SlicerSofaIntegrationOfSofaWith3DSlicerForAdvancedMedicalSimulations/README.md @@ -0,0 +1,114 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: 'Slicer-SOFA: Integration of SOFA with 3D Slicer for Advanced Medical Simulations' +category: Infrastructure + +key_investigators: + +- name: Rafael Palomar + affiliation: Oslo University Hospital and NTNU + country: Norway + +- name: Paul Baksic + affiliation: Inria + country: France + +- name: Steve Pieper + affiliation: Isomics Inc. + country: USA + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware Inc. + country: USA + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +- name: Sam Horvath + affiliation: Kitware Inc + country: USA + +--- + +# Project Description + + + + +The SlicerSOFA project aims to integrate the SOFA (Simulation Open Framework Architecture) with 3D Slicer, enhancing the capabilities of medical simulations by providing advanced physics and interaction models. For this PW we aim to consolidate the first release through the 3D Slicer Extension Manager and establish a dialogue with the community to bring this project forward. + + +## Objective + + + + +1. Objective A. Engage with the 3D Slicer and SOFA communities on the future development of Slicer-SOFA. + +2. Objective B. Resolve existing bugs and improve the stability of the integration. + - [Slicer hangs after starting and stopping the simulation a few times](https://github.com/Slicer/SlicerSOFA/issues/29) + - [SofaIGTLink Plugin Fails to Load on Windows Build](https://github.com/Slicer/SlicerSOFA/issues/33) + +3. Objective C. 
Expand the functionality by adding new SOFA plugins and abstraction layers to the base Slicer-SOFA setup. + - [Add SOFA default demo as Slicer module](https://github.com/Slicer/SlicerSOFA/issues/37) + - [Bump SOFA to v24.12](https://github.com/Slicer/SlicerSOFA/issues/36) + - [Adding new plugins to base Slicer-SOFA](https://github.com/Slicer/SlicerSOFA/issues/30) + - [Testing and sample data still relying on RafaelPalomar/SlicerSOFATestingData repository](https://github.com/Slicer/SlicerSOFA/issues/25) + - [Add abstraction layer for simplified SOFA scene generation](https://github.com/Slicer/SlicerSOFA/issues/40) + + 4. Objective D. Implement a mechanism to specify a custom SOFA root directory via environment variables. + - [Add feature to specify custom SOFA_ROOT](https://github.com/Slicer/SlicerSOFA/issues/39) + - [Enable use of external SOFA and SOFA plugins](https://github.com/Slicer/SlicerSOFA/issues/32) + + + +## Approach and Plan + + + + +1. Develop a mechanism in the SofaEnvironment to check for an environment variable `SLICER_SOFA_ROOT` and use it if available. +2. Investigate and fix the reported bugs, ensuring the functionality of components like SparseGridSimulation and SofaIGTLink plugin. +3. Integrate additional SOFA plugins (e.g., BeamAdapter, Shell) +4. Update the SOFA framework to the latest version to maintain compatibility and access new features. + + + + +## Progress and Next Steps + + + +1. Resolve existing bugs and improve the stability of the integration + - New issue Found: [Problem loading SlicerSOFA on MS Windows installations with existing SOFA installations](https://github.com/Slicer/SlicerSOFA/issues/41). ([PR#43](https://github.com/Slicer/SlicerSOFA/pull/43)) + +2. 
Update and expand SlicerSOFA + - Update SOFA and SOFA Python 24.12 ([PR#42](https://github.com/Slicer/SlicerSOFA/pull/42)) + - Add new SOFA plugins ([PR#42](https://github.com/Slicer/SlicerSOFA/pull/35)) + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +- [SlicerSOFA GitHub Repository](https://github.com/Slicer/SlicerSOFA) +- [SOFA Framework](https://www.sofa-framework.org/) +- [3D Slicer](https://www.slicer.org/) +- [Slicer-SOFA PW 41 Project](https://projectweek.na-mic.org/PW41_2024_MIT/Projects/SlicerSofa/) +- [Slicer-SOFA PW 40 Project](https://projectweek.na-mic.org/PW40_2024_GranCanaria/Projects/SlicerSofaIntegration/) diff --git a/PW42_2025_GranCanaria/Projects/SlicerimageaugmenterEvolutionAndNewFeatures/README.md b/PW42_2025_GranCanaria/Projects/SlicerimageaugmenterEvolutionAndNewFeatures/README.md new file mode 100644 index 000000000..8926d111c --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/SlicerimageaugmenterEvolutionAndNewFeatures/README.md @@ -0,0 +1,125 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: 'SlicerImageAugmenter : evolution and new features' +category: Quantification and Computation + +key_investigators: + +- name: Ciro Benito Raggio + affiliation: Karlsruhe Institute of Technology + country: Germany + +- name: Paolo Zaffino + affiliation: Magna Græcia University of Catanzaro + country: Italy + +- name: Maria Francesca Spadea + affiliation: Karlsruhe Institute of Technology + country: Germany + +--- + +# Project Description + + + + +[ImageAugmenter](https://github.com/ciroraggio/SlicerImageAugmenter) is a 3D Slicer extension that provides a simple interface to apply over 20 simultaneous MONAI transforms (spatial, intensity, etc.) to medical image datasets without programming. 
+ +It makes medical image augmentation more accessible, allowing a wider range of users to improve the performance of DL models in medical image analysis by increasing the number of samples available for training. + +Since the extension is officially available in the Extension Manager since version 5.7.0 (current Preview Release), the idea is to fix known bugs, improve various aspects and add new features before the extension will be available in the stable release of 3D Slicer. + + + +## Objective + + + + +1. Faster loading of the interface on first start +2. Improve error handling and view management of 3D Slicer +3. Improving the application of some transformations +4. Possibility to select the sample to be used for the preview function +5. Make the community aware of the extension and release it in the stable release of 3D Slicer + + + +## Approach and Plan + + + + +1. Check all GUI components and/or dynamically load some components +2. Better handling of generic exceptions by preventing the interface from crashing and requiring a manual restart +3. Analyze MONAI documentation regarding available transformations for better interpretation of parameters +4. Added a drop-down menu in preview mode to select the sample to which the selected transformations will be applied and previewed +5. Networking with other people during the PW + + + +## Progress and Next Steps + + + + +### New Features + +1. Enabled selection of specific samples for transformation preview +2. Introduced regex support for defining patterns to locate images and masks + + +### Revisions and Bug Fixes + +1. Refined available transformations to better align with MONAI documentation +2. Improved exception handling for enhanced robustness +3. Revised the UI for better consistency in components and layout +4. Optimised and enhanced extension loading on first startup +5. Fixed minor bugs +6. 
Updated the extension logo + + + + +# Illustrations + + + +## RegEx Support feature + + + + + +## New Preview Samples Selection feature + + + + + + +## New Logo +![ImageAugmenter](https://github.com/user-attachments/assets/93b0dc68-fb1c-4e87-a2e5-382ed70461bd) + + +# Background and References + + + + +- [SlicerImageAugmenter Repository](https://github.com/ciroraggio/SlicerImageAugmenter) +- [SlicerImageAugmenter Journal Article - Software X, Volume 28, December 2024](https://doi.org/10.1016/j.softx.2024.101923) +- [SlicerImageAugmenter Website](https://ciroraggio.github.io/SlicerImageAugmenter/) diff --git a/PW42_2025_GranCanaria/Projects/SlicersofaSlicerros2Integration/README.md b/PW42_2025_GranCanaria/Projects/SlicersofaSlicerros2Integration/README.md new file mode 100644 index 000000000..b16fd9db8 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/SlicersofaSlicerros2Integration/README.md @@ -0,0 +1,113 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: SlicerSOFA - SlicerROS2 Integration +category: Infrastructure + +key_investigators: + +- name: Eléonore Germond + affiliation: IMT Atlantique + country: France + +- name: Rafael Palomar + affiliation: Oslo University Hospital + country: Norway + +- name: Paul Baksic + affiliation: Inria + country: France + +- name: Steve Pieper + affiliation: Isomics Inc. + country: USA + +- name: Junichi Tokuda + affiliation: Brigham and Women's Hospital + country: USA + +- name: Laura Connolly + affiliation: Queen's University + country: Canada + +- name: Anton Deguet + affiliation: Johns Hopkins University + country: USA + +--- + +# Project Description + + + + +SlicerSOFA and SlicerROS2 are two 3D Slicer extensions that bridge mechanical simulations and robotic applications with 3D Slicer. Their recent addition to the 3D Slicer ecosystem opens up new possibilities for developing robotic applications that simulate interactions with medical environments. 
+ +In this project, we aim to create a proof-of-concept integration where a Phantom Omni, controlled by SlicerROS2, interacts with a soft organ through SlicerSOFA. We will examine and discuss aspects such as the interface between ROS and SOFA, as well as performance considerations, to guide the future development of robotic applications that integrate simulated environments in 3D Slicer. + +![https://camo.githubusercontent.com/03c3af0d069321004f86294efe09f49df1236aaa1a5bc49857367b28994e3f59/68747470733a2f2f696d672e796f75747562652e636f6d2f76692f6b335647733059614533672f6d617872657364656661756c742e6a7067](https://camo.githubusercontent.com/03c3af0d069321004f86294efe09f49df1236aaa1a5bc49857367b28994e3f59/68747470733a2f2f696d672e796f75747562652e636f6d2f76692f6b335647733059614533672f6d617872657364656661756c742e6a7067) +(source: [https://github.com/rosmed/slicer_ros2_module](https://github.com/rosmed/slicer_ros2_module)) + + + + + + + +## Objective + + + + +1. Objective A: Creating a 3D Slicer-based setup including SlicerROS2 and SlicerSOFA, able to provide with interaction between a robot model and a soft organ + + + + +## Approach and Plan + + + + +1. Setting up a base 3D Slicer, SlicerROS2 and SlicerSOFA. +2. Loading a Phantom Omni robot model ([https://slicer-ros2.readthedocs.io/en/latest/pages/robot-visualization.html#phantom-omni](https://slicer-ros2.readthedocs.io/en/latest/pages/robot-visualization.html#phantom-omni)). +3. Creating a SOFA simulation for interaction between the robot and a soft organ. +4. Bridging the robot manipulation with the simulation using SlicerSOFA. + + + + +## Progress and Next Steps + + + + +1. Describe specific steps you **have actually done**. + + + + +# Illustrations + + + + + + +# Background and References + + + + +- Connolly L, Kumar AS, Mehta KK, Al-Zogbi L, Kazanzides P, Mousavi P, Fichtinger G, Krieger A, Tokuda J, Taylor RH, Leonard S, Deguet A. 
SlicerROS2: A Research and Development Module for Image-Guided Robotic Interventions. IEEE Trans Med Robot Bionics. 2024 Nov;6(4):1334-1344. doi: 10.1109/TMRB.2024.3464683 +- SawSensablePhantom repository: [https://github.com/jhu-saw/sawSensablePhantom](https://github.com/jhu-saw/sawSensablePhantom) +- Phantom Omni in SlicerROS2: [https://slicer-ros2.readthedocs.io/en/latest/pages/robot-visualization.html#phantom-omni](https://slicer-ros2.readthedocs.io/en/latest/pages/robot-visualization.html#phantom-omni) +- SlicerSOFA repository: [https://github.com/Slicer/SlicerSOFA](https://github.com/Slicer/SlicerSOFA) diff --git a/PW42_2025_GranCanaria/Projects/SpinalMusculoskeletalModuleForComputingVertebralSpecificLoading/README.md b/PW42_2025_GranCanaria/Projects/SpinalMusculoskeletalModuleForComputingVertebralSpecificLoading/README.md new file mode 100644 index 000000000..8c99cee51 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/SpinalMusculoskeletalModuleForComputingVertebralSpecificLoading/README.md @@ -0,0 +1,125 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Spinal musculoskeletal module for computing vertebral specific loading +category: Quantification and Computation + +key_investigators: + +- name: Csaba Pinter + affiliation: Ebatinca S.L. + country: Spain + +- name: Ron Alkalay + affiliation: Beth Israel Deaconess Medical Center + country: USA + +- name: Dennis Anderson + affiliation: Beth Israel Deaconess Medical Center + country: USA + +- name: Vy Hong + affiliation: Technical University Munich + country: Germany + +- name: Nils Rehtanz + affiliation: Technical University Munich + country: Germany + +- name: Steve Pieper + affiliation: Isomics Inc. 
+ country: USA + +- name: Andras Lasso + affiliation: Queen’s University + country: Canada + +- name: Ron Kikinis + affiliation: Brigham and Women’s Hospital and Harvard Medical School + country: USA + +--- + +# Project Description + + + + +Musculoskeletal models of the spine allow insight into the complex loading states experienced by the human spine that cannot be measured in human subjects noninvasively. We have previously established models for such analyses OpenSim, an open-source modeling software, and developed machine-learning approaches for segmenting cancer patients' spinal column and trunk musculature. However, establishing personalized models to represent individual human subjects is complex and time-consuming, requiring custom scripting for data computation, curation, and assembling of model parameters. +In the previous project week, our group ported our model creation, analysis, and data management scripts to Python and has worked on computing spinal inter-segment centroid and vertebral segment orientation necessary for adapting our generic female and male model to the patient anatomy and the spatial kinematic relationships of the modeled spine (individual vertebral size, inter-discal space, spinal curvature). For project week 42, we propose integrating these tools within the extension framework to enable automation of the segmentation process and visualization of the spine and muscle segmentation outcome, a complete pipeline to allow computing the input file required to model creation in Open sim from this segmentation and visualizing the force and moment values results at each vertebral level in 3Dslicer based on the Open sim model analysis. + +Having such an open-source model in 3d Slicer will significantly contribute to the scientific and clinical community for cancer patient research and to studying the effect of spinal loading on morbidity in elderly populations and surgical outcomes. + + + +## Objective + + + + +1. 
Create an open-source Slicer module integrating our group's + a) Vertebrae and musculature DL segmentation models + b) Python-based spinal musculoskeletal model preparation and management scripts +for establishing patient-specific spinal model input files for analysis in the OpenSim environment. +2. Automate model creation in Open Sim based on the model input file. +3. Create tools for visualizing and presenting model results at the model skeleton and muscle structures for static simulations. +4. Discuss extending objective 3 for visualizing dynamic simulations (gait, tasks) + + + +## Approach and Plan + + + + +TBD + + + +## Progress and Next Steps + + + +**Progress**: +- **Landmark Identification**: Using segmentations of vertebrae, we identified critical points such as joint centroids and body centroids. +- **Local Coordinate Frames**: Defined subject-specific biomechanical models in OpenSim by creating local coordinate frames for vertebral bodies and intervertebral joints. +- **Integration into OpenSim**: Successfully exported the identified landmarks into OpenSim models, enabling personalized musculoskeletal analyses. + +**Next Steps**: +1. Consider multiple methods for identifying the points and landmarks used. +2. Display resulting frames (joint centers and axes) within 3D Slicer along with spine segmentations, incorporating methods for viewing and correction before exporting to OpenSim. +3. Improve visualization and QA of OpenSim models: + - Export vertebral segmentations into files and use them in the OpenSim visualizer. + - Visualize OpenSim models in Slicer for side-by-side comparisons. 
+ + + +# Illustrations + + +![vertebra_segmentation_front](https://github.com/user-attachments/assets/4d27c9a9-fa4b-46c6-8981-77bffcbbfdec) +![vertebra_segmentation_side](https://github.com/user-attachments/assets/538d45be-a9cc-4c6a-9f15-cda61b96a3b7) +![subregion_segmentation_side](https://github.com/user-attachments/assets/78402110-338b-428d-b454-6d7ecac93315) +![poi_in_vertebra](https://github.com/user-attachments/assets/cc99a5de-fb5d-46c4-a859-364975daded0) +![open_sim_spine_model_front](https://github.com/user-attachments/assets/63c9c325-dd71-4cf2-b233-28eba349d7bf) +![open_sim_spine_model_side](https://github.com/user-attachments/assets/8fb7d50f-a84e-40e4-87c0-158c85f437bd) +![open_sim_fulll_body](https://github.com/user-attachments/assets/139b93c0-ff4d-46b2-957e-8bff813ebc84) + + +![PW42 project page Graphic](https://github.com/user-attachments/assets/767c0b03-0dcf-4e05-b179-b476099c2a68) + + + + +# Background and References + + + + +1. [Evaluation of Load-To-Strength Ratios in Metastatic Vertebrae and Comparison With Age- and Sex-Matched Healthy Individuals](https://www.frontiersin.org/articles/10.3389/fbioe.2022.866970/full) +2. [Metastatic spine disease alters spinal load-to-strength ratios in patients compared to healthy individuals](https://www.medrxiv.org/content/10.1101/2025.01.06.25320075v1) +3. 
[Automated Segmentation of Trunk Musculature with a Deep CNN Trained from Sparse Annotations in Radiation Therapy Patients with Metastatic Spine Disease](https://www.medrxiv.org/content/10.1101/2025.01.13.25319967v1) diff --git a/PW42_2025_GranCanaria/Projects/Template/README.md b/PW42_2025_GranCanaria/Projects/Template/README.md new file mode 100644 index 000000000..78ac1061b --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/Template/README.md @@ -0,0 +1,57 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Write full project title here +category: Uncategorized + +key_investigators: +- name: Person Doe + affiliation: University + +- name: Person2 Doe2 + affiliation: University2 + country: Spain +--- + +# Project Description + + + +## Objective + + + +1. Objective A. Describe **what you plan to achieve** in 1-2 sentences. +1. Objective B. ... +1. Objective C. ... + +## Approach and Plan + + + +1. Describe specific steps of **what you plan to do** to achieve the above described objectives. +1. ... +1. ... + +## Progress and Next Steps + + + +1. Describe specific steps you **have actually done**. +1. ... +1. ... 
+ +# Illustrations + + + +# Background and References + + diff --git a/PW42_2025_GranCanaria/Projects/Template/README.md.j2 b/PW42_2025_GranCanaria/Projects/Template/README.md.j2 new file mode 100644 index 000000000..e8c82efbc --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/Template/README.md.j2 @@ -0,0 +1,55 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: {{ title | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +category: {{ category | regex_replace("\n$|\n\.\.\.\n$", "") }} + +key_investigators: +{% for investigator in investigators %} +- name: {{ investigator.name | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} + affiliation: {{ investigator.affiliation | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +{%- if investigator.country %} + country: {{ investigator.country | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +{%- endif %} +{% endfor %} +--- + +# Project Description + + + +{{ description }} + +## Objective + + + +{{ objective }} + +## Approach and Plan + + + +{{ approach }} + +## Progress and Next Steps + + + +{{ progress }} + +# Illustrations + + + +{{ illustrations }} + +# Background and References + + + +{{ background }} diff --git a/PW42_2025_GranCanaria/Projects/TorchxrayvisionMeets3DSlicerBridgingDeepLearningAndMedicalImaging/README.md b/PW42_2025_GranCanaria/Projects/TorchxrayvisionMeets3DSlicerBridgingDeepLearningAndMedicalImaging/README.md new file mode 100644 index 000000000..c4d12d821 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/TorchxrayvisionMeets3DSlicerBridgingDeepLearningAndMedicalImaging/README.md @@ -0,0 +1,113 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: 'TorchXRayVision Meets 3D Slicer: Bridging Deep Learning and Medical Imaging' +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Constantin Constantinescu + affiliation: Lucian Blaga University of Sibiu + country: Romania + +- name: Juan Ruiz-Alzola + affiliation: University of Las 
Palmas de Gran Canaria + country: Spain + +- name: Csaba Pintér + affiliation: EBATINCA + country: Spain + +- name: Oscar Martin + affiliation: EBATINCA + country: Spain + +- name: Borja Fernandez + affiliation: EBATINCA + country: Spain + +--- + +# Project Description + + + + +This project focuses on developing a 3D Slicer module for the automatic processing of chest X-rays, integrating powerful deep learning capabilities provided by TorchXRayVision. The module streamlines radiological analysis by offering the following features: + +- Segmentation: Automatically identify and outline anatomical regions in chest X-rays, such as lungs or other structures. +- Anomaly Detection: Detect abnormalities and highlight regions of interest for further investigation. +- Pathology Classification: Classify pathologies such as pneumonia, atelectasis, or other common conditions. + +By combining the advanced machine learning models from TorchXRayVision with the versatile 3D Slicer platform, this module aims to provide a robust tool for clinicians and researchers to enhance diagnostic workflows, reduce manual workload, and improve consistency in radiological interpretation. + + + +## Objective + + + + +1. A 3D Slicer Module +2. TorchXRayVision models included in the module +3. Torch XRays automatic segmentation, anomaly detection and pathology classification +4. Heatmaps + + + + +## Approach and Plan + + + + +1. Create a Slicer Module +2. Create an interface to upload X-Rays and perform automatic analysis +3. Use TorchXRayVision framework to perform automatic analysis +4. Compute heatmaps + + + + +## Progress and Next Steps + + + + +1. Creating a 3D Slicer Module +2. Building the interface +3. Including the TorchXRayVision models +4. 
Incorporate mechanisms to facilitate the interpretability of the predictions made by the models + + + + + +# Illustrations + +Loading the X-Ray Image: + +![1](https://github.com/user-attachments/assets/e70dd149-885b-4674-ae5e-7f6a959d2084) + +Running automatic analysis: +![2](https://github.com/user-attachments/assets/a694a23c-aaf3-4e6f-8101-8f955bfe336f) + +View segmentation: +![3](https://github.com/user-attachments/assets/87216562-bc43-4578-8295-7bbf95ba5450) + +Pathologies scores and heatmap validation: +![4](https://github.com/user-attachments/assets/7965821f-d508-4f1d-bdc8-8063e78adef0) + + +![5](https://github.com/user-attachments/assets/c114764a-ddbc-4054-9fa4-bfe701ea6287) + + +# Background and References + +[https://github.com/mlmed/torchxrayvision](https://github.com/mlmed/torchxrayvision) + + diff --git a/PW42_2025_GranCanaria/Projects/Vista3D-NIM/README.md b/PW42_2025_GranCanaria/Projects/Vista3D-NIM/README.md new file mode 100644 index 000000000..a8f4f78d6 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/Vista3D-NIM/README.md @@ -0,0 +1,98 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Remote VISTA3D server (NIM) for CT segmentation +category: Segmentation / Classification / Landmarking + +key_investigators: +- name: Andres Diaz-Pinto + affiliation: NVIDIA + country: UK + +- name: Stephen Aylward + affiliation: NVIDIA + country: USA + + +--- + +# Project Description + +## Objective + +Provide access to MONAI-based foundation models to users running 3D Slicer on low-end +(e.g., no GPU) machines by launching those AI methods on freely available (albeit +limited total usage) high-end servers. + +## Overview +We will develop the first (VISTA3D) of a set of Slicer Extensions that will allow Slicer +users to call AI methods built using MONAI and running on NVIDIA servers. 
+
+Access to the servers requires registration which is free, to get an API key with which
+1,000 or more images can be processed for free. Research users can also register as
+developers (also for free) to get unlimited local access.
+
+The first Extension will allow Slicer users (with a free API key) to process images using
+the MONAI VISTA-3D segmentation (remote) running on NVIDIA servers. Via this Extension,
+Slicer users will be able to perform large / fast CT image AI segmentations on low-end
+(e.g., no GPU) machines.
+
+TLDR:
+Try the VISTA-3D module running on
+[NVIDIA VISTA3D NIM](https://build.nvidia.com/nvidia/vista-3d)
+
+We are proposing to make that service callable using a simple Slicer extension, to enable
+remote VISTA-3D processing of data from within 3D Slicer.
+
+## Details
+NVIDIA defines NIMs (NVIDIA Inference Microservices) as optimized containers for
+portable, scalable AI. These are nominally offered on NV AI Enterprise / GPU Cloud servers
+as callable methods. Anyone can register for free and get 1,000 to 5,000 free credits
+(depending on email domain used for registration, with 1 credit used for each image
+processed). Additionally, anyone can register for free to become an NVIDIA Developer, and
+then they can download NIMs for free for research purposes - enabling unlimited data
+processing, albeit using local GPU resources. NIMs can also run on AWS and Azure servers.
+
+NVIDIA AI Enterprise / GPU-Cloud servers use high-end GPUs (e.g., H100s) so via NIMs
+running on these servers it is possible to evaluate very large AI models and very large
+images very rapidly.
+
+1. Developed a "MONAI VISTA-3D segmentation (remote)" Extension.
+2. Possibly provide a Slicer GUI that simplifies registering for an NVIDIA AI Enterprise account
+3.
Possibly provide access to advance VISTA3D features + * Only segmenting specific structures + * Using seeds to indicate the inside and outside of an arbitrary object of interest + +## Approach and Plan + +The extension will +1. Allow the user to select a currently loaded image. +2. Ask the user to enter their NVIDIA Enterprise API key. +3. Provide a warning if the intensity range or voxel spacing is outside the norm +4. Convert that data to isotropic voxel spacing and CT intensity range +5. Upload the image to file.io to generate a URL that can be passed to the VISTA3D NIM. +6. Pass the file's URL to the VISTA3D NIM along with the API key. +7. Receive the segmentation results (mask) in the response from the NIM +8. Register the segmentation results (mask) as a segmentation volume associated with the original input image. + +Optionally: +1. Provide a GUI for registering for an NVIDIA Enterprise key +2. Allow the user to select specific anatomic structures to be segmented +3. Allow the user to specify landmarks that indicate the inside and outside of a region of interest + +## Progress and Next Steps + +1. A jupyter notebook that does the above steps outside of Slicer has been implemented and proven to work correctly. 
+ +# Illustrations + + + +# Background and References + +* [VISTA3D NIM URL](https://build.nvidia.com/nvidia/vista-3d) diff --git a/PW42_2025_GranCanaria/Projects/VisualDICOMBrowser/README.md b/PW42_2025_GranCanaria/Projects/VisualDICOMBrowser/README.md new file mode 100644 index 000000000..c5201d889 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/VisualDICOMBrowser/README.md @@ -0,0 +1,125 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: Visual DICOM browser +category: Infrastructure + +key_investigators: + +- name: Davide Punzo + affiliation: freelancer, DNA-HIVE + country: France + +- name: Andras Lasso + affiliation: Queens University + country: Canada + +--- + +# Project Description + + + +The visual DICOM browser provides a new user interface for quick viewing and retrieval of patient images stored on remote DICOM servers. The new tool is optimized for clinical workflows where the focus is on all the images of a single patient - as opposed to the existing DICOM browsing experience, which was more suitable for bringing together images from many patients. + +Both server and local content are located at the same place and are visualized by thumbnails. All data is retrieved in the background using classic DIMSE networking (most commonly used protocols in hospitals), in multiple concurrently running threads. The currently supported operations are: + +- Browsing and filtering with thumbnails of content of local DICOM database and multiple remote DICOM servers. +- Query/Retrieve data from servers (DIMSE `C-FIND`, `C-GET`, `C-MOVE` SCU). All the operations are done in background and in parallel. Downloaded data is automatically cached in the local DICOM database. A unique feature is the possibility to retrieve images using C-GET protocol (suitable for cases when many Slicer instances are running in docker containers) with a clinical PACS that only supports C-MOVE protocol (most clinical PACS), via a proxy server (such as the free Orthanc). 
+- Import data from local files. +- Receive data sent from remote PACS (DIMSE `C-STORE` SCP). +- Send data to remote PACS (DIMSE `C-STORE` SCU). +- Quick browsing of all DICOM metadata and pixel data. +- Remove data from local database (not from server). + +The widget is currently an experimental feature in Slicer (DICOM module). Current Roadmap is at [link](https://github.com/commontk/CTK/issues/1162). + +**Over the past year, improvements have been made to the performance and stability of the widget (full references in [Background and References](#background-and-references)). +As a result, the widget is now prepared for broader testing and feedback from both users and developers.** + +## Objective + + + +1. Get feedback from users/developers. +1. Prioritize the long term ENH based on the community need/interest. For example: + - Add support for DICOMweb + - Add support for DICOM frame set + - redesign the patient selection widget (currently a tab widget) + - ... + + +## Approach and Plan + + + +1. Have a meeting/demo with people interested for colletting feedback. +1. Prioritize/coordinated any future development based on the feedback. + +## Progress and Next Steps + +1. Feedback has been collected (generally positive!!!): + - **Leonard Nürnberg:** + - Add an option in settings to order series in the study widget by modality. + - Automatically load the source volume when loading a segmentation. + - Render SEG DICOM thumbnails (already a known issue, [https://github.com/commontk/CTK/issues/1162#Thumbnailsserieswidgets](https://github.com/commontk/CTK/issues/1162#Thumbnailsserieswidgets)). + - Improve UI clarity regarding querying PACS servers vs. filtering the local database (e.g., display a dialog explaining the difference when a user opens the visual DICOM browser for the first time). + - **Tina Kapur:** + - Add a button to enable full-screen mode in the Visual DICOM browser. 
+ - Modify *Edit → Application Settings → DICOM → Thumbnail Size* (small, medium, large) to apply changes without requiring a restart. + - Address UI performance issues when importing large cohorts (e.g., 490 patients with 3,931 DICOM series). + - Patient selection UI is not optimal for a number of patient > 50 (already a known issue, [https://github.com/commontk/CTK/issues/1162#Filtering](https://github.com/commontk/CTK/issues/1162#Filtering)) + - Implement support for ultrasound video visualization in the Visual DICOM browser (already a known issue, see [CTK issue #1162](https://github.com/commontk/CTK/issues/1162#long-termENH) and [Slicer Discourse thread](https://discourse.slicer.org/t/new-frame-set-table-in-the-dicom-database/35012)). + +1. The 2025 [roadmap](https://github.com/commontk/CTK/issues/1162) had been updated, but priorities will be evaluated following an assessment of current funding opportunities. + + +# Illustrations + + + + +screenshots: + +|Visual DICOM Browser | Jobs and Settings| +|--- | ---| +| | | + + +video: + + + +UML Diagram: + + + +# Background and References + + + + +- [PR CTK 1187](https://github.com/commontk/CTK/pull/1187) +- [PR CTK 1191](https://github.com/commontk/CTK/pull/1191) +- [PR Slicer 7676](https://github.com/Slicer/Slicer/pull/7676) +- [PR CTk 1201](https://github.com/commontk/CTK/pull/1201) +- [PR CTK 1202](https://github.com/commontk/CTK/pull/1202) +- [PR CTK 1203](https://github.com/commontk/CTK/pull/1203) +- [PR Slicer 7751](https://github.com/Slicer/Slicer/pull/7751) +- [PR CTK 1206](https://github.com/commontk/CTK/pull/1206) +- [PR CTK 1217](https://github.com/commontk/CTK/pull/1217) +- [PR CTK 1218](https://github.com/commontk/CTK/pull/1218) +- [PR Slicer 7811](https://github.com/Slicer/Slicer/pull/7811) +- 
[PR Slicer 7912](https://github.com/Slicer/Slicer/pull/7912) +- [PR CTK 1221](https://github.com/commontk/CTK/pull/1221) + +- [PW40 Project](https://projectweek.na-mic.org/PW40_2024_GranCanaria/Projects/SlicerVisualDICOMBrowser/) + +- [PW38 Project](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/SlicerVisualDICOMbrowser/) diff --git a/PW42_2025_GranCanaria/Projects/WhiteMatterTractSegmentationInSlicer/README.md b/PW42_2025_GranCanaria/Projects/WhiteMatterTractSegmentationInSlicer/README.md new file mode 100644 index 000000000..a928de248 --- /dev/null +++ b/PW42_2025_GranCanaria/Projects/WhiteMatterTractSegmentationInSlicer/README.md @@ -0,0 +1,102 @@ +--- +layout: pw42-project + +permalink: /:path/ + +project_title: White matter tract segmentation in Slicer +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Robin Peretzke + affiliation: German Cancer Research Center + country: Germany + +- name: Steve Pieper + affiliation: Isomics, Inc. + country: USA + +--- + +# Project Description + + + + +Diffusion MRI and white matter tract segmentation play a crucial role in many scenarios such as neurosurgery and psychiatry. Although fully automated methods for tract segmentation have been developed, they may fail in specific scenarios, such as cases with significant anatomical deviations caused by tumors, or they lack generalization across diverse species. In these situations, segmentation is typically carried out by human experts. This process is highly time-consuming, challenging to reproduce, and heavily reliant on graphical user interfaces (GUIs) designed for intuitive interaction. + +The aim of this project is to enhance the usability of 3D Slicer for white matter tract segmentation and interaction. Improvements will focus on creating more intuitive tools for user interaction and integrating novel algorithms into the software to streamline the segmentation process. 
+ + + +## Objective + + + + +Objective A +Familiarizing with the existing SlicerDMRI infrastructure is essential, including understanding its current capabilities and workflows. This process involves building a new module while ensuring packages and libraries are updated to maintain compatibility and performance. + +Objective B +Exploring and implementing simple white matter tract dissection interactions, such as boolean operations with fibers and regions of interest (ROIs), to improve interactivity and usability within the platform. + +Objective C +Investigating and incorporating additional (semi-)automatic tract segmentation algorithms into SlicerDMRI to extend its functionality and better support complex use cases (such as atTRACTive¹) + + + + + +## Approach and Plan + + + + +1. The first step involves exploring the SlicerDMRI architecture, existing documentation, and module-building workflows. Efforts will focus on understanding the integration of relevant libraries and updating packages as needed. This step is fundamental for further developing. + +2. Work will focus on enabling basic operations, such as boolean interactions between fibers and ROIs. + +3. The aim is to explore and implement additional tract segmentation algorithms that align with the SlicerDMRI framework. + + +Screenshot 2025-01-31 at 09 58 20 + + + + +## Progress and Next Steps + + + + +1. Started building a 3D Slicer extension for atTRACTive (interactive white matter tract segmentation with active learning). +2. Implemented preprocessing necessary for classification, such as resampling fibers to an equal number of points. +3. Worked on the frontend-implemented interactor; users are now able to annotate subsets of single fibers from the initial input data. + + Screenshot 2025-01-31 at 10 06 27 + + +4. 
TBD: Implement the classification and active learning setup in the backend + + + + + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +[1] Peretzke, Robin, et al. "atTRACTive: semi-automatic white matter tract segmentation using active learning." International Conference on Medical Image Computing and Computer-Assisted Intervention. Cham: Springer Nature Switzerland, 2023. [arxiv version](https://arxiv.org/abs/2305.18905) diff --git a/PW42_2025_GranCanaria/README.md b/PW42_2025_GranCanaria/README.md new file mode 100644 index 000000000..d0028d6b0 --- /dev/null +++ b/PW42_2025_GranCanaria/README.md @@ -0,0 +1,157 @@ +--- +permalink: /:path/ +redirect_from: +- /PW42_2025_GranCanaria/README.html +- /PW42_2025_GranCanaria/Readme.html + +project_categories: +- DICOM +- Infrastructure +- IGT and Training +- VR/AR and Rendering +- Segmentation / Classification / Landmarking +- Registration +- Cloud / Web +- Quantification and Computation +--- + +# Welcome to the web page for the 42nd Project Week! + +[This event](https://projectweek.na-mic.org/PW42_2025_GranCanaria/README.html) took place Jan 27th - Jan 31st, 2025 at Las Palmas, Gran Canaria, Spain, in person. If you have any questions, you can contact the [organizers](#organizers). + +## Preparation meetings + +We held weekly preparation meetings at 10AM EST (4PM CET) on Tuesdays on Zoom, starting November 19, 2024. 
+ +## Venue + + + +| | | + +- **Recommended hotels (special rates) and maps** + - NH Imperial Playa [Map](https://cutt.ly/twjO0PO) + - Booking: Discounted room block not available any more + +- **Transportation** from the airport to the city (Las Palmas de Gran Canaria): + - Taxi (line at the airport) + - [Bus line 60](https://guaguasglobal.com/lineas-horarios/linea/?id=60) + - [Map: Airport - San Telmo bus station](https://www.google.com/maps/dir/Gran+Canaria+Airport,+GC-1,+s%2Fn,+35230+Las+Palmas+de+Gran+Canaria,+Las+Palmas/Estacion+De+Guaguas+SAN+TELMO,+35002+Las+Palmas+de+Gran+Canaria,+Las+Palmas/@28.0191886,-15.4859935,12z/data=!3m1!4b1!4m14!4m13!1m5!1m1!1s0xc40a266c3662d1d:0x824bcf7e159f85d4!2m2!1d-15.3874042!2d27.9289223!1m5!1m1!1s0xc40958500f0b3f5:0x3693fb0e3c418af2!2m2!1d-15.4158957!2d28.109201!3e3?entry=ttu) +- The city has good bus/taxi service and is also walkable. + +## Discord +The **Discord** application is used to communicate between team members and organize activities before and during Project Week. Please join the Project Week [Discord server](https://discord.gg/AkxzKvqMBp) as soon as possible and explore its functionality before the workshop. For more information on the use of Discord before and during Project Week, please visit [this page](../common/Discord.md). + +## Agenda + +{% include calendar.md from="2025-01-27" to="2025-01-31"%} + +## Breakout sessions + +- [3D Slicer Breakout Slides](https://docs.google.com/presentation/d/1q_dP4Ck28sK-rlAP9kEdnO5EzTCplyrJH_EIPfb1onU/edit?usp=sharing) +- Web and AI Breakout Slides: [Slicer trame Slides](https://docs.google.com/presentation/d/1gTIz15EWea-isQs1ucngV1z8jJMTo_YYOp6midE6ipo/edit?usp=sharing) + + +## Projects + +To learn how to create or update project pages, please refer to the [contributing project pages](ContributingProjectPages.md) section. + +{% include projects_noloc.md %} + +## Registrants + +Do not add your name to this list below. 
It is maintained by the organizers based on your registration.
+
+List of registered participants so far (names will be added here after processing registrations):
+
+
+
+1. Rafael Palomar, Oslo University Hospital, Norway
+1. Simon Drouin, École de technologie supérieure, Canada
+1. Steve Pieper, Isomics, Inc., USA
+1. Mamadou Samba Camara, Cheikh Anta Diop University of Dakar, Senegal
+1. Davide Punzo, Freelancer DNA HIVE, France
+1. Murat Maga, University of Washington, USA
+1. Constantin Constantinescu, Lucian Blaga University of Sibiu, Romania
+1. Juan Ruiz-Alzola, University of Las Palmas de Gran Canaria, Spain
+1. Tina Kapur, Brigham and Women's Hospital, Harvard Medical School, USA
+1. Sam Horvath, Kitware, USA
+1. Ciro Benito Raggio, Karlsruhe Institute of Technology, Germany
+1. Domenico Riggio, Karlsruhe Institute of Technology, Germany
+1. Thibault Pelletier, Kitware SAS, France
+1. Paolo Zaffino, Magna Graecia University of Catanzaro, Italy
+1. Murong Xu, University of Zurich, Switzerland
+1. Tamaz Amiranashvili, University of Zurich, Switzerland
+1. Hallee Wong, MIT, USA
+1. Bjoern Menze, University of Zurich, Switzerland
+1. Francesca Spadea, Karlsruher Institut für Technologie, Germany
+1. Joost van Griethuysen, The Netherlands Cancer Institute, The Netherlands
+1. Lina Bucher, Karlsruher Institut für Technologie, Germany
+1. Sara Rolfe, Seattle Children's Research Institute, USA
+1. Andrey Fedorov, Brigham and Women's Hospital, Harvard Medical School, USA
+1. Andras Lasso, Queen's University, Canada
+1. Gabor Fichtinger, Queen's University, Canada
+1. Kyle Sunderland, Queen's University, Canada
+1. Rebecca Hisey, Queen's University, Canada
+1. Jacqueline Foody, Mass Gen Brigham/Centaur Labs, USA
+1. Andres Diaz-Pinto, NVIDIA, UK
+1. Csaba Pintér, EBATINCA, Spain
+1. Nayra Pumar Carreras, EBATINCA, Spain
+1. Mauro Ignacio Dominguez, Independent, Argentina
+1. Mike Jin, Harvard Medical School/Centaur Labs, USA
+1. David Clunie, PixelMed, USA
+1.
Hyejeong Hong, Samsung Medical Center, South Korea +1. Soyoung Lim, Samsung Medical Center, South Korea +1. Niklas Wahl, DKFZ Heidelberg, Germany +1. Ron Kikinis, Brigham and Women's Hospital and Harvard Medical School, USA +1. Zora Kikinis, Brigham and Women's Hospital and Harvard Medical School, USA +1. Jean-Christophe Fillion-Robin, Kitware Inc., USA +1. Deepa Krishnaswamy, Brigham and Women's Hospital and Harvard Medical School, USA +1. Leonard Nürnberg, Harvard Medical School, Brigham and Women's Hospital, USA +1. Douglas Samuel Gonçalves, University of São Paulo (USP), Brazil +1. Lucas Sanchez Silva, University of São Paulo (USP), Brazil +1. Gabriella d'Albenzio, Queen's University, Canada +1. Maximilian Fischer, German Cancer Research Center, Germany +1. Víctor Manuel Montaño Serrano, Universidad Autónoma del Estado de México, Mexico +1. Juan Carlos Avila Vilchis, Universidad Autónoma del Estado de México, Mexico +1. Javier Pascau, Universidad Carlos III de Madrid, Spain +1. Marco Nolden, German Cancer Research Center (DKFZ), Germany +1. Eléonore Germond, IMT Atlantique, France +1. Attila Tanács, University of Szeged, Hungary +1. Adriana H. Vilchis González, Universidad Autnoma del Estado de México, Mexico +1. Joël Spaltenstein, Agora Care SA, Switzerland +1. Attila Nagy, University of Szeged, Hungary +1. Robin Peretzke, German Cancer Research Center, Germany +1. Klaus Maier-Hein, German Cancer Research Center, Germany +1. Sara Fernandez Vidal, Paris Brain Institute, France +1. Cristina Soriano, Digital Anatomics SL, Spain +1. Maria Ordieres, Digital Anatomics SL, Spain +1. Hans Knutsson, Linkoping University, Sweden +1. Meritxell Gomez Martinez, Fundacio Eurecat, Spain +1. 
Daniela Schacherer, Fraunhofer Mevis, Germany + + + +## Statistics + + + +## Organizers + +### Local organizing committee +Juan Ruiz-Alzola, PhD, Professor of Imaging Technologies, director of the Grupo de Tecnología Médica y Audiovisual (GTMA), [Instituto Universitario de Investigaciones Biomédicas y Sanitarias (IUIBS)](https://www.iuibs.ulpgc.es/), [Universidad de Las Palmas de Gran Canaria (ULPGC)](https://www.ulpgc.es/) + +Csaba Pintér, PhD, CTO, [EBATINCA](https://ebatinca.com) + +[The EBATINCA team](https://ebatinca.com/en/empresa/team). For inquiries please send email to + +### Global Project Week organizing committee + +* [@tkapur](https://github.com/tkapur) ([Tina Kapur, PhD](http://www.spl.harvard.edu/pages/People/tkapur)), +* [@drouin-simon](https://github.com/drouin-simon) ([Simon Drouin, PhD](https://drouin-simon.github.io/ETS-web//)) +* [@rafaelpalomar](https://github.com/rafaelpalomar) ([Rafael Palomar, PhD](https://www.ntnu.edu/employees/rafaelp)) +* [@sjh26](https://github.com/sjh26) ([Sam Horvath, PhD](https://www.kitware.com/samantha-horvath/)) + +## History +Please read about our experience in running these events since 2005: [Increasing the Impact of Medical Image Computing Using +Community-Based Open-Access Hackathons: the NA-MIC and 3D Slicer Experience](https://www.sciencedirect.com/science/article/pii/S1361841516301128)). diff --git a/PW43_2025_Montreal/BreakoutSessions/ARVRRendering/README.md b/PW43_2025_Montreal/BreakoutSessions/ARVRRendering/README.md new file mode 100644 index 000000000..d3eb37339 --- /dev/null +++ b/PW43_2025_Montreal/BreakoutSessions/ARVRRendering/README.md @@ -0,0 +1,85 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: AR-VR and Rendering + +key_investigators: +- name: Simon Drouin + affiliation: ETS +- name: Steve Pieper + affiliation: Isomic Inc. 
+- name: Rafael Palomar
+  affiliation: OUH / NTNU
+- name: Sankhesh Jhaveri
+  affiliation: Kitware Inc.
+
+---
+# Description
+The goal of this breakout session is to discuss the upcoming changes in the rendering infrastructure of Slicer, investigate ways to make the rendering pipeline more customizable and plan future direction for SlicerVirtualReality to ensure it is more usable, customizable and supports a wide range of AR and VR devices.
+
+## Topics for discussion
+* Status of the transition between OpenGL and WebGPU as the rendering backend of VTK
+  * Andrey's Demo of Anatomy Carve extension (port of Unity project, project page [here](https://projectweek.na-mic.org/PW43_2025_Montreal/Projects/SegmentAwareCarvingOfVolumes/))
+  * MVP for volume rendering: run a compute shader on a volume texture with output to an RGBA buffer that can be volume rendered.
+  * Support for an arbitrary number of input channels
+  * Gradient precomputation
+* Status of SlicerVirtualReality
+  * Stability issues: interaction and markups
+  * Funding ideas to maintain the extension in the long term
+  * Possible improvement
+    * Support for GUI in VR (Show entire Slicer interface?)
+    * Support for video Passthrough
+    * Custom interaction for various use cases (a more outside-in experience as opposed to the current inside-out immersive experience)
+
+## Meeting notes
+
+This document was created in the context of the [_Rendering and XR Breakout session_][breakout-session] taking place at the [43rd Slicer Project Week][43-project-week]
+
+[43-project-week]: https://projectweek.na-mic.org/PW43_2025_Montreal/
+[breakout-session]: https://projectweek.na-mic.org/PW43_2025_Montreal/#breakout-sessions
+
+### SegmentAwareCarvingOfVolumes
+
+Discussed the [PW43 project](https://projectweek.na-mic.org/PW43_2025_Montreal/Projects/SegmentAwareCarvingOfVolumes/) on interactive carving for volume rendering.
+
+| Unity | Slicer |
+|--|--|
+| ![image](https://github.com/user-attachments/assets/3d62c8ec-41e5-4a35-b796-52bd417f5f80) | ![image](https://github.com/user-attachments/assets/0713fcdc-7ca0-401b-abaa-060d2fb4c883) |
+
+The implementation uses a sphere of influence that selectively clips volumes based on segmented objects (user-selected). Antialiasing is currently missing in the 3D Slicer implementation (FXAA was used for the Unity implementation). For this project, @AndreyTitov implemented a loadable module (C++) to expose the texture unit used by volume rendering to Python. He will re-formulate his solution in a PR to the Slicer core so that the functionality is accessible to all Python extensions.
+
+### Discussion about improving the current rendering pipeline in Slicer
+
+* It is a high priority for the Slicer community to have VTK exposing the entry points for internal rendering data (e.g., texture ids) in such a way that it could be used directly in Python.
+* One approach is to change the Slicer Core to expose these internals to Python. This could be done while waiting for VTK to catch up and providing access to these from Python.
+
+### Updates on VTK
+
+- New version (V9.5.0) available (not yet in 3D Slicer). See [PR-8427](https://github.com/Slicer/Slicer/pull/8427). A comprehensive list of changes is in https://docs.vtk.org/en/latest/release_details/9.5.html
+- WebGPU support for image mapper & volume rendering is not available in VTK. Target is VTK 10.x. In the meantime, WebGPU compute pipelines are available. See https://www.kitware.com/webgpu-compute-api-in-vtk/
+- Shaders are going to change quite a bit in the future of VTK (e.g., tag replacement in strings will become function re-definitions). This is not yet ready in v9.5.0. However, the compute shaders infrastructure is ready to play with.
+- VTK & wasm.
See https://kitware.github.io/vtk-wasm/
+
+### SlicerVR
+
+- History of transition of technologies (OpenVR - OpenXR) with some uncertainty in the development towards the future.
+- It is partially working but different users have had different experiences using it.
+- @SimonDrouin raised an issue with manipulation of Markups in OpenVR/XR. Some of the markups handles (particularly the ROI) seem to be 2D facing the camera. This could be causing problems in the context of VR/AR (rendering issue). Interaction with Markups is known to not work (@AndrasLasso).
+- Details about mapping of controller actions to VTK events, see [this page](https://github.com/KitwareMedical/SlicerVirtualReality/blob/master/DeveloperGuide.md)
+- Meta Quest is not supported out of the box yet in OpenXR, but could be done using [this documentation](https://www.kitware.com/using-vtk-with-the-meta-quest/)
+- Future for SlicerVirtualReality:
+  - There is currently no grant to support development and maintenance
+  - There is interest by multiple parties (Chi Zhang, Ron Kikinis, Simon Drouin, Sylvain Bouix) to fix problems and optimize the experience for a limited set of use cases with common requirements that all include exploring patient scans
+  - A version of SlicerVR that is well maintained and stable for these use cases would generate more interest for the module
+  - Needed fixes:
+    - Support major HMD brands ensuring a similar interaction under OpenXR
+    - Improve interaction for the above-mentioned use cases
+    - Fix functionality that doesn't behave the same way in VR and in the 3D View, i.e.
Markups + +## References +* [Notes from PW40](https://projectweek.na-mic.org/PW40_2024_GranCanaria/BreakoutSessions/Rendering/) +* [Notes from PW39](https://projectweek.na-mic.org/PW39_2023_Montreal/BreakoutSessions/RenderingBreakout/) +* [Slicer WebGPU project from PW37](https://projectweek.na-mic.org/PW37_2022_Virtual/Projects/SlicerWGPU/)(Steve Piper) + diff --git a/PW43_2025_Montreal/BreakoutSessions/Neuroanatomy/README.md b/PW43_2025_Montreal/BreakoutSessions/Neuroanatomy/README.md new file mode 100644 index 000000000..ff41032f3 --- /dev/null +++ b/PW43_2025_Montreal/BreakoutSessions/Neuroanatomy/README.md @@ -0,0 +1,20 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Neuroanatomy + +key_investigators: +- name: Jarrett Rushmore + affiliation: BWH + +--- +# Description + +Breakout Session Description + +## Topics + + +# Notes diff --git a/PW43_2025_Montreal/BreakoutSessions/Slicer/README.md b/PW43_2025_Montreal/BreakoutSessions/Slicer/README.md new file mode 100644 index 000000000..6d49fa564 --- /dev/null +++ b/PW43_2025_Montreal/BreakoutSessions/Slicer/README.md @@ -0,0 +1,21 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: 3D Slicer Breakout session + +key_investigators: +- name: Sam Horvath + affiliation: Kitware + +--- +# Slicer Status Update + +[Slides](https://docs.google.com/presentation/d/1hGwzeJkBrqgCCywOtmEkEllCzsSHHGH6mXi5lDqRhC8/edit?usp=sharing) + +## Topics + +Add your questions in [this document](https://docs.google.com/document/d/12_s_XYz0ks1VG-IVJu6UJZBRGkrNcAjDo-fUPREo3DQ/edit?tab=t.0) and they will be discussed during the breakout + +# Notes diff --git a/PW43_2025_Montreal/BreakoutSessions/Workflows/README.md b/PW43_2025_Montreal/BreakoutSessions/Workflows/README.md new file mode 100644 index 000000000..8489c998f --- /dev/null +++ b/PW43_2025_Montreal/BreakoutSessions/Workflows/README.md @@ -0,0 +1,207 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Slicer Workflows + +Organizer: 
Deepa Krishnaswamy + +Key investigators: Andras Lasso, Steve Pieper, Andrey Fedorov, Tina Kapur, Ivan Johnson-Eversoll, Kalum, Kuan Yi Wang + +--- +# Description + +Creating streamlined workflows in Slicer + + + +## Topics + +[Add ideas for discussion here!](https://docs.google.com/document/d/12XuYPVuRgy4RTuIabSIjy_sRrYSliewKhcbB1zJgXVI/edit?usp=sharing) + +Discussion topics: +- Best practices +- What these extensions have in common +- How to create the infrastructure to support the annotation workflow +- Tasks that a workflow should contain: curation, annotation, review, comparing multiple annotations, classification +- Involvement and use of other software like OHIF + +# CART Base Classes: Generic Iterator Framework for 3D Slicer + +## Overview + +CART (Collaborative Annotation and Review Tool) provides a set of abstract base classes for creating streamlined annotation workflows in 3D Slicer. The framework enables efficient iteration through medical imaging cohorts with customizable tasks and flexible data loading strategies. + +## Contributors + +- **Ivan Johnson-Eversoll** (University of Iowa, USA) +- **Kalum Ost** (Montreal Polytechnic, Canada) +- **Kuan Yi** (Montreal Polytechnic, Canada) + +*With inspiration from SlicerCART, mpReview, and CaseIterator* + +## Design Philosophy + +### Minimal Requirements, Maximum Flexibility + +The framework enforces only **one requirement**: +- A CSV file with a `uid` column for unique case identification + +We chose CSV for its **simplicity and universal usage** - it's human-readable, version-controllable, and supported by every data processing tool. 
+ +### Pluggable Data Loading Architecture + +The key innovation is the **DataUnit abstraction layer** that decouples data sources from task logic: + +``` +CSV (uid + metadata) → DataUnit → Task Logic + ↓ ↓ ↓ + Universal Pluggable Reusable + Interface Loaders Tasks +``` + +This design enables multiple data loading strategies: + +#### Local File Support +```python +class VolumeOnlyDataUnit(DataUnitBase): + # Loads .nrrd files from local filesystem + # Supports relative paths with configurable base directory +``` + +#### DICOM Integration (Future) +```python +class DICOMDataUnit(DataUnitBase): + # Query DICOM database by SeriesInstanceUID + # Automatic series type detection and loading + # Built-in de-identification handling +``` + +#### Cloud/Remote Loading (Future) +```python +class CloudDataUnit(DataUnitBase): + # Download from S3, Google Cloud, or IDC + # Automatic caching and prefetching + # Authentication handling +``` + +## Key Design Benefits + +### 1. **Task-Agnostic Data Loading** +Tasks don't need to know whether data comes from local files, DICOM databases, or cloud storage. They simply request resources through the standard `get_resource(key)` interface. + +### 2. **Data Source Migration** +Switch from local NIfTI files to a DICOM database by changing only the DataUnit type - no task code changes required. + +### 3. **Parallel Development** +Data engineers can optimize loading strategies while UI developers focus on annotation workflows, all working against the same abstract interface. + +### 4. **Custom Hanging Protocols** +Tasks can define sophisticated view layouts and multi-volume displays without coupling to specific data formats. 
+ +## Architecture Overview + +### Core Components + +#### DataUnitBase (Abstract) +- **Purpose**: Abstract interface between CSV metadata and loaded Slicer data +- **Responsibility**: Validate data, load resources, manage MRML scene integration +- **Extensibility**: Subclass for different data sources (files, DICOM, cloud, etc.) + +#### DataManager +- **Purpose**: CSV cohort management and efficient case navigation +- **Features**: Sliding window traversal, wraparound navigation, progress tracking +- **Future**: Multi-scene prefetching for seamless user experience + +#### TaskBaseClass (Abstract) +- **Purpose**: Generic framework for annotation/review tasks +- **Features**: GUI integration, automatic data binding, save/resume capability +- **Customization**: Define custom hanging protocols and user interfaces + +## Prefetching and Multi-Scene Support + +The framework is designed to support **prefetching multiple scenes** for improved performance: + +### Current Implementation +- Single scene with lazy loading +- Automatic scene clearing on navigation + +### Future Enhancement +- Background loading of next/previous cases +- Intelligent memory management +- Seamless transitions without loading delays + +## Custom Hanging Protocols + +Tasks can implement sophisticated display logic: + +```python +class MultiContrastTask(TaskBaseClass): + def setup(self, data_unit): + # Custom layout: T1w background, T2w foreground, FLAIR in separate view + volumes = [data_unit.get_resource(key) for key in ['T1w', 'T2w', 'FLAIR']] + self.layoutLogic.viewerPerVolume(volumes, background=volumes[0]) +``` + +## CSV Format Specification + +The only requirement is a `uid` column. +All other columns are interpreted as resource identifiers. 
+We support saving to the Input CSV or a separate output CSV.: + +```csv +uid,T1w,T2w,segmentation,notes +patient_001,/data/001/t1.nrrd,/data/001/t2.nrrd,,needs_review +patient_002,/data/002/t1.nrrd,/data/002/t2.nrrd,/segs/002.nrrd,complete +``` + + +## Getting Started + +### 1. Create Your DataUnit +```python +class MyDataUnit(DataUnitBase): + def _validate(self): + # Ensure your data meets requirements + + def _initialize_resources(self): + # Load data into Slicer scene + + def to_dict(self): + # Export for saving back to CSV +``` + +### 2. Create Your Task +```python +class MyTask(TaskBaseClass): + def buildGUI(self, container): + # Build your annotation interface + + def setup(self, data_unit): + # Configure views and load data + + def save(self): + # Save annotations/results +``` + +### 3. Prepare Your CSV +- Include `uid` column with unique identifiers +- Add columns for each resource (images, segmentations, etc.) +- Use paths appropriate for your chosen DataUnit implementation + +# Step 1 +Get Started by simply selecting your CSV file in the module interface. +And Select your "User" or add a new one. +You can Then navigate through your data using the "Next" and "Previous" buttons. +![Start_loc_png.png](Resources/Icons/Start_loc_png.png) + +# Step 2 + +Select your task from the dropdown menu. +![move_to_next.png](Resources/Icons/move_to_next.png) + +# Step 3 + And complete the "Action" you want to perform. And you can still use the same +"Next" and "Previous" buttons to navigate through your data. +![save.png](Resources/Icons/save.png) diff --git a/PW43_2025_Montreal/ContributingProjectPages.md b/PW43_2025_Montreal/ContributingProjectPages.md new file mode 100644 index 000000000..8c58dc009 --- /dev/null +++ b/PW43_2025_Montreal/ContributingProjectPages.md @@ -0,0 +1,84 @@ +--- +--- +{% comment %}Extract event name (e.g "PW39_2023_Montreal"). 
{% endcomment %} +{%- assign event_name = page.path | split: '/' | first -%} + +# Contributing Project Pages + +## Creating new project pages + +With the [Project Week GitHub Issue page](https://github.com/NA-MIC/ProjectWeek/issues/new/choose), you have two options to create your Project Page: + + +1. [Create a Project](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=sjh26&labels=project%2Cevent%3A{{ event_name }}&projects=&template=project.yml&title=Project%3A+) issue: If you are ready to create your page, you can simply create a “Project” issue. This issue will allow you to fill out a convenient form to provide the necessary details. The Project Week website team will then review the issue and trigger the page creation pull request. + +2. [Create the project page yourself using the template](Projects/README.md): If you prefer to create the Project Page yourself, you can still do so by using the provided template and submitting a pull request. + +## Project Creation Tips + +- Get your project pages created early! The day before is best to make sure everything you need for you presentation is available. The ProjectWeek site will be closed to edits for the ***10 minutes before*** both the opening and closing presentation session to ensure site generation. After this 10 minute period edits will be re-enabled. + +- If you are [creating the project page yourself using the template](Projects/README.md), **don't reuse a project page template from a previous year.** We have made significant updates to the template to support auto-generation of project pages, so previous years' templates will not function properly. 
+ + - When naming the file, **please ensure there are no spaces/special characters in the folder or file name** + - Make sure to fill out / update all of the information at the top of the README file (title, category, location, etc) + +- Remember to fill out the title for your project when using the [project creation issue](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=drouin-simon%2Cpiiq%2Crafaelpalomar%2Csjh26%2Ctkapur&labels=project%2Cevent%3A{{ event_name }}&projects=&template=project.yml&title=Project%3A+) + +- Check the formatting on the Key Investigators list when creating a [project issue](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=drouin-simon%2Cpiiq%2Crafaelpalomar%2Csjh26%2Ctkapur&labels=project%2Cevent%3A{{ event_name }}&projects=&template=project.yml&title=Project%3A+) (this is critical for page generation): + + `- Firstname Lastname (Affiliation, Country)` + +## Updating existing project pages + +Here are the steps using the GitHub web interface: + +1. Navigate to your project's `README.md` on the GitHub website. For instance, if you want to update a project called **YourProjectName**, visit the URL like the following: + + ``` + https://github.com/NA-MIC/ProjectWeek/blob/master/{{ event_name }}/Projects/YourProjectName/README.md + ``` + +2. Click the edit button, as shown in this screenshot: ![Screenshot 2023-06-12 10 43 35](https://github.com/NA-MIC/ProjectWeek/assets/25040869/ab01a7bf-c1e4-4c23-9aca-e2c6421ca530) + +3. You can now edit the page, add images by dragging and dropping, and more. + +4. Once done, click "Commit Changes", and follow the instructions to create a fork and a pull request to add your changes to the webpage. 
See this screenshot for reference: ![Screenshot 2023-06-12 10 50 50](https://github.com/NA-MIC/ProjectWeek/assets/25040869/180e81bb-d4f9-4f65-8569-a93192b2828e) + +## Videos in project pages + +Here are some steps to make sure all of your awesome videos render correctly: + +1. Videos added by drag and drop will render correctly when viewed through GitHub, but need some extra tweaks to work in the final generated website. + + + In your `README.md`, if you have a video link that looks like this: + + ``` + https://github.com/NA-MIC/ProjectWeek/assets/66890913/8f257f29-fa9c-4319-8c49-4138003eba27 + ``` + + Update it to: + + ```html + + ``` + +2. Links to externally hosted videos (such as YouTube) will need an iframe. + + Replace: + + ``` + https://youtu.be/ZWxE5QcGvE8 + ``` + + with + + ````html + + ```` diff --git a/PW43_2025_Montreal/Projects/3DSlicerRos2IntegrationForSurgicalRobotSimulation/README.md b/PW43_2025_Montreal/Projects/3DSlicerRos2IntegrationForSurgicalRobotSimulation/README.md new file mode 100644 index 000000000..2925be61e --- /dev/null +++ b/PW43_2025_Montreal/Projects/3DSlicerRos2IntegrationForSurgicalRobotSimulation/README.md @@ -0,0 +1,106 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: 3D Slicer–ROS2 Integration for Surgical Robot Simulation +category: IGT and Training + +key_investigators: + +- name: Juntae Park + affiliation: AIRS + country: South Korea + +- name: Joonho Seo + affiliation: KIMM + country: South Korea +--- + +# Project Description + + + + +This project aims to simulate fracture reduction surgery using a Stewart platform–based robotic system. Currently, we rely on an Optical Tracking System (OTS) to calibrate the spatial relationship between patient and robot, enabling visualization and execution of the planned reduction path. + +However, the use of OTS comes with considerable cost and practical limitations. 
To overcome these, we plan to integrate ROS2 directly with 3D Slicer and eliminate the need for external tracking hardware. By using only software communication between 3D Slicer and ROS2, we will visualize and simulate fracture reduction trajectories entirely in a virtual environment. + + + +## Objective + + + + +1. Set up an SDF/URDF based ROS2 simulation environment for the Stewart-platform surgical robot +2. Simulate robot movement in 3D Slicer using ROS2 input + + + +## Approach and Plan + + + + +- Analyze and implement SDF/URDF models for the AIRED Stewart-platform robot + - Create both `.sdf` and `.urdf` files representing the AIRED robot structure + - Visualize the robot model in ROS2 RViz for verification + - Attempt visualization using the SlicerROS2 extension + +- Develop ROS2–3D Slicer communication nodes + - Create ROS2 publisher and subscriber nodes to transmit pose data + - Use ROS2 topics to establish bidirectional communication between ROS2 and 3D Slicer + +- Simulate and verify robot motion +- + - Send simulated pose commands via keyboard inputs + - Monitor robot motion in RViz and 3D Slicer during simulation + + +### Progress and Next Steps + +- Created a basic URDF model of the AIRED Stewart-platform robot +- Verified URDF-based visualization in ROS2 RViz + +![Image](https://github.com/user-attachments/assets/0c8b3cbb-fc25-4b54-8ae5-d1742d387233) + + - Implemented ROS2–3D Slicer communication for pose exchange + - Test real-time motion simulation in both RViz and 3D Slicer + - https://www.youtube.com/watch?v=FtWf1xCiFFY + + [![Watch the video](https://img.youtube.com/vi/FtWf1xCiFFY/0.jpg)](https://www.youtube.com/watch?v=FtWf1xCiFFY) + +- Future Work + - Implement and test bidirectional data communication between ROS2 and the robot + - Evaluate the precision of 6-DoF motion of the robot + + + +# Illustrations + + + + +- Fracture reduction surgical robot + 
+![Image](https://github.com/user-attachments/assets/2ceaf39f-505e-4741-886d-fe6174183b67) + + +- Visualized the URDF model in 3D Slicer using the SlicerROS2 extension + +![Image](https://github.com/user-attachments/assets/39515bcd-d53d-4567-8968-6a5df34b2f39) + + + +# Background and References + + + + +- [SlicerROS2](https://slicer-ros2.readthedocs.io/en/v1.0/index.html) +- [ROS2 Jazzy](https://docs.ros.org/en/jazzy/index.html) +- [https://drive.google.com/file/d/1RaTsGjUCuYQjolq8KW-ABr_gGIWw4BRO/view?usp=drive_link](https://drive.google.com/file/d/1RaTsGjUCuYQjolq8KW-ABr_gGIWw4BRO/view?usp=drive_link) +- [https://rosmed.github.io/tutorials/smarttemplate/](https://rosmed.github.io/tutorials/smarttemplate/) + diff --git a/PW43_2025_Montreal/Projects/AddClaronavMicrontracker4SupportToPlus/README.md b/PW43_2025_Montreal/Projects/AddClaronavMicrontracker4SupportToPlus/README.md new file mode 100644 index 000000000..4fb4a2b55 --- /dev/null +++ b/PW43_2025_Montreal/Projects/AddClaronavMicrontracker4SupportToPlus/README.md @@ -0,0 +1,89 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Add ClaroNav MicronTracker 4 Support to Plus +category: IGT and Training + +key_investigators: + +- name: Tamas Ungi + affiliation: ClaroNav Kolahi Inc + country: Canada + +- name: Sean Chen + affiliation: ClaroNav Kolahi Inc + country: Canada + +- name: Kyle Sunderland + affiliation: Queen's University + country: Canada + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +--- + +# Project Description + + + + +MicronTracker 4 is the latest optical tracker from [ClaroNav](https://claronav.com/oem/microntracker-4/). Currently Plus supports only MicronTracker 3.6 and 3.7. + +During this project week we will work on integrating support for the ClaroNav Micron Tracker 4 into Plus. 
There is a pending [pull request](https://github.com/PlusToolkit/PlusLib/pull/1236) that we will compile, test and integrate to add support for this device.
+
+
+
+## Objective
+
+
+
+
+1. Add support for the ClaroNav MicronTracker 4 to Plus.
+
+
+
+## Approach and Plan
+
+
+
+
+1. Compile Plus using the changes added in this [PR](https://github.com/PlusToolkit/PlusLib/pull/1236)
+2. Test the connection between Plus and MicronTracker 4.
+3. Make necessary changes and integrate the PR.
+4. Add options to support compiling, and automatically finding the SDK using PlusBuild.
+
+
+
+
+## Progress and Next Steps
+
+
+
+
+1. Integrated changes to support building with MicronTracker 4.1 ([https://github.com/PlusToolkit/PlusBuild/commits/master/?author=Sunderlandkyl&since=2025-06-22&until=2025-06-26](https://github.com/PlusToolkit/PlusBuild/commits/master/?author=Sunderlandkyl&since=2025-06-22&until=2025-06-26))([https://github.com/PlusToolkit/PlusLib/pull/1236](https://github.com/PlusToolkit/PlusLib/pull/1236))
+1. Generated Plus MicronTracker 4.1 package: [https://github.com/PlusToolkit/PlusLib/actions/runs/15842387536/artifacts/3389393270](https://github.com/PlusToolkit/PlusLib/actions/runs/15842387536/artifacts/3389393270)
+1. Created tool templates for the stylus and "reference" (cup)
+1. Successfully connected to the camera using Plus + Slicer.
+ +# Illustrations + + + +![](https://github.com/user-attachments/assets/a0e80c73-11a9-4d74-9d9d-5443cf3dc2d1) + +![20250625_133833](https://github.com/user-attachments/assets/72251628-4b85-4e02-b519-9d33e5e475cc) + + +# Background and References + + + + +- [https://claronav.com/oem/microntracker-4/](https://claronav.com/oem/microntracker-4/) +- [https://github.com/PlusToolkit/PlusLib/pull/1236](https://github.com/PlusToolkit/PlusLib/pull/1236) diff --git a/PW43_2025_Montreal/Projects/AppImageFor3DSlicerOnLinux/README.md b/PW43_2025_Montreal/Projects/AppImageFor3DSlicerOnLinux/README.md new file mode 100644 index 000000000..4b124121e --- /dev/null +++ b/PW43_2025_Montreal/Projects/AppImageFor3DSlicerOnLinux/README.md @@ -0,0 +1,100 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: AppImage, Flatpak, etc. for packaging 3D Slicer on Linux +category: Infrastructure + +key_investigators: +- name: Benjamin Zwick + affiliation: University of Western Australia + country: Australia +--- + +# Project Description + + + +This project aims to create a portable AppImage or Flatpak distribution of 3D Slicer for Linux systems. AppImage and Flatpak are universal software package formats that allow applications to run on various Linux distributions, making 3D Slicer more accessible and easier to deploy across different Linux environments. The project will streamline the distribution process and reduce dependency conflicts that users often encounter when installing 3D Slicer on different Linux distributions. + +## Objective + + + +1. **Objective A. Create a functional 3D Slicer AppImage** that bundles all necessary dependencies and libraries to run 3D Slicer on major Linux distributions without requiring system-wide installation. +2. **Objective B. Establish an automated build pipeline** for generating AppImages from 3D Slicer releases, ensuring consistency and maintainability for future versions. +3. 
**Objective C. Validate cross-distribution compatibility** by testing the AppImage on multiple Linux distributions (Ubuntu, Fedora, openSUSE, Arch Linux, etc.) to ensure broad compatibility. +4. **Objective D. Document the build process and usage instructions** to enable community contributions and provide clear guidance for end users. + +## Approach and Plan + + + +1. **Analyze 3D Slicer's dependency requirements** and identify all libraries, Qt components, Python modules, and system dependencies needed for a standalone package. +2. **Set up AppImage build environment** using linuxdeploy tools and configure the build process to bundle 3D Slicer with all required dependencies. +3. **Create AppImage recipe and build scripts** that can automatically generate the AppImage from existing 3D Slicer builds, including proper library linking and Qt plugin configuration. +4. **Implement continuous integration pipeline** to automatically build AppImages for new 3D Slicer releases using GitHub Actions or similar CI/CD platforms. +5. **Test the generated AppImage** across different Linux distributions in virtual machines or containers to verify functionality and identify compatibility issues. +6. **Optimize AppImage size and performance** by removing unnecessary components and ensuring efficient library bundling without compromising functionality. +7. **Create comprehensive documentation** including build instructions, troubleshooting guide, and user installation/usage documentation. + +## Progress and Next Steps + + + +1. Learnt that AppImage is not very suitable, and Flatpak is difficult to get working with e.g. the Extension Manager. +2. Decided to use existing `make package` for now. +3. 
Updated the [SlicerBuildEnvironment](https://github.com/Slicer/SlicerBuildEnvironment) instructions for building 3D Slicer, Slicer Custom Apps and Slicer extensions (See the [Slicer Build Instruction Updates project](https://projectweek.na-mic.org/PW43_2025_Montreal/Projects/SlicerBuildInstructionUpdates/) for more details). + +TODO + + + + + + +# Illustrations + + +*Screenshots and demonstration videos will be added as the project progresses, showing the AppImage creation process and cross-platform testing results.* + +# Background and References + + +## Background + +### Related Previous Projects + +- [Slicer Flatpak - Project Week 39 (Montreal, June 2023)](https://projectweek.na-mic.org/PW39_2023_Montreal/Projects/SlicerFlatpak/) +- [Systole OS: an operating system for development/deployment of medical devices - Project Week 38 (Gran Canaria, Jan 2023](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/SystoleOS/) + +### 3D Slicer Community Discourse + +- [Appimage - for linux systems - Support / Feature requests - 3D Slicer Community](https://discourse.slicer.org/t/appimage-for-linux-systems/35594) +- [Interest to create Flatpak for 3D Slicer, have issue with GUISupportQtOpenGL not found - Support - 3D Slicer Community](https://discourse.slicer.org/t/interest-to-create-flatpak-for-3d-slicer-have-issue-with-guisupportqtopengl-not-found/16532/34) + +## References + +3D Slicer: +- [3D Slicer Official Website](https://www.slicer.org/) +- [3D Slicer GitHub Repository](https://github.com/Slicer/Slicer) +- [GNU/Linux systems — 3D Slicer documentation](https://slicer.readthedocs.io/en/latest/developer_guide/build_instructions/linux.html) +- [SlicerBuildEnvironment](https://github.com/Slicer/SlicerBuildEnvironment) + +AppImage: +- [AppImage - Linux apps that run anywhere](https://appimage.org/) +- [AppImage Best Practices](https://docs.appimage.org/packaging-guide/index.html) +- [linuxdeploy - AppImage creation 
tool](https://github.com/linuxdeploy/linuxdeploy) + +Flatpak: +- [Flatpak—the future of application distribution](https://flatpak.org/) + +Misc: +- [Qt 5 Application Deployment on Linux](https://doc.qt.io/qt-5/linux-deployment.html) +- [Qt for Linux/X11 - Deployment - Qt 6](https://doc.qt.io/qt-6/linux-deployment.html) diff --git a/PW43_2025_Montreal/Projects/ApplicationOfSlicerros2InRoboticCatheterPlacementForCardiacAblation/README.md b/PW43_2025_Montreal/Projects/ApplicationOfSlicerros2InRoboticCatheterPlacementForCardiacAblation/README.md new file mode 100644 index 000000000..d8ace9c57 --- /dev/null +++ b/PW43_2025_Montreal/Projects/ApplicationOfSlicerros2InRoboticCatheterPlacementForCardiacAblation/README.md @@ -0,0 +1,114 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Application of SlicerROS2 in Robotic Catheter Placement for Cardiac Ablation +category: IGT and Training + +key_investigators: + +- name: Junichi Tokuda + affiliation: BWH + country: USA + +- name: Yue Chen + affiliation: BWH + country: USA + +- name: Ehud Schmidt + affiliation: BWH + country: USA + +- name: Laura Connolly + affiliation: BWH + country: USA + +--- + +# Project Description + +We investigate the use of SlicerROS2 to simulate robotic catheter in the heart anatomy for cardiac ablation. + + + +## Objective +1. We will develop a tool to configure robotic catheter and generate URDF data, +2. Test the URDF for visualization in RViz/3D Slicer and dynamic simulation in the Gazebo simulator. + +## Approach and Plan +1. Write a script to generate a URDF file to model the kinematic and visual models of the catheter. +2. Load the model onto the Gazebo dynamic simulator +3. Visualize the simulation outcome on 3D Slicer using SlicerROS2 +4. (Optional) Create a 3D geometric model of the cardiovascular structures on 3D Slicer, and incorporate into the scene on the Gazebo simulator. 
+
+## Progress and Next Steps
+### Generating a URDF file
+
+* [https://github.com/tokjun/cath_urdf_generator](https://github.com/tokjun/cath_urdf_generator)
+
+A Python script to generate a XACRO file (which can be converted to URDF) for a flexible catheter has been prototyped. The script models a flexible catheter as serial links connected via universal joints and rotary springs. The user can provide parameters to define the serial links, including N, D, L1, L2, L3, K, and M to mimic the mechanical behavior of the flexible catheter with N links and N-1 universal joints. The serial link consists of the first and the last links representing the tip and the base links, and the bending section consisting of the remaining (N-2) links. Each joint has rotary springs with a spring constant of K that generate torques to bring it back to the straight position when the joint is rotated by the external force. D is the outer diameter of the catheter. L1, L2, and L3 are the lengths of the tip link, the bending section, and the base link. The total weight of the catheter is M.
+
+The script generates a ROS package including a XACRO file.
+~~~~ +$ cd ~/ros2_ws/src +$ python3 /catheter_urdf_generator.py --N 12 --D 0.003 --L1 0.20 --L2 0.5 --L3 0.05 --K 0.2 --M 0.5 --output my_catheter +~~~~ +The URDF can be published in the ROS network using the following command: +~~~~ +$ ros2 run robot_state_publisher robot_state_publisher --ros-args -p robot_description:="$( xacro /home/junichi/ros2_ws/src/my_catheter/urdf/my_catheter.xacro )" +~~~~ +To visualize using RViz and control the joint angles with the joint_state_publisher_gui, open a new terminal and use the following commands: +~~~~ +$ source /opt/ros/jazzy/setup.bash +$ ros2 run joint_state_publisher_gui joint_state_publisher_gui +~~~~ +Open another terminal and launch RViz: +~~~~ +$ source /opt/ros/jazzy/setup.bash +$ ros2 run rviz2 rviz2 +~~~~ +![rviz_catheter](https://github.com/user-attachments/assets/2997eaeb-f2ce-45e8-8073-bee21ce492d7) + + +## Dynamic Simulation with Gazebo and 3D Slicer +The launch file generated by the script can run dynamic simulation on Gazebo and visualize it on RViz and 3D Slicer. Assuming that the script has already generated the package under `~/ros2_ws/src` (see the previous section). +~~~~ +$ cd ~/ros2_ws +$ source /opt/ros/jazzy/setup.bash +$ colcon build +$ ros2 launch my_catheter my_catheter_launch.py +~~~~ +The last command launches both Gazebo and RViz. + +On RViz, open the `Displays` frame (should be on the left panel of the window if Rviz is opened for the first time) and configure it as follows: +1. Click the `Add` button at the bottom. A dialog box should show up. +2. Under `Create visualization`, click `Robot Model` under `rviz_default_plugins` +3. Click `OK` to close the dialog box. +4. Under `Robot Model`, click the right column for `Description Topic` and choose `/robot_description`. +5. Under `Global Options`, click the right column for `Fixed Frame` and choose `base_link`. 
+ +3D Slicer with SlicerROS2 can import the robot description from the ROS network and display the catheter model with the following steps: +1. Open the SlicerROS2 module. +2. Click `Add new robot`. +3. Click `Load robot` (use default parameters). +4. The catheter model should appear on the 3D viewer. + +# Illustrations + + +[![dynamic simulation](https://img.youtube.com/vi/upqZboU-ong/0.jpg)](https://www.youtube.com/watch?v=upqZboU-ong) +(NOTE: The screen video is slow because of the recording, but it was smooth when running without screen recording) + + +# Background and References +1. [SlicerROS2 Repository](https://github.com/rosmed/slicer_ros2_module) +2. [SlicerROS2 Documentation](https://slicer-ros2.readthedocs.io/en/v1.0/) +3. [SlicerROS2 Tutorials](https://rosmed.github.io/tutorials/) + +# Acknowledgements + +This work is supported in part by NIH (R01EB020667, R01EB034359, R01EB036015, R01CA235134, P41EB028741). + + + diff --git a/PW43_2025_Montreal/Projects/AutomatedBoneSegmentationAnd3DModellingUsingTracked2DUltrasoundImaging/README.md b/PW43_2025_Montreal/Projects/AutomatedBoneSegmentationAnd3DModellingUsingTracked2DUltrasoundImaging/README.md new file mode 100644 index 000000000..f96b082cd --- /dev/null +++ b/PW43_2025_Montreal/Projects/AutomatedBoneSegmentationAnd3DModellingUsingTracked2DUltrasoundImaging/README.md @@ -0,0 +1,85 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Automated Bone Segmentation and 3D Modelling using Tracked 2D Ultrasound Imaging +category: IGT and Training + +key_investigators: + +- name: Nicholas Kawwas + affiliation: Concordia University + country: Canada + +- name: Hassan Rivaz + affiliation: Concordia University + country: Canada + +--- + +# Project Description + + + + +This project aims to create sub-millilitre accurate bone modeling using Ultrasound imaging, Optical tracking (OptiTrack) and Deep learning. Modelling can be split into two key steps: segmentation and reconstruction. 
Segmentation considers each Ultrasound B-mode image and uses deep learning to automatically segment the bone. These segmented bone surfaces along with their respective OptiTrack coordinates associated with the US image are used to create a 3D model of the imaged bone. A free-hand sweep with the US probe will be used to generate a 3D volume of the bone, enabling fast, precise bone modelling at low cost and without radiation.
+
+
+
+## Objective
+
+
+
+
+1. Perform free-hand sweep capturing 2D US images along with the associated coordinates
+2. Reconstruct the bone using the bone surface, image coordinates and probe positioning
+3. Provide a fast, sub-millimetre precise 3D bone model with no radiation and low cost
+
+
+
+
+## Approach and Plan
+
+
+
+
+1. Perform free-hand sweep capturing 2D US images with Verasonics along with the associated coordinates from OptiTrack
+2. Automatically segment each US image, obtaining the bone location with deep learning
+3. Reconstruct the bone using the segmented bone surface shape, image coordinates and probe angles using Neural Fields
+4. Provide a fast, sub-millimetre precise 3D bone model with no radiation and low cost
+5. Visualize each slice and entire model in 3D Slicer
+
+
+
+
+## Progress and Next Steps
+
+
+
+
+1. Trained new segmentation model with high DICE and low surface distance error
+2. Collect US and OptiTrack data from thigh for femur reconstruction
+3. 
Train Neural Fields model like MaskField for 3D model generation from multiple 2D images and coordinates + + + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +_No response_ + diff --git a/PW43_2025_Montreal/Projects/AutomatedCbctMriRegistrationAdvancesTemporomandibularDegenerativeJointDiseaseDiagnosis/README.md b/PW43_2025_Montreal/Projects/AutomatedCbctMriRegistrationAdvancesTemporomandibularDegenerativeJointDiseaseDiagnosis/README.md new file mode 100644 index 000000000..51096a538 --- /dev/null +++ b/PW43_2025_Montreal/Projects/AutomatedCbctMriRegistrationAdvancesTemporomandibularDegenerativeJointDiseaseDiagnosis/README.md @@ -0,0 +1,81 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Automated CBCT-MRI Registration Advances Temporomandibular Degenerative Joint Disease + Diagnosis +category: Quantification and Computation + +key_investigators: + +- name: Alban Gaydamour + affiliation: University of Michigan + country: USA + +- name: Lucia Cevidanes + affiliation: University of Michigan + country: USA + +- name: Gaelle Leroux + affiliation: CPE Lyon + country: France + +- name: Juan Prieto + affiliation: University of North Carolina + country: USA + +- name: Steve Pieper + affiliation: Isomics + country: USA + +- name: Enzo Tulissi + affiliation: University of Michigan + country: USA + +--- + +### Project Description + +Accurate registration between CBCT and MRI scans offers complementary visualization of osseous and soft tissue structures around the temporomandibular joint (TMJ). This project aims to automate the registration process to support more effective diagnosis of temporomandibular degenerative joint disease. + +### Objective + +1. Develop a robust multi-step registration pipeline to align MRI and CBCT images in the TMJ region. +2. Enhance diagnostic accuracy by integrating soft tissue and osseous imaging modalities. +3. Move toward a fully automated workflow that eliminates manual intervention. 
+ +### Approach and Plan + +1. Perform initial coarse alignment using TorchReg to approximate global positioning of the volumes. +2. Automatically crop both CBCT and MRI volumes to isolate the TMJ region of interest. +3. Apply Elastix-based deformable registration to the cropped images for fine alignment. +4. Evaluate registration quality through both visual assessment and quantitative metrics. +5. Plan to implement automated TMJ region localization to eliminate manual cropping. + +### Progress and Next Steps + +1. Implemented automated global registration using TorchReg. +2. Established an automated cropping protocol focused on the TMJ region to reduce failure modes in deformable registration. +3. Completed fine registration using Elastix on cropped image pairs. +4. Next steps include developing automated tools to segment MRIs and adding an articular disc label to improve interpretability + +### Illustrations + +### **Figure 1:** Input MRI (grey) and CBCT (beige) +![Left half of the head](https://github.com/user-attachments/assets/4f7dd2ff-5461-41a3-b03e-b2f2d6949aa7) + +### **Figure 2:** Patient after first registration +![Superposition of MRI and CBCT after first registration](https://github.com/user-attachments/assets/9fe4e5b5-386c-4a44-9d2a-3137fe9907e9) + +### **Figure 3:** Cropping of the left TMJ +![Region of Interest used for cropping of the TMJ](https://github.com/user-attachments/assets/b2be7af9-9237-4204-9641-8fc8dc8af751) + +### **Figure 4:** Patient after second registration +![Superposition of MRI and CBCT after second registration](https://github.com/user-attachments/assets/98eddeb9-ce15-4e02-a079-7faa236ee6d4) + +### Background and References + +- [TorchReg](https://github.com/codingfisch/torchreg) +- [Elastix](https://github.com/SuperElastix/elastix) +- [Github](https://github.com/DCBIA-OrthoLab/SlicerAutomatedDentalTools) diff --git 
a/PW43_2025_Montreal/Projects/AutomaticDetectionOfAnatomicalLandmarksIn3DBrainMri/README.md b/PW43_2025_Montreal/Projects/AutomaticDetectionOfAnatomicalLandmarksIn3DBrainMri/README.md new file mode 100644 index 000000000..2c875ad5a --- /dev/null +++ b/PW43_2025_Montreal/Projects/AutomaticDetectionOfAnatomicalLandmarksIn3DBrainMri/README.md @@ -0,0 +1,132 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Automatic Detection of Anatomical Landmarks in 3D Brain MRI +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Ahmed Rekik + affiliation: École de Technologie Supérieure + country: Canada + +- name: Sylvain Bouix + affiliation: École de Technologie Supérieure + country: USA + +- name: Jarrett Rushmore + affiliation: 'Boston University Medical ' + country: USA + +--- + +# Project Description + + + + +This project aims to automate the detection of 12 anatomical landmarks in T1-weighted brain MRI volumes using deep learning techniques. These landmarks assist neuroanatomists in the manual segmentation of complex brain regions, reducing the time and variability involved in manual annotation. + + + + + +## Objective + + + + +Develop and validate a deep learning model capable of accurately localizing 12 anatomical landmarks in 3D T1w MRI, with the goal of supporting expert-guided neuroimaging annotation. + + + + + +## Approach and Plan + + + + +- Validate a ground truth dataset consisting of 12 manually annotated anatomical landmarks on 3D T1-weighted brain MRIs. + +- Adapt the PIN (Patch-based Iterative Network) model . + +- Build a statistical shape model using Principal Component Analysis (PCA). + +- Adapt and implement a Global-to-Local approach as an alternative method for landmark detection. + +- Compare the predictions and performance of PIN and the Global-to-Local model. + +- Validate the final selected model using an external dataset containing a different set of landmarks to assess generalizability. 
+ + + +## Progress and Next Steps + + + + +**Progress so far:** + +> Assembled a dataset of 100 T1-weighted brain MRI volumes with 12 manually annotated landmarks per subject. + +> Converted the data to the format required by the PIN pipeline (normalized volumes, voxel-based coordinates). + +> Adapted key components of the PIN codebase: input_data.py, train.py, infer.py, and shape_model_func.py. + +> Built a shape model using PCA and verified shape vector consistency. + +> Trained the PIN model with uncertainty-weighted loss; training loss steadily decreased to ~8000. + +> Debugged inference issues related to patch cropping and out-of-bounds landmarks. + +**Next steps:** + +> Improve robustness during prediction. + +> Implement and adapt the Global-to-Local approach for our 12-landmark dataset. + +> Compare landmark prediction accuracy between PIN and Global-to-Local models. + +> Validate the chosen model on an external landmark dataset with different anatomical structures. + +> Visualize landmark predictions and generate error metrics to support discussion with neuroanatomy experts. + + + + + +# Illustrations + +![1](https://github.com/user-attachments/assets/a0b3aefa-b304-4f8b-9c72-2bf83dd4564c) + +![2](https://github.com/user-attachments/assets/23996352-97ab-4291-9e17-6f8a5d8505ce) + + +![3](https://github.com/user-attachments/assets/90b751d8-47e8-4321-a439-3081f8ed08ec) + + +![4](https://github.com/user-attachments/assets/81c5257a-f68b-45f1-911a-43705a197ce5) + +# Background and References + + + + +> PIN – Patch-based Iterative Network +Li, Y., et al. (2018). Fast Multiple Landmark Localisation Using a Patch-Based Iterative Network. In Frangi, A., Schnabel, J., Davatzikos, C., Alberola-López, C., Fichtinger, G. (Eds.), MICCAI 2018, LNCS, vol. 11070. Springer, Cham. &#13;
+[https://doi.org/10.1007/978-3-030-00928-1_64](https://doi.org/10.1007/978-3-030-00928-1_64) + +> Global-to-Local Landmark Detection +Noothout, J. M. H., et al. (2020). Deep Learning-Based Regression and Classification for Automatic Landmark Localization in Medical Images. IEEE Transactions on Medical Imaging, 39(12), 4011–4022. +[https://doi.org/10.1109/TMI.2020.3009002](https://doi.org/10.1109/TMI.2020.3009002) + +> CABLD Dataset – Cortical and subcortical Annotation of Brain Landmarks Dataset +Salari, S., Harirpoush, A., Rivaz, H., & Xiao, Y. (2023). CABLD: Contrast-Agnostic Brain Landmark Detection with Consistency-Based Regularization. +Department of Computer Science and Electrical Engineering, Concordia University, Montréal, Canada. +[https://doi.org/10.48550/arXiv.2411.17845](https://doi.org/10.48550/arXiv.2411.17845) + diff --git a/PW43_2025_Montreal/Projects/DedicatedCprViewForDentalPanoramicVisualizationIn3DSlicer/README.md b/PW43_2025_Montreal/Projects/DedicatedCprViewForDentalPanoramicVisualizationIn3DSlicer/README.md new file mode 100644 index 000000000..d97008bb1 --- /dev/null +++ b/PW43_2025_Montreal/Projects/DedicatedCprViewForDentalPanoramicVisualizationIn3DSlicer/README.md @@ -0,0 +1,103 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Dedicated CPR View for Dental Panoramic Visualization in 3D Slicer +category: VR/AR and Rendering + +key_investigators: + +- name: Taeyoung Ted Park + affiliation: TruAbutment + country: South Korea + +--- + +# Project Description + + + + +This project aims to design a dedicated Slice View system for Curved Planar Reformation (CPR) visualization in dental imaging, implemented in 3D Slicer. +Although grid transforms can currently be used to simulate panoramic views, this method is structurally limited due to the following: + +Transforms are applied at the node level, making it necessary to duplicate volume nodes in order to display CPR and standard views simultaneously. 
+ +Integration with other displayable nodes (segmentations, models, markups) is limited, and standard interaction tools such as cursors and slice handles are not functional within the transformed context. + +To overcome these limitations, this project focuses on designing a transform-isolated rendering structure — a dedicated CPR World — where transforms are applied per-view and rendering is independent of the global scene configuration. + + + +## Objective + + + + +- Design a CPR-specific slice view where transforms can be applied at the view level +- Enable concurrent display of CPR and standard views without duplicating data +- Architect a CPR Scene structure that supports volume sharing and transform isolation +- Ensure compatibility with the GeneralReformat module +- Define a system where CPR state is serializable via the MRML infrastructure +- Plan for future extensibility to support models, markups, segmentations, and measurement tools in the CPR view + + + +## Approach and Plan + + + + +1. Analyze Current Slicer Architecture +- Study how transforms are currently applied globally to nodes +- Understand internal mechanisms of vtkMRMLSliceNode, vtkMRMLSliceCompositeNode, vtkImageResliceMapper, etc. + +2. Design CPR View Structure +- Define a new layout and view context for CPR without interfering with existing Slicer views +- Extend qMRMLLayoutManager to support CPR-specific view identifiers + +3. Architect Transform Isolation +- Design a separate MRML scene (CPR Scene) for view-specific transform logic +- Allow data to be shared between scenes while applying transforms only in CPR context + +4. Define CPR Slice Rendering Logic +- Generate reslices along user-defined centerlines (splines or markups) +- Design rendering pipelines capable of displaying volumes, overlays, and models along the panoramic curve + +5. 
Plan for GeneralReformat Integration +- Analyze the structure introduced in PR [#8148](https://github.com/Slicer/Slicer/pull/8148) (GeneralReformat module) +- Design interface compatibility for future functional convergence + + + +## Progress and Next Steps + + + +- Analyzed merged PR [#8148](https://github.com/Slicer/Slicer/pull/8148) and structure of GeneralReformat +- Initiate CPR-specific view and layout architecture +- Define class-level design documents and rendering flow + + + +# Illustrations + + + + +Example of CPR view (made by 3D Slicer) + +![Image](https://github.com/user-attachments/assets/063bd8ef-e9ce-493b-a963-55e178aa429b) + + + +# Background and References + + + + +_No response_ + diff --git a/PW43_2025_Montreal/Projects/DevelopmentOfAVirtualRealityAndHapticTrainingSimulationForUltrasoundGuidedCatheterInsertion/README.md b/PW43_2025_Montreal/Projects/DevelopmentOfAVirtualRealityAndHapticTrainingSimulationForUltrasoundGuidedCatheterInsertion/README.md new file mode 100644 index 000000000..5a53265b5 --- /dev/null +++ b/PW43_2025_Montreal/Projects/DevelopmentOfAVirtualRealityAndHapticTrainingSimulationForUltrasoundGuidedCatheterInsertion/README.md @@ -0,0 +1,117 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Development of a virtual reality and haptic training simulation for ultrasound-guided + catheter insertion +category: VR/AR and Rendering + +key_investigators: + +- name: Naomi Catwell + affiliation: ÉTS + country: Canada + +- name: Simon Drouin + affiliation: ÉTS + country: Canada + +- name: Rafael Palomar + affiliation: Oslo University Hospital + country: Norway + +- name: Steve Pieper + affiliation: Isomics + country: Inc., United States + +--- + +# Project Description + + + + +**Context** +Ultrasound-guided IV catheter insertion is the most commonly performed medical procedure in hospitals. However, it has been associated with high complication rates. 
Virtual reality and haptic-enabled simulations may improve training for such a procedure. The aim of this project is to evaluate the effectiveness of haptic feedback for ultrasound-guided peripheral IV insertion training in a virtual reality simulation. + +**Current state** +A virtual reality training and evaluation simulator equipped with real-time ultrasound image rendering and a haptic force feedback model has been developed with Unity and two Haply Robotics Inverse3 devices. + +**Project Week** +In the context of Project Week, the aim is to improve the simulation with skin deformation physics using [SOFA-Slicer](https://github.com/Slicer/SlicerSOFA). This would improve perception of depth and identification of the needle insertion point in the virtual environment. + + + +## Objective + + + + +1. Integration of SOFA-Slicer Project into the Unity simulation +2. Simulation of believable skin deformation with SOFA physics + + + +## Approach and Plan + + + + +1. Get a working version of a recent SOFA-Slicer version on Windows. See this [forked version of Slicer-SOFA](https://github.com/pieper/SlicerSOFA) and lungsimple.py sample project +2. Re-integrate SOFA with the existing simulation - use SOFA version 24.12 +3. Define an interactive SOFA configuration file to simulate forearm skin deformation in Unity +4. Integrate a version of Slicer-SOFA into the Slicer configuration file of the simulation +5. Work with SOFA-Slicer project team + + + +## Progress and Next Steps + + + + + +Development + +1. Explored multiple strategies (Cosserat Strategy, SlicerSOFA Strategy, Shader Deformation Strategy, etc.) + +2. Hooking the Unity simulation up to IGT directly in Slicer to use the Grid transform method + +4. Send the updated deformed mesh continuously back to Unity using PolyData message +![Screenshot 2025-06-26 102642](https://github.com/user-attachments/assets/41a6026e-6bfc-4cc0-837c-82221589da1a) + +5. &#13;
Test to see if the ultrasound image is deformed appropriately +6. Apply the deformation upon needle insertion as well +7. User testing + + +Pilot testing +1. Success in getting enough people for the mini-longitudinal pilot test +2. Lots of great feedback (thanks!) +3. Need more people for the short pilot test for needle deviation strategies (come try the simulation!) +![image](https://github.com/user-attachments/assets/51a376e0-ea72-4e47-85e5-07961f5f37e0) + + +# Illustrations + + + + +![Image](https://github.com/user-attachments/assets/6d74833a-af17-41ed-9cbe-fd084dabb651) + + +"Analytical approximation" of probe-tissue interaction to experiment with tissue displacement, using [this code](https://github.com/pieper/SlicerSOFA/blob/07a77dcf24980475ac0c0c7736b71f142db46491/Experiments/arm.py) developed during Project Week. While this method is not physically realistic, it represents a real-time approximation that may be adapted for use in training systems. A more complex simulation can possibly make use of the linear and nonlinear transformation hierarchy worked out in this example. 
+ + + +# Background and References + + + + +[Project Week 41](https://projectweek.na-mic.org/PW41_2024_MIT/Projects/SOFAUnityHapticModel/) - familiarisation with SOFA and a first integration of SOFA into Unity + diff --git a/PW43_2025_Montreal/Projects/DynamicCitationCounterBadgeForPapersWithCode/README.md b/PW43_2025_Montreal/Projects/DynamicCitationCounterBadgeForPapersWithCode/README.md new file mode 100644 index 000000000..e3bdbfab0 --- /dev/null +++ b/PW43_2025_Montreal/Projects/DynamicCitationCounterBadgeForPapersWithCode/README.md @@ -0,0 +1,84 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Dynamic citation counter badge for papers with code +category: Other + +key_investigators: + +- name: Mauro Ignacio Dominguez + affiliation: Independent + country: Argentina + +--- + +# Project Description + + + + +Dynamic citation counter badges display up-to-date citation counts, making it easy to track a paper’s real-time impact. They encourage researchers to share code by linking visibility and recognition to reproducible work. Embedding badges in GitHub or documentation connects the code directly with its academic influence. They ensure transparency by pulling citation data from trusted sources automatically. Overall, they promote open science, reproducibility, and fair recognition in the research community. + + + +## Objective + + + + +1. Achieve a dynamic citation counter badge from a given paper with code if possible + + + +## Approach and Plan + + + + +1. Write the steps to get the dynamic citation counter badge from a given paper with code +2. Research most trustable and comprehensive scientific-research metrics web sites or APIs +3. Write a python script that takes a DOI (i.e. Digital Object Identifier) and a scientific-research metrics source as input and gives a dynamic citation counter badge image (and its markdown code) as output +4. 
Test and implement on a paper with code repository, on the README.md + + + +## Progress and Next Steps + + + + +1. Project was successful +2. Implementation here: +[https://gist.github.com/mauigna06/81a593644ec46e520adf4a7561d2075e](https://gist.github.com/mauigna06/81a593644ec46e520adf4a7561d2075e) +3. Use `badgeCreator` function on python + + + +# Illustrations + + + + +- Dynamic badge _with_ link reference: [![auto-commit-msg](https://img.shields.io/badge/dynamic/json?label=Citations&query=%24.citationCount&url=https%3A%2F%2Fapi.semanticscholar.org%2Fgraph%2Fv1%2Fpaper%2FDOI%3A10.1016%2Fj.stlm.2023.100109%3Ffields%3DcitationCount)](https://www.sciencedirect.com/science/article/pii/S2666964123000103#section-cited-by) ([code](https://github.com/SlicerIGT/SlicerBoneReconstructionPlanner/blob/8351095086965ad68db3b9257f3e356c13148e49/README.md?plain=1#L15)) +- Dynamic badge _without_ link reference: ![auto-commit-msg](https://img.shields.io/badge/dynamic/json?label=Citations&query=%24.citationCount&url=https%3A%2F%2Fapi.semanticscholar.org%2Fgraph%2Fv1%2Fpaper%2FDOI%3A10.1016%2Fj.stlm.2023.100109%3Ffields%3DcitationCount) +- Demo picture below of repository of [BoneReconstructionPlanner](https://github.com/SlicerIGT/SlicerBoneReconstructionPlanner/tree/main#bonereconstructionplanner) extension +![Image](https://github.com/user-attachments/assets/8d4ddff1-1784-442a-84e1-996052371e35) +- Dynamic badge for Slicer's paper: [![auto-commit-msg](https://img.shields.io/badge/dynamic/json?label=Citations&query=%24.citationCount&url=https%3A%2F%2Fapi.semanticscholar.org%2Fgraph%2Fv1%2Fpaper%2FDOI%3A10.1016%2Fj.mri.2012.05.001%3Ffields%3DcitationCount)](https://www.sciencedirect.com/science/article/abs/pii/S0730725X12001816?via%3Dihub#preview-section-cited-by) + + + +# Background and References + + + + +- [https://shields.io/badges/dynamic-json-badge](https://shields.io/badges/dynamic-json-badge) +- 
[https://www.semanticscholar.org/](https://www.semanticscholar.org/) +- [https://openalex.org/](https://openalex.org/) +- [https://www.crossref.org/](https://www.crossref.org/) +- [https://github.com/SlicerIGT/SlicerBoneReconstructionPlanner/tree/main#bonereconstructionplanner](https://github.com/SlicerIGT/SlicerBoneReconstructionPlanner/tree/main#bonereconstructionplanner) + diff --git a/PW43_2025_Montreal/Projects/EvaluateTheFitOfPreformedPlatesInOrbitalSurgery/README.md b/PW43_2025_Montreal/Projects/EvaluateTheFitOfPreformedPlatesInOrbitalSurgery/README.md new file mode 100644 index 000000000..c6f03fd5e --- /dev/null +++ b/PW43_2025_Montreal/Projects/EvaluateTheFitOfPreformedPlatesInOrbitalSurgery/README.md @@ -0,0 +1,128 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Evaluate the fit of preformed plates in orbital surgery +category: VR/AR and Rendering + +key_investigators: + +- name: Chi Zhang + affiliation: Texas A&M College of Dentistry + +- name: Braedon Gunn + affiliation: Texas A&M College of Dentistry + +- name: Andrew Read-Fuller + affiliation: Texas A&M College of Dentistry + +--- + +# Project Description + + + + +Orbital fracture repair commonly used preformed/contoured plates. ProprietaryVirtual Surgical Planning (VSP) soft focused on customized plate manufacture and placement guidance but does not allow comparing the fit of plates across vendors. The goal of this project is to allow surgeons to interactively try different ways of placement or different plates and compare the fit to find the more suitable implant. I created a customized extension [orbiSurgerySim](https://github.com/chz31/orbitSurgerySim). + +**Current procedures:** +1. Alignment using the Interaction Transform Handle: rotate around the "posterior stop" landmark until the plate sits just above the. 
Real-time heatmap using Probe Volume with Model module and VTK collision detector can mark intersections to aid plate position adjustment. + +&#13;

+![](https://github.com/user-attachments/assets/b79071c2-5518-418f-9c8e-99d3b005ab35) +![](https://github.com/user-attachments/assets/89116733-eb03-4823-b2d5-d836432187d1)
+![](https://github.com/user-attachments/assets/76affefe-9e47-48c7-aa54-53a65f05f13f) +![](https://github.com/user-attachments/assets/50178264-6ab7-4bf8-b596-24dfd9344a5f)
+

+ +2. Reconstructing the fractured orbit using the mirror of the contralateral side: use DentalSegmentator to segment the bone. I created a module to streamline reconstruction by using the mirrored contralateral side to rigidly register to fracture side followed by a warping. + +

+![](https://github.com/user-attachments/assets/bf411572-7a98-462c-ba84-7f8d65859229) +![](https://github.com/user-attachments/assets/790efb9d-8838-4b48-bbd1-0f714eed0da6) +![](https://github.com/user-attachments/assets/65be864e-4556-4bdd-af5b-68371d8c752d) +![](https://github.com/user-attachments/assets/3bebb56c-b7c0-495d-84e1-c5ee6255ee52) +

+ + +3. Measuring fit by projecting fiducial points on plate edges to the reconstructed orbit. Fit is based on overall distances. Overall distance map can also be used. These distances can also be used to highlight largest deviation. + +

+![](https://github.com/user-attachments/assets/62e02a60-b94d-4ddd-9cdb-55e9ffc53cea) +![](https://github.com/user-attachments/assets/c94f156f-c6a8-4570-ac2d-0b20a9021761)
+![](https://github.com/user-attachments/assets/649076d2-9c08-48e0-8a12-1ce0cead75b6) +![](https://github.com/user-attachments/assets/ed204c08-2bb0-49c4-bd27-f8745a78a337) +![](https://github.com/user-attachments/assets/854b8d7d-04f1-44c4-8173-61de3bc9162f) +

+ + + +## Objective + + + +The current workflow is tedious. My goal is to simplify the process. +1. Design UI and workflow to streamline the processes +2. Alternative methods for plate registrastion to ensure the plate sits at the surface of the bone (e.g., methods from SlicerHeart). +3. Improve and design additional metrics for measuring the fit of different plates and their ways of placement. +4. Explore additional or improved methods for orbit reconstruction (e.g., using SlicerAntsPy for image deformable registration). + + +## Approach and Plan + + + + +1. Same as objectives +2. Simplify segmentation workflows for thin orbital bones. In some cases, the orbital floor and medial wall are too thin, and only a boundary between the sinus and orbit is visible. I segmented maxillary sinus, expand it, and create a shell to fix the orbit. Perhaps models used in DentalSegmentator or TotalSegmentator can be tuned to depict the boundary between sinus & orbit. +

+![](https://github.com/user-attachments/assets/da70f728-1d4a-45ac-9f0b-5922ba5e4307) +![](https://github.com/user-attachments/assets/176ebcb4-1e35-426e-ae73-d39c4757808f) +

+ +

+![](https://github.com/user-attachments/assets/27016320-ba74-433b-a5bb-8e988c395ab5) +![](https://github.com/user-attachments/assets/a65a172e-12f7-47da-ad43-84c3a44dfa7d) +

+ + + +## Progress and Next Steps + +1. Demonstrated for workflow and collected suggestions for further improvement and simplification +2. Add instant distant map during plate position adjustment. Tried texture map alignment. +3. Add more visual support to highlight differences between different plates +4. Start to collect data from plates from two vendors, gather further user feedback, and prepare for manuscript. + + + + + +# Illustrations + +Johnson&Johnson DePuy Synthesis preformed titanium plate:
+![](https://github.com/user-attachments/assets/f1402cf8-5581-4835-a829-133c03cfc594) +![](https://github.com/user-attachments/assets/8b631448-c285-4c72-8609-75d3190db24f) + +

+![](https://github.com/user-attachments/assets/5c46f298-f059-4c4c-8114-4f21906f9dd2) +

+Surgical Guidance using MatrixOrbital preformed plates from DePuy (see below for reference). + +

+![](https://github.com/user-attachments/assets/66a054ca-7751-4fe7-8c82-94ab1da61509) +

+Transconjunctival approach for retracting orbital tissue. From: [https://surgeryreference.aofoundation.org/cmf/pediatric-trauma/midface/orbital-floor/reconstruction](https://surgeryreference.aofoundation.org/cmf/pediatric-trauma/midface/orbital-floor/reconstruction) + + + +# Background and References + +Initial Project Week version [https://projectweek.na-mic.org/PW41_2024_MIT/](https://projectweek.na-mic.org/PW41_2024_MIT/) + +Github for the customized rbitSurgerySim extension: [https://github.com/chz31/orbitSurgerySim](https://github.com/chz31/orbitSurgerySim) + +Surgical Guidance using MatrixOrbital preformed plates from DePuy Synthesis: [https://www.jnjmedtech.com/en-US/product/matrixorbital-preformed-orbital-plates](https://www.jnjmedtech.com/en-US/product/matrixorbital-preformed-orbital-plates) + +Orbital floor and wall fracture repair guidance: [https://surgeryreference.aofoundation.org/cmf/pediatric-trauma/midface/orbital-floor/reconstruction](https://surgeryreference.aofoundation.org/cmf/pediatric-trauma/midface/orbital-floor/reconstruction) diff --git a/PW43_2025_Montreal/Projects/EvaluatingConcordanceOfAiBasedAnatomySegmentationModels/README.md b/PW43_2025_Montreal/Projects/EvaluatingConcordanceOfAiBasedAnatomySegmentationModels/README.md new file mode 100644 index 000000000..408ce96cb --- /dev/null +++ b/PW43_2025_Montreal/Projects/EvaluatingConcordanceOfAiBasedAnatomySegmentationModels/README.md @@ -0,0 +1,153 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Evaluating concordance of AI-based anatomy segmentation models +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Lena Giebeler + affiliation: BWH/RWTH Aachen + country: USA/Germany + +- name: Deepa Krishnaswamy + affiliation: BWH + country: USA + +- name: Ron Kikinis + affiliation: BWH + country: USA + +- name: David Clunie + affiliation: PixelMed Publishing + country: USA + +- name: Andrey Fedorov + 
affiliation: BWH + country: USA + +--- + +# Project Description + + + + +Quantitative analysis of large-scale medical imaging datasets can be streamlined using automated segmentation. The growing number of AI-based methods for anatomical segmentation raises a central challenge of choosing among functionally similar models due to: the absence of ground truth data for representative samples, and practical challenges in comparing segmentation results (inconsistent structure naming, non-uniform formats, and complexity of visualization). Our work alleviates these issues by evaluating six open-source segmentation models—TotalSegmentator 1.5 and 2.6, Auto3DSeg, Moose, MultiTalent, and OMAS—on a sample of CT scans from the publicly available National Lung Screening Trial (NLST) dataset. We analyzed 31 anatomical structures—lungs, vertebrae, ribs, and heart—after harmonizing segmentation results to follow consistent representation. To support visual comparison, we developed open-source tools in 3D Slicer automating loading, structure-wise inspection and comparison across models. For quantitative comparison we evaluated consensus segmentations per structure and assessed model agreement using Dice similarity and volume differences. Preliminary results show excellent agreement segmenting some (e.g., lung) but not all structures (e.g., some models produce invalid vertebrae or rib segmentations). Only one model, Moose, segmented the costovertebral joints—rib-to-spine connections. Overall, this work assists in model evaluation in absence of ground truth, ultimately enabling informed model selection. + + + +## Objective + + + + +This project builds upon the previous work "Review of segmentation results quality across various multi-organ segmentation models", conducted during the last Project Week in Gran Canaria. The goal is to systematically evaluate and compare the segmentations of six publicly available multi-organ segmentation models. 
This evaluation is done by identifying areas of agreement and disagreement across anatomical structures in our dataset, for which ground truth segmentations are unavailable. + +During this Project Week, we will improve upon and extend the previous analysis by extending the scope of comparison, and engaging with the users of the evaluated models and model developers. + + + +## Approach and Plan + + + + +1. Discuss the current analysis results with developers and interested community members + * discuss problematic results + * learn about other observed or potential errors we should investigate + * discuss approaches for selecting representative test data sample from NLST + * collect feedback, observations, and suggestions for improvement +2. Improve and extend the analysis based on this discussion +3. Secondary: Slicer Segmentation Verification module extension used for visual comparison + * revisit loading of DICOM SEG as Segmentation and see if any optimizations can be implemented to the code to speed up Segmentation node creation (need to profile the load process further first) + * discuss developed new features designed to simplify the process of comparison of the results from different models + * discuss possible improvements to the performance of populating Segmentation node loaded from DICOM SEG + + + +## Progress and Next Steps + + + +Current Status of our Analysis: +- Preliminary results show excellent agreement for the lungs. +- Inconsistencies were found in the segmentation of vertebrae and ribs: 4 out of 6 models produce invalid or anatomically implausible segmentations for these structures. +- Only one model, Moose, includes segmentation of the costovertebral joints (rib-to-spine connections). +- OMAS uses a different anatomical definition for the heart compared to the other models, leading to differences in the heart segmentation. 
+ +The Slicer Segmentation Verification Module Extension currently: +- Displays available segmentations based on the selected volume. +- Visualizes chosen segmentations in both 3D and 2D slice views. +- Allows switching between horizontal (default) and vertical layout. +- Adds functionality to automatically link 3D views via checkbox. +- Provides option to link 2D views using a checkbox. +- Enables displaying only segmentation outlines in 2D slice views. + +Over the past week, I discussed with community members how my extension could be improved and how the project could be adapted for use in other workflows. A key outcome of these conversations was the positive response to the dynamic layout configuration. + +**Suggested Improvements for the Extension - Key Points:** +* Providing an overview table of which segmentations are shown in the different views. +* Increasing the line thickness of the 2D outlines when only outlines are displayed, as they can sometimes be difficult to see. +* Making the layout more flexible (e.g., allowing 3D views to be placed on the right side as well). +* Simplifying the layout into a more user-friendly version. +* Adding information to the module about which models the extension has been tested with and clearly stating that the extension currently only supports DICOM input. +* Including cross-referencing. + +**Analysis – Key Points** +* In discussions about the analysis, it became clear that the vertebra segmentation issues observed in Auto3DSeg also occur in other datasets. +* A suggested improvement was to manually segment a few CT scans in order to have at least a small set of ground truth data. + * This is not going to be pursued due to limited resources and the time required. +* It was also suggested that including the surface area of each structure in the analysis could be valuable. 
+ + +**Dashboard Development:** +To identify a representative test sample for repeating our analysis, we started building a dashboard to better understand the distribution of metadata across the dataset. +From the clinical data, we included Age, Gender, Race, and Smoking status. In addition, we incorporated Manufacturer, ManufacturerModelName, Convolution Kernel, Pixel Spacing, and Slice Thickness. +Initially, we began developing the dashboard using Google Looker Studio, but it quickly became apparent that Looker only offers a few plot types and has limitations when displaying data with high variability, such as Pixel Spacing (which has over 500 unique values). For this reason, we transitioned to Dash. One drawback of Dash, however, is that it requires deploying a local server for use. + + +**Future Work** +* We plan to improve the extension based on the feedback and suggestions we received from the community. +* Once the extension is more robust, we aim to publish it. +* Conduct our analysis on a larger and more representative dataset, which we will define based on insights gained from the metadata dashboard. 
+ + + + +# Illustrations + + + +An interactive poster with a summary of the results and the current state of the project can be found at the following link: +[https://www.dropbox.com/scl/fi/c84sm9djytyi80jk2ixfa/giebeler.lena.pptx?rlkey=g3sf82zuv5fgmuog0an3dsy96&dl=0](https://www.dropbox.com/scl/fi/c84sm9djytyi80jk2ixfa/giebeler.lena.pptx?rlkey=g3sf82zuv5fgmuog0an3dsy96&dl=0) + +**Current Status of the Slicer Segmentation Verification Module Extension:** + + + + + + + +# Background and References + + + + +* This project is continuing earlier PW42 project ["Review of segmentation results quality across various multi-organ segmentation models"]( https://projectweek.na-mic.org/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/) +* [My fork of the Segmentation Verification Module](https://github.com/LenaGiebeler/SlicerSegmentationVerification) + * PR adding new features upstream: [https://github.com/cpinter/SlicerSegmentationVerification/pull/3](https://github.com/cpinter/SlicerSegmentationVerification/pull/3) +* AI segmentation models (Git repositories): + * [TotalSegmentator](https://github.com/wasserth/TotalSegmentator) + * [Auto3DSeg](https://github.com/Project-MONAI/tutorials/tree/main/auto3dseg) + * [Moose](https://github.com/ENHANCE-PET/MOOSE), and [MultiTalent](https://github.com/MIC-DKFZ/MultiTalent) + diff --git a/PW43_2025_Montreal/Projects/ExtractionOfOrofacialPainComorbiditiesFromClinicalNotesUsingLargeLanguageModels/README.md b/PW43_2025_Montreal/Projects/ExtractionOfOrofacialPainComorbiditiesFromClinicalNotesUsingLargeLanguageModels/README.md new file mode 100644 index 000000000..657ce6d52 --- /dev/null +++ b/PW43_2025_Montreal/Projects/ExtractionOfOrofacialPainComorbiditiesFromClinicalNotesUsingLargeLanguageModels/README.md @@ -0,0 +1,133 @@ +--- +layout: pw43-project + +permalink: /:path/ + 
+project_title: Extraction of Orofacial Pain Comorbidities from Clinical Notes Using Large Language + Models +category: Quantification and Computation + +key_investigators: + +- name: Alban Gaydamour + affiliation: University of Michigan + country: USA + +- name: Lucia Cevidanes + affiliation: University of Michigan + country: USA + +- name: Steve Pieper + affiliation: Isomics + country: USA + +- name: David Hanauer + affiliation: University of Michigan + country: USA + +- name: Juan Prieto + affiliation: University of North Carolina + country: USA + +- name: Lucie Dole + affiliation: University of North Carolina + country: USA + +--- + +# Project Description + + + + +Temporomandibular Disorders (TMDs) are often linked with complex comorbidities that are difficult to extract from long free-text clinical notes. This project leverages Large Language Models (LLMs) to identify and summarize these comorbidities, enabling structured analysis and visualization across patient cohorts. + + + +## Objective + + + + +1. Fine-tune open-source LLMs to extract a curated list of TMD-related comorbidities from clinical notes. +2. Generate structured patient-level outputs from model predictions. +3. Visualize comorbidity data using an interactive dashboard. +4. Compare model performance to determine the most clinically effective approach. + + + +## Approach and Plan + + + + +1. Annotate clinical notes with summaries across 56 comorbidity criteria. +2. Fine-tune LLMs such as `facebook/bart-large-cnn` and `deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B` using chunked note inputs. +3. Generate structured outputs and compile them into a CSV. +4. Visualize cohort-level trends using a Python-based dashboard. +5. Evaluate model performance and deploy the tool to be accessible in 3D Slicer. + + + +## Progress and Next Steps + + + + +1. Deidentified clinical notes were obtained and manually summarized for 500. +2. 
Fine-tuned `facebook/bart-large-cnn` and `deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B` on these summaries to generate structured outputs across 56 comorbidity fields. +3. Generated CSV outputs from model summaries and created a dashboard to visualize cohort-level patterns. +4. Currently working on fine-tuning larger models and expanding the dataset. +5. Next steps include completing 500 patient summaries, comparing model performance, and deploying the tool for use in 3D Slicer. + + + +# Illustrations + + + + +### Table 1. Metrics from BART training + +|Fold|ROUGE-1|ROUGE-2|ROUGE-L|ROUGE-L sum| +|---|---|---|---|---| +|Fold1|83.68|71.99|83.50|83.49| +|Fold2|83.48|73.40|83.11|83.14| +|Fold3|84.93|74.23|84.38|84.57| +|Fold4|85.50|74.73|85.11|85.21| +|Fold5|85.47|74.64|84.98|85.01| +|Average|84.61|73.80|84.22|84.29| + +### Table 2. Metrics from DeepSeek training + +|Fold|ROUGE-1|ROUGE-2|ROUGE-L|ROUGE-L sum| +|---|---|---|---|---| +|Fold1|86.55|86.49|86.53|86.54| +|Fold2|84.90|84.79|84.86|84.82| +|Fold3|86.08|86.09|86.10|86.11| +|Fold4|85.96|85.91|85.95|85.92| +|Fold5|85.21|85.70|85.17|85.21| +|Average|85.74|85.70|85.72|85.72| + +### Figure 1. Dashboard summary from 500 cases extracted manually +![Dashboard summary from 500 cases extracted manually](https://github.com/user-attachments/assets/29e17ece-13d4-417a-ae64-955ce6d66cfc) + +### Figure 2. Dashboard summary from 500 cases extracted by fine-tuned BART +![Dashboard summary from 500 cases extracted automatically by BART](https://github.com/user-attachments/assets/b0edb217-d825-4a37-ac95-3226689d7c1a) + +### Figure 3. 
Dashboard summary from 500 cases extracted by fine-tuned DeepSeek +![Dashboard summary from 500 cases extracted automatically by DeepSeek](https://github.com/user-attachments/assets/54e0cb4b-d307-4b38-ba45-02f99a906ed4) + + + +# Background and References + + + + +- Github Page: [https://github.com/DCBIA-OrthoLab/MedEx](https://github.com/DCBIA-OrthoLab/MedEx) +- Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension. *Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics*, pages 7871–7880. +- DeepSeek-AI, Guo D, Yang D, et al. DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning. Preprint at *arXiv*, 2025. Available from: [https://arxiv.org/pdf/2501.12948](https://arxiv.org/pdf/2501.12948). 
+ diff --git a/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/README.md b/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/README.md new file mode 100644 index 000000000..1c9e95ede --- /dev/null +++ b/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/README.md @@ -0,0 +1,93 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Fine-Tuning SimCortex on Expert-Annotated Cortical Surfaces for Enhanced Topological Accuracy +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Kaveh Moradkhani + affiliation: École de Technologie Supérieure + country: Canada + +- name: Sylvain Bouix + affiliation: École de Technologie Supérieure + country: Canada + +- name: Jarrett Rushmore + affiliation: Boston University Medical + country: USA +--- + +# Project Description + +SimCortex is a deep-learning framework that reconstructs all four cortical surfaces (left/right white matter and pial) from T1-weighted MRI, with a focus on minimizing inter-surface collisions and self-intersections while maintaining high geometric fidelity. To improve robustness and generalization, we fine-tune SimCortex—originally trained on FreeSurfer-generated segmentations—using a set of 50 expert-annotated MRI volumes. + +--- + +## Objectives + +1. **Fine‐tune SimCortex with high‐quality, manually labeled data** + Fine-tune the pre‐trained SimCortex model using 50 expert‐annotated MRI segmentations to improve anatomical accuracy and reduce geometric artifacts. + +2. **Compare fine‐tuned variants against the baseline** + Evaluate several fine‐tuned configurations using geometric metrics (Chamfer Distance, ASSD, HD) and topological consistency (SIF), and compare them to the baseline SimCortex model. + +3. 
**Visually validate reconstructions in 3D Slicer** + Load predicted cortical surfaces into 3D Slicer and assess anatomical plausibility with expert guidance. + +--- + +## Approach and Plan + +1. **Preprocessing:** Convert expert-segmented MRI volumes into a format compatible with SimCortex. +2. **Model Fine-Tuning:** Use manual segmentations to fine-tune the original SimCortex model. +3. **Evaluation:** Measure Chamfer, ASSD, HD, and %SIF for each configuration. +4. **Baseline Comparison:** Compare all metrics with the baseline SimCortex model. +5. **Expert Review:** Collaborate with Prof. Jarrett Rushmore for visual validation. +6. **Selection:** Choose the best configuration based on both quantitative metrics and expert review. + +--- + +## Progress and Next Steps + +We fine-tuned the pre-trained SimCortex model (`SimCortex_M`) using 50 high-quality, expert-annotated MRI segmentations. The model was evaluated against the original SimCortex baseline using geometric and topological metrics, including **Chamfer Distance**, **Hausdorff Distance (HD)**, **Average Symmetric Surface Distance (ASSD)**, and **Self-Intersection Fraction (SIF)**. + +Both **quantitative** and **visual evaluations** demonstrate the benefits of fine-tuning: + +- `SimCortex_M` achieves **lower errors** across all evaluated metrics. +- The **red surface** (fine-tuned) shows better alignment with the **turquoise manual ground truth**, while the **yellow surface** (baseline) exhibits greater deviation. +- These results confirm that using expert-labeled data leads to improved anatomical and topological accuracy. + +## 🚀 Future Direction +- As a next step, we plan to develop a more robust version of SimCortex as a **3D Slicer extension**. This extension will aim to produce more accurate cortical surfaces with **minimal inter-surface collisions** and **eliminate self-intersections**, enhancing its usability for both clinical and research workflows. 
+--- + +# Illustrations + +## 📊 Quantitative Evaluation Results + +The table below compares baseline and fine-tuned model performance across all surfaces. + +![Quantitative Comparison Table](comparison_table.png) + +## 🧠 Visual Comparison of Cortical Surfaces + +This Figure illustrates the differences between ground truth and predictions. + +- **Cyan:** Manual ground truth +- **Red:** Fine-tuned model (SimCortex_M) +- **Yellow:** Baseline model (SimCortex) + +### 🧩 Full Brain View + +![Zoomed-In Surface Overlay](visual_results_1.png) + +### 🔍 Zoomed-In View + +![Whole-Brain Surface Overlay](visual_results_3.png) + +### 🧠 Reconstructed Cortical Surfaces (Fine-Tuned Model) +![Whole-Brain Surface Overlay](Reconstracted_Surfaces_1.png) diff --git a/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/Reconstracted_Surfaces.png b/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/Reconstracted_Surfaces.png new file mode 100644 index 000000000..5641c2aab Binary files /dev/null and b/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/Reconstracted_Surfaces.png differ diff --git a/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/Reconstracted_Surfaces_1.png b/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/Reconstracted_Surfaces_1.png new file mode 100644 index 000000000..d5d0d3393 Binary files /dev/null and b/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/Reconstracted_Surfaces_1.png differ diff --git a/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/comparison_table.png 
b/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/comparison_table.png new file mode 100644 index 000000000..1313787c0 Binary files /dev/null and b/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/comparison_table.png differ diff --git a/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/visual_results_1.jpg b/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/visual_results_1.jpg new file mode 100644 index 000000000..d49fcddc5 Binary files /dev/null and b/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/visual_results_1.jpg differ diff --git a/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/visual_results_1.png b/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/visual_results_1.png new file mode 100644 index 000000000..694a15f00 Binary files /dev/null and b/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/visual_results_1.png differ diff --git a/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/visual_results_2.jpg b/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/visual_results_2.jpg new file mode 100644 index 000000000..317726039 Binary files /dev/null and b/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/visual_results_2.jpg differ diff --git a/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/visual_results_3.png 
b/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/visual_results_3.png new file mode 100644 index 000000000..9dca7af9a Binary files /dev/null and b/PW43_2025_Montreal/Projects/FineTuningSimcortexOnExpertAnnotatedCorticalSurfacesForEnhancedTopologicalAccuracy/visual_results_3.png differ diff --git a/PW43_2025_Montreal/Projects/FreesurferSurfaceCorrectionScriptImprovement/README.md b/PW43_2025_Montreal/Projects/FreesurferSurfaceCorrectionScriptImprovement/README.md new file mode 100644 index 000000000..16284b35f --- /dev/null +++ b/PW43_2025_Montreal/Projects/FreesurferSurfaceCorrectionScriptImprovement/README.md @@ -0,0 +1,87 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Freesurfer surface correction script improvement +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Benoît Verreman + affiliation: ETS + +- name: Jarrett Rushmore + affiliation: Boston University School of Medicine + +- name: Sylvain Bouix + affiliation: ETS + +--- + +# Project Description + + + + +The project consists of improving FreeSurfer-generated white and pial surfaces, by adding manual segmentation information. +Some outputs from recon-all still need to be generated by the script. + + + +## Objective + + + + +1. Check which outputs still need to be generated before upgrading the script +2. Check those outputs visually + + + + +## Approach and Plan + + + + +1. Jarrett Rushmore, neuro-anatomist, will evaluate the outputs generated by the script. +2. He will also give tips on how to make it easier for him and his colleagues to use the script. + + + + +## Progress and Next Steps + + + +1. Added dilation and erosion control arguments to solve Hippocampus-Amygdala VS cortex labeling +2. Added RS to script specific files to distinguish them from files compatible with recon-all +3. 
Checked the script outputs visually + + + + +# Illustrations + + + + + + +![2025-06-27_freeview_surfaces](https://github.com/user-attachments/assets/4990aedc-fef8-4bf5-a1aa-4a7161146c12) + + +# Background and References + + + + +_No response_ + diff --git a/PW43_2025_Montreal/Projects/GenerativeAiForDisplayLayoutHangingProtocols/README.md b/PW43_2025_Montreal/Projects/GenerativeAiForDisplayLayoutHangingProtocols/README.md new file mode 100644 index 000000000..dab4e4d1e --- /dev/null +++ b/PW43_2025_Montreal/Projects/GenerativeAiForDisplayLayoutHangingProtocols/README.md @@ -0,0 +1,137 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Generative AI for Display layout/ Hanging protocols +category: Infrastructure + +key_investigators: + +- name: Martin Bellehumeur + affiliation: Germany + +--- + +# Project Description + + + + +Generative AI / LLMs can today produce a good draft of report impressions based on learning the report history of the radiologist. + We believe that GenAI could today produce reasonably valid hanging protocols if it had enough training data. + + Therefore, we propose to standardize hanging protocol Gen AI training data. + This data would be used by PACS viewers or AI agents to infer the appropriate display of the images available. + + + Because of standardization, radiologists would no longer have to train/configure each viewer they encounter. + They (or their PACS admin) would instead provide access to their personal training data repository to the viewer software. + + +Ideally, the viewers would not only use the data for display but also provide the ability to add new hanging protocol training cases to the user's repository. + +We believe that having the full study/series/image metadata, in addition to the thumbnails, could allow AI to compete with rule-based systems, especially when those systems cannot be constantly maintained by highly skilled individuals. 
+ +Gen AI would also enable personal hanging protocols, which are almost impossible today because of maintenance costs. + + + +## Objective + + + +Raise interest in the radiology reporting community in the standardization of hanging protocol training data. + +Create proof of concept training data by adding the functionality to an open-source viewer. +This exercise should allow us to refine the training data format required and could support an IHE standard request/grant request. + + + + +## Approach and Plan + + +This week we will add a prototype extension to the OHIF viewer that will save display layout training data and load it. +Ideally, we would do the same in 3DSlicer and demonstrate interoperability. + + +### Criteria
1. The training data should be human readable. Therefore we would use DICOMweb/JSON encoding instead of DICOM binary. + +2. The training data should be portable. The size of the data should be small enough that radiologists can keep their own backup copy, modify it manually if they wish and create multiple repositories if they need to. + + +### Example training data format + +Each stored hanging protocol training data point would be composed of a folder containing: +1. A screenshot of the layout with privacy mode on. +2. A [hanging protocol DICOM information object](https://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.23.html) defining the layout and which series was assigned to which viewport (and other details). This object would not try to define rules for the hanging protocol but just store what the user selected. Also of interest is the work in [section V](https://dicom.nema.org/medical/dicom/current/output/chtml/part17/chapter_V.html). The hard work of defining monitor geometry, layouts, viewport types, viewport linking, etc., all of that already exists in the DICOM standard. +3. 
Separate folder for each study of the patient containing the metadata of each study/series/image(object) of the patient in [static DICOMweb](https://github.com/RadicalImaging/Static-DICOMWeb) format. Therefore each display set will also include a thumbnail image. Patient level information and bulk data (DICOM image data) would not be stored. +4. A way to identify which study was the current. + + +### Viewer support + +Viewers would at a minimum need to support reading the hanging protocol object and assign the series specified by the AI to the layout. Viewers that already support the existing hanging protocol DICOM standard object would be advantaged as they should be. + +The request for the hanging protocol object would be done by the worklist or the viewer. They would provide the same study/series metadata that is in the training data and the AI agent would return the most appropriate hanging protocol from your training data and assign series to the defined viewports in the hanging protocol object. + + +## Progress and Next Steps + + + + +I did not make progress on the original project but worked on a volume cropping (clipping?) tool for Cornerstone3D/OHIF that was requested by a customer. +The customer wants the tool to be given back to open source so it will be available shortly: + +![image](https://github.com/user-attachments/assets/5f316009-dcb1-4f82-895a-da06a19f5e1d) + +As a next step, I will attend the EuSoMII conference in October and try to find radiologists interested in GenAI for hanging protocols. + +I will attend the next NA-MIC in Las Palmas and prototype something beforehand. + +# Illustrations + +A hanging protocol AI agent could, using the studies and series level metadata of the current and prior studies, find the most appropriate layout. 
+ +## How Hanging Protocol AI Agents Work + + +``` ++-----------------------------------+ +---------------------+ +-------------------+ +| | | | | | +| PACS Viewer +-------->+ Hanging Protocol +-------->+ Display Layout | +| (current & prior study metadata | | AI Agent | | (images assigned | +| and thumbnails) | | (selects protocol | | to viewports) | +| | | and assigns series | | | ++-----------------------------------+ +---------------------+ +-------------------+ +``` +*The PACS viewer sends current and prior study metadata and thumbnails to the AI agent, which selects the best hanging protocol and returns the layout for display.* +*The PACS viewer sends study metadata and images to the AI agent, which selects the best hanging protocol and returns the layout for display.* + + + + +# Background and References + + + + +[hanging protocol DICOM information object](https://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.23.html) + +[section V](https://dicom.nema.org/medical/dicom/current/output/chtml/part17/chapter_V.html) + +[static DICOMweb](https://github.com/RadicalImaging/Static-DICOMWeb) + + + + + + + + + diff --git a/PW43_2025_Montreal/Projects/IconRefreshIn3DSlicer/README.md b/PW43_2025_Montreal/Projects/IconRefreshIn3DSlicer/README.md new file mode 100644 index 000000000..6854cbccb --- /dev/null +++ b/PW43_2025_Montreal/Projects/IconRefreshIn3DSlicer/README.md @@ -0,0 +1,94 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Icon Refresh in 3D Slicer +category: Infrastructure + +key_investigators: + +- name: Sam Horvath + affiliation: Kitware + country: USA + +--- + +# Project Description + + + + +We will be continuing the work to integrate the new icon set into 3D Slicer. + + + +## Objective + + + + +1. Finalize icon switching logic initially developed in last project week + + + + +## Approach and Plan + + + + +1. Rebase slicer integration branch and test with extension +2. 
Demo both programmed and UI file based icon usage in extension +3. Add in all new icons available from assets (w/updates) +4. Work on Python module approach +5. List bugs/issues with current approach + + + + +## Progress and Next Steps + + + +1. Added theme switching for Python modules +2. Integrated new icons in a single resource binary in QTGUI +3. Tested with Slicer nightly +4. Began integration of new icons in toolbars, module icons and subject hierarchy plugins + +### TODO + +1. Rework to pull slicer-media-assets at configure time +2. Address issues using new icons as cursors +3. Continue icon replacement + + + +# Illustrations + + + +![cropped](https://github.com/user-attachments/assets/e0480af6-a534-4678-9adf-805723171959) + +![toolbar](https://github.com/user-attachments/assets/d41f14cf-2ede-486b-91ae-d6a5bf9420b4) + +![welcome](https://github.com/user-attachments/assets/22ef6882-4e46-49c3-ba06-77361918e5c1) + + + + +[Light Theme Icon Index](https://projectweek.na-mic.org/PW42_2025_GranCanaria/Projects/FinalizeSlicerIconSetUpdateInfrastructure/LightThemeIconsIndex.html) + +[Dark Theme Icon Index](https://projectweek.na-mic.org/PW42_2025_GranCanaria/Projects/FinalizeSlicerIconSetUpdateInfrastructure/DarkThemeIconsIndex.html) + + + +# Background and References + + + + +- [PW 41 Project Page](https://projectweek.na-mic.org/PW41_2024_MIT/Projects/UpdatedIconsAndThemeSwitching/) +- [PW 42 Project Page](https://projectweek.na-mic.org/PW42_2025_GranCanaria/Projects/FinalizeSlicerIconSetUpdateInfrastructure/) diff --git a/PW43_2025_Montreal/Projects/ImportingAndDisplayingDicomStructuredReportsInSlicer/README.md b/PW43_2025_Montreal/Projects/ImportingAndDisplayingDicomStructuredReportsInSlicer/README.md new file mode 100644 index 000000000..c8a1537e7 --- /dev/null +++ b/PW43_2025_Montreal/Projects/ImportingAndDisplayingDicomStructuredReportsInSlicer/README.md @@ -0,0 +1,118 @@ +--- +layout: pw43-project + +permalink: /:path/ + 
+project_title: Importing and displaying DICOM Structured Reports in Slicer +category: DICOM + +key_investigators: + +- name: Deepa Krishnaswamy + affiliation: Brigham and Women's Hospital + country: USA + +- name: Steve Pieper + affiliation: Isomics + country: USA + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +- name: Chris Bridge + affiliation: Massachusetts General Hospital + country: USA + +- name: Ron Kikinis + affiliation: Brigham and Women's Hospital + country: USA + +- name: Andrey Fedorov + affiliation: Brigham and Women's Hospital + country: USA + +--- + +# Project Description + + + + +In TCIA, multiple collections include CSV files with different measurements. For instance, the [ProstateX collection](https://www.cancerimagingarchive.net/collection/prostatex/) has information about the Gleason score and target biopsy points. Other annotations, such as the lesion bounding boxes for NLST from [Sybil](https://github.com/reginabarzilaygroup/Sybil/tree/main) are in json format. + +There is no easy way to use these measurements. If we want to use them in Imaging Data Commons (IDC), the data needs to be in a standardized format like DICOM Structured Reports (SRs). From the last project week [here](https://projectweek.na-mic.org/PW42_2025_GranCanaria/Projects/CreatingDicomCompatibleCancerAnnotationsForNlst/), we have already created SRs for a few of these collections that hold points and bounding boxes. + +However, right now, Slicer can only load a specific type of SR. We are currently working on adding functionality to load SRs to load and display points, boxes, and lines. + + + +## Objective + + + + +We will create SRs to hold points and bounding boxes, and modify the DICOMTID1500Plugin to load and display these measurements as markups. + + + +## Approach and Plan + + + + +1. Create SRs for the ProstateX (csv from TCIA) and NLST (Sybil) collections. +2. 
Modify the DICOMTID1500Plugin.py in [QuantitativeReporting](https://github.com/QIICR/QuantitativeReporting/blob/master/DICOMPlugins/DICOMTID1500Plugin.py) to read these SRs and display as markups. +3. Show a table holding the measurements. +4. Display the markups appropriately in the subject hierarchy. + + + +## Progress and Next Steps + + + +1. We have created SRs for ProstateX (points) and NLST Sybil (bounding boxes). +2. We have modified the plugin to load points, boxes, and lines. +3. We have loaded the markups in a folder in the subject hierarchy and have displayed the table. +4. Currently, we are working on loading the correct referenced series. + +I had some good discussions this week with Ron, Steve, and Andras about how to improve the extension. + +Summary of the discussion: +- How to effectively display the different annotations +- How to store data describing each of the annotations - node attributes vs table +- Improvements in the subject hierarchy - making sure table/markups are linked to the series/study +- How this interface can be used to later save SRs +- How this work can fit into the larger scheme of data exploration +- How to modify and preconfigure the layout for each of the types of annotations, for instance, below for bounding boxes: +- ![](https://github.com/user-attachments/assets/1b8d855b-f4bf-4f17-a06d-c1be873904dc) + +# Illustrations + + + +Example of a DICOM SR with points - ProstateX biopsy target points +![](https://github.com/user-attachments/assets/1ecccd9c-94eb-4dcf-a9a7-c27acc6af019) + +Example of a DICOM SR with bounding boxes - Sybil lesion annotations for NLST + + +Example of a DICOM SR with lines +![](https://github.com/user-attachments/assets/cd8a5926-90b1-4e18-8d96-651bcbd2bc6a) + + + +# Background and References + + + +- [Slicer discourse discussion](https://discourse.slicer.org/t/loading-and-displaying-dicom-structured-reports/42754/7) +- [My fork of 
QuantitativeReporting](https://github.com/deepakri201/QuantitativeReporting/blob/master/DICOMPlugins/DICOMTID1500Plugin.py) +- [Creating SRs from last project week](https://projectweek.na-mic.org/PW42_2025_GranCanaria/Projects/CreatingDicomCompatibleCancerAnnotationsForNlst/) diff --git a/PW43_2025_Montreal/Projects/ImprovementsOfSlicertms/README.md b/PW43_2025_Montreal/Projects/ImprovementsOfSlicertms/README.md new file mode 100644 index 000000000..1031393a6 --- /dev/null +++ b/PW43_2025_Montreal/Projects/ImprovementsOfSlicertms/README.md @@ -0,0 +1,121 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Improvements of SlicerTMS +category: Infrastructure + +key_investigators: + +- name: Lipeng Ning + affiliation: Brigham and Women's Hospital and Harvard Medical School + country: USA + +- name: Steve Pieper + affiliation: Isomics Inc. + country: USA + +- name: Tae Young Park + affiliation: TruAbutment Inc. + country: USA + +- name: Daniel Haehn + affiliation: University of Massachusetts Boston + country: USA + +- name: Benjamin Zwick + affiliation: The University of Western Australia + country: Australia + +- name: Satya Barak + affiliation: The University of Western Australia + country: Australia + +- name: Cameron Paterson + affiliation: The University of Western Australia + country: Australia + +--- + +# Project Description + + + + +The SlicerTMS project has been developed to predict the electric field induced by transcranial magnetic stimulation by using deep neural networks and magnetic resonance imaging data. In this project week, we further develop the software to improve the performance and integrate additional functions into this module. + + + +## Objective + + + + +1. Objective A. Improve the overall software architecture for integration with other electric field solvers. +2. Objective B. Develop and test an example on the integration of the SimNIBS solver. +3. Objective C. 
Validate and update the sampling algorithms in SlicerTMS and improve the file I/O strategy for vector nifti files. +4. Objective D. Discuss and improve the integration with neuronavigation and other fast segmentation and meshing techniques. +5. Objective E. Investigate the use of markerless tracking of the patient head and TMS probe for neuronavigation. + + + + +## Approach and Plan + + + + +1. Meet to review existing SlicerTMS software structure and other external solvers (e.g., SimNIBS). +2. Discuss and prototype an improved architecture. +3. Compare the vtk-based resampling with SimNIBS to improve the accuracy of SlicerTMS models. +4. Talk with other teams about related toolboxes, e.g., OpenIGTLink, for improvement. + + + +## Progress and Next Steps + +A very productive week! +* We discussed our TMS work with the community in relation to other navigation, TMS, and FEM projects +* We investigated and now better understand SimNIBS mesh file format conventions for gray/white matter and other tissue +* We tested and expanded our RPyC-based integration of SimNIBS with 3D Slicer for simulation of TMS. +* A small test model (~22K tetrahedra) can be simulated in real-time in SlicerTMS with a SimNIBS back end. +* A "clinical grade" simulation (~4M tetrahedra) can be simulated in about 3 seconds a frame. +* A reduced resolution field simulation with a full resolution anatomical display can be displayed in approximately 1 second per frame. +* The simulation was extended to include additional TMS coil configurations and display of the model in 3D Slicer. 
+ +![image](https://github.com/user-attachments/assets/c09f2676-030f-4843-96db-e36f68d0f73f) + +Next steps: +* Now we can extend our previous work by training and testing our real-time deep learning approximations to the FEM results "head to head" in a common software environment to assess accuracy and performance tradeoffs +* We have ordered navigation equipment so we can leverage the NousNav infrastructure to simulate navigated TMS +* We will work to incorporate our previous work into the new integrated framework: + * We will experiment with volume rendering and other e-field visualization methods + * We will explore the integration of SlicerDMRI tractography technology to investigate the white matter tracts influenced by TMS therapy +* We will extend our previous work + * We will work to integrate new segmentation methods, such as the new version of SynthSeg that can generate full-head tissue segmentations for a wider range of input data, possibly making patient-specific head models less expensive and thus expanding the availability of more precise TMS + * We will test novel tetrahedral mesh generation technology being developed by Will Schroeder at Kitware based on Sarah Frisken's SurfaceNets approach + * We will work with the NousNav team to optimize the price/performance of tracking cameras and related technologies in the hopes of making patient-specific neuronavigated TMS more widely available, with the possible outcome of improved patient response to therapy + * We will work with neurology and other specialties to better understand the challenges and potential applications + * We will streamline the user interface to facilitate experiments in these areas + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +- [SlicerTMS GitHub Repository](https://github.com/SlicerTMS/SlicerTMS) +- [3D Slicer](https://github.com/Slicer/Slicer) +- [Real-Time Visualization of TMS-evoked Potentials PW 
40 Project](https://projectweek.na-mic.org/PW40_2024_GranCanaria/Projects/RealTimeVisualizationOfTmsEvokedPotentials/) + diff --git a/PW43_2025_Montreal/Projects/ImprovementsOfTheSliceridcbrowserExtension/README.md b/PW43_2025_Montreal/Projects/ImprovementsOfTheSliceridcbrowserExtension/README.md new file mode 100644 index 000000000..b5a7f1d38 --- /dev/null +++ b/PW43_2025_Montreal/Projects/ImprovementsOfTheSliceridcbrowserExtension/README.md @@ -0,0 +1,81 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Improvements of the SlicerIDCBrowser extension +category: Infrastructure + +key_investigators: + +- name: Andrey Fedorov + affiliation: Brigham and Women's Hospital + country: USA + +- name: Kyle Sunderland + affiliation: Queen's University + country: Canada + +- name: Steve Pieper + affiliation: Isomics Inc. + country: USA + +--- + +# Project Description + + + +[SlicerIDCBrowser](https://github.com/ImagingDataCommons/SlicerIDCBrowser) is a 3D Slicer extension for exploring and downloading over 85TB of freely available image data from [NCI Imaging Data Commons](https://portal.imaging.datacommons.cancer.gov/explore). This project is about updating and improving this extension to make it more usable and easier to maintain. + + + +## Objective + + + +1. Plan refactoring of the extension +2. Improve features: add download progress reporting, simplify download by automatically detecting whether identifier specified by the user is collection/patient/study/series, support automatic loading of the images. +3. A discussion with Ben Zwick resurrected earlier ideas about embedding [IDC portal explore page](https://portal.imaging.datacommons.cancer.gov/explore/) in Slicer directly, and injecting functionality to trigger Slicer open directly from that embedded page. Will prioritize development of that feature, see https://github.com/ImagingDataCommons/SlicerIDCBrowser/issues/52. + + + +## Approach and Plan + + + + +1. 
Discuss with Kyle overall organization of the extension, estimate effort. +2. Meet with the users to collect feedback. +3. Prioritize development and start working on implementation. + +## Progress and Next Steps + + + +1. Andrey and Kyle discussed plans for improving the extension and defined priorities for the immediate development in [https://github.com/ImagingDataCommons/SlicerIDCBrowser/milestone/1](https://github.com/ImagingDataCommons/SlicerIDCBrowser/milestone/1). +3. Kyle contributed improvements to reduce delays to the startup of the extension in [https://github.com/ImagingDataCommons/SlicerIDCBrowser/pull/51](https://github.com/ImagingDataCommons/SlicerIDCBrowser/pull/51). + +# Illustrations + + + + + + + +(video from [https://www.youtube.com/watch?v=m_jfSTWIYvc](https://www.youtube.com/watch?v=m_jfSTWIYvc)) + +# Background and References + + + + +- [https://portal.imaging.datacommons.cancer.gov/](https://portal.imaging.datacommons.cancer.gov/) + diff --git a/PW43_2025_Montreal/Projects/InterpretableDeepLearningForTheDetectionAndClassificationOfImpactedCaninesAndSeverityOfRootResorption/README.md b/PW43_2025_Montreal/Projects/InterpretableDeepLearningForTheDetectionAndClassificationOfImpactedCaninesAndSeverityOfRootResorption/README.md new file mode 100644 index 000000000..8aeb39b56 --- /dev/null +++ b/PW43_2025_Montreal/Projects/InterpretableDeepLearningForTheDetectionAndClassificationOfImpactedCaninesAndSeverityOfRootResorption/README.md @@ -0,0 +1,104 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: 'Interpretable Deep Learning for the Detection and Classification of Impacted Canines + and severity of root resorption ' +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Enzo Tulissi + affiliation: University of Michigan + country: USA + +- name: Lucia Cevidanes + affiliation: University of Michigan + country: USA + +- name: Juan Prieto + 
affiliation: University of North Carolina + country: USA + +- name: Jonas Bianchi + affiliation: University of Pacific + country: USA + +--- + +# Project Description + + + + +The **CLIC** module uses a Mask R-CNN segmentation model to locate and classify impacted canines in CBCT scans. +This project extends CLIC by developing a supervised model to automatically classify the severity of root resorption in teeth adjacent to impacted canines. + + +## Objective + + + +1. **Segment impacted canines** using the existing CLIC module. +2. **Extract adjacent teeth** volumes for analysis. +3. **Assemble an annotated dataset** of adjacent teeth with clinician‐provided resorption severity scores. +4. **Train a classification model** to predict root resorption severity from segmented volumes. +5. **Integrate and visualize** segmentation plus severity scores within 3D Slicer. + + + +## Approach and Plan + + + + +1. Run CLIC across the CBCT dataset to isolate impacted canines. +2. Combine CLIC masks to extract volumes of adjacent teeth. +3. Annotate each extracted tooth volume with a severity label (mild, moderate, severe). +4. Extract geometric and morphological feature sets from each volume. +5. Train a classifier on these features. +6. Extend CLIC or create a new Slicer module for real‐time severity classification. +7. Validate model performance (accuracy, recall, precision) and integrate into the 3D visualization workflow. + + + + +## Progress and Next Steps + + + + +**Completed:** +- CLIC module validated. +- Prototype pipeline for adjacent tooth extraction established. + +**Next Steps:** +- Clinician annotation of extracted tooth volumes. +- Feature engineering and model training. +- UI design for severity score display in Slicer. +- Performance evaluation and final documentation. 
+ + + + +# Illustrations + + + + +![image](https://github.com/user-attachments/assets/6f588a90-3e77-440c-b5c9-9ef0c9550150) + +*Figure 1: Impacted canine segmentation results from CLIC* + + + +# Background and References + + + + +_No response_ + diff --git a/PW43_2025_Montreal/Projects/MassvisionExtensionNewFeaturesForMsiAnalysis/README.md b/PW43_2025_Montreal/Projects/MassvisionExtensionNewFeaturesForMsiAnalysis/README.md new file mode 100644 index 000000000..97be1d429 --- /dev/null +++ b/PW43_2025_Montreal/Projects/MassvisionExtensionNewFeaturesForMsiAnalysis/README.md @@ -0,0 +1,87 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: 'MassVision extension: new features for MSI analysis' +category: Quantification and Computation + +key_investigators: + +- name: Amoon Jamzad + affiliation: Queen's University + country: Canada + +--- + +# Project Description + + + + +MassVision is an extension in 3D Slicer for end-to-end AI-driven analysis of Mass Spectrometry Imaging (MSI) data. The current functionalities include data exploration via various targeted and non-targeted visualization, co-localization to spatial labels (histopathology annotations), dataset curation with spatial- and spectral-guidance, multi-slide dataset merge via feature alignment, denoising via spatial aggregation, AI model training and validation, and whole-slide AI deployment. + + + +## Objective + + + + +1. Add new functionalities to the to the extension +2. Optimize the current code and UI +3. Update the documentation + + + +## Approach and Plan + + + +1. Add statistical analysis tab + - Data distribution + - Boxplot + - ANOVA + - t-test + - Volcano plot + - Interactive table/plot + + +## Progress and Next Steps + + + + +1. MassVision version 1.0 is up and running +2. The statistical analysis tab has been implemented successfully + + + + +# Illustrations + + + + +

+ logo +

+ + + + +# Background and References + + + + +MassVision repo [https://github.com/jamzad/SlicerMassVision](https://github.com/jamzad/SlicerMassVision) + +MassVision manual [https://slicermassvision.readthedocs.io/](https://slicermassvision.readthedocs.io/) + diff --git a/PW43_2025_Montreal/Projects/MultidimensionalExplorerGeneralization/README.md b/PW43_2025_Montreal/Projects/MultidimensionalExplorerGeneralization/README.md new file mode 100644 index 000000000..1598c1cdb --- /dev/null +++ b/PW43_2025_Montreal/Projects/MultidimensionalExplorerGeneralization/README.md @@ -0,0 +1,91 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Multidimensional explorer generalization +category: Cloud / Web + +key_investigators: + +- name: Steve Pieper + affiliation: Isomics + country: USA + +--- + +# Project Description + + + +Several projects involve looking at clinical and imaging data for large collections, resulting in a multidimensional data space that can be difficult to quickly comprehend. + +This project explores the use of eCharts with Slicer to make interactive visualizations of this kind of data. + +Although we have some hand-coded examples for specific projects, it could be of interest to create a more generic tool using the Slicer dicomDatabase, tables, markups, and other data sources to generate visualizations. + + +## Objective + + + +During Project Week I would like to brainstorm with the community about various use cases and requirements. From this I would like to determine what kind of core features would support these and how they could be bundled and exposed in a Slicer extension. + +Since the current "UI" for creating these visualizations is a text editor, the ability to explore the data in this way is limited to people with both Python and JavaScript expertise. I would like to see if there are ways to build infrastructure to make it easier to apply these techniques with less programming required. 
It's possible that LLM tools like Gemini or DeepSeek can already perform this task. + +## Approach and Plan + + + +1. Identify use cases and discuss: + * [TMJ Dashboard]([url](https://projectweek.na-mic.org/PW43_2025_Montreal/Projects/ExtractionOfOrofacialPainComorbiditiesFromClinicalNotesUsingLargeLanguageModels/)) + * [Data from IDC]([url](https://projectweek.na-mic.org/PW43_2025_Montreal/Projects/UsingIdcAndAiForHypothesesExplorationInTheNlstCohort/)) + * Others TBD at project week +2. See how easy it would be to adapt the code for these use case and also see how hard it would be for people to create their own interactive charts for their data. +3. Try auto-generating these charts using LLM technology + + +## Progress and Next Steps + +1. Tried DeepSeek and Gemini to create charts and it was not helpful, so I just did it myself by looking at examples from IDC documentation and previous experiments. +2. Made a [sample script](https://gist.github.com/pieper/04b72eb1a60192a33c207fb73d9f7170) for echart parallel coordinates to demo IDC data exploration in a qSlicerWebWidget. + * The demo uses the idc_index package to summarize a high level view of IDC data + * Gist is posted as a reference + * Determined a limit to the size of data that can be displayed in a qSlicerWebWidget (around 1000) but 60K can be displayed when loaded in chrome. Workaround TBD. + * Chrome demo with full dataset hosted here: [https://pieper.github.io/sites/idc-chart/](https://pieper.github.io/sites/idc-chart/) +3. Talked with TMJ and IDC teams as planned and got more ideas for applications of this technique +4. Talked with MassVision team (Amoon) and shared mutual enthusiasm for prettier and more interactive plots and plan to continue collaboration +5. 
Learned that there's a [pyecharts](https://pyecharts.org/#/) package that may be easier to use than the current JavaScript wrapping (but may also make debugging harder) + +![](https://github.com/user-attachments/assets/9a169d58-5424-4901-95d3-b3e1f569a324) + + +# Illustrations + + + + +An example parallel coordinates visualization: [https://storage.googleapis.com/sdp-lnq-site/site/index.html](https://storage.googleapis.com/sdp-lnq-site/site/index.html). +In this example, Slicer is used to pre-render thumbnails for interactive exploration, and clicking on the link takes you to an IDC-hosted viewer to see the full dataset. This allows the full interactive chart to be hosted in a Google storage bucket. + +Previous experiments with parallel coordinates charts leveraged the qSlicerWebWidget to support bidirectional communication between the JavaScript-based parallel coordinates chart and the Python-based Slicer visualization. + +* In this example, measures of tissue microstructure for dozens of white matter tracts for hundreds of patients can be interactively compared. Screenshots of each structure are shown as you mouse over the chart and clicking on a particular one loads the corresponding 3D file into Slicer. + + + +* In this example a multiparametric MRI scan of a patient with a brain tumor is shown in Slicer and statistics from different volumes are shown in the parallel coordinates chart. The video shows the two-way interaction of the chart with the volume visualization. For example, when regions are selected in Slicer using the Segment Editor, the signal intensities are plotted on the chart in colors corresponding to the segment color. Alternatively, the user can select combinations like high fractional anisotropy and low mean diffusivity and Slicer displays all voxels meeting that criterion as an overlay on all the images. 
The goal is for clinical researchers to be able to explore the 3D anatomical distribution of tissues with different signal properties in and around the tumor. + + + + + +# Background and References + + + + +_No response_ diff --git a/PW43_2025_Montreal/Projects/OpenMeshedAnatomyViewerUsingTrameSlicer/README.md b/PW43_2025_Montreal/Projects/OpenMeshedAnatomyViewerUsingTrameSlicer/README.md new file mode 100644 index 000000000..9ddf8f938 --- /dev/null +++ b/PW43_2025_Montreal/Projects/OpenMeshedAnatomyViewerUsingTrameSlicer/README.md @@ -0,0 +1,92 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Open Meshed Anatomy Viewer using Trame Slicer +category: Cloud / Web + +key_investigators: + +- name: Andy Huynh + affiliation: University of Western Australia + +- name: Benjamin Zwick + affiliation: University of Western Australia + +- name: Karol Miller + affiliation: University of Western Australia + +- name: Adam Wittek + affiliation: University of Western Australia + +--- + +# Project Description + + + + +The Open Meshed Anatomy Viewer is a web-app for researchers to download and visualize atlas-based meshes/grids for scientific computing based on Trame Slicer. [trame-slicer](https://github.com/KitwareMedical/trame-slicer) is a Python library bringing[ 3DSlicer](https://github.com/Slicer/Slicer/) components in trame as a composable library. It uses 3D Slicer's python wrapping and adds a thin wrapping to make it available with the[ trame framework](https://github.com/Kitware/trame/). + +- The meshes/grids are created from [Open Anatomy Project's SPL/NAC Brain Atlas](https://www.openanatomy.org/atlas-pages/atlas-spl-nac-brain.html). +- The [Trame Slicer](https://github.com/KitwareMedical/trame-slicer) library provides a way for 3D Slicer components to be used in a web-app. + + + +## Objective + + + + +1. Objective A. Develop a Trame Slicer based web application to view meshes/grid with images. 
+2. Objective B. Download meshes/grids. +3. Objective C. Host web-app on cloud server. +4. Objective D. Upload simulation results. +5. Objective E. Extract statistics based on anatomical labels. +6. Objective F. Morph meshes/grids to target MRI. + + + +## Approach and Plan + + + + +1. Create a blank Trame Slicer repo project. +2. Host project on cloud server (e.g. using docker, cap rover etc.). +3. Load images and atlas. +4. Test server-side rendering performance. (related [Github Issue](https://github.com/KitwareMedical/trame-slicer/issues/11)). +5. Add features. + + + +## Progress and Next Steps + + + + +1. We have developed detailed computational meshes using the SPL Brain Atlas. +2. At the [39th Project Week in Montreal (2023)](https://projectweek.na-mic.org/PW39_2023_Montreal/) we developed a Trame-based web application that can be used to visualize the Open Meshed Anatomy in 3D Slicer + + + +# Illustrations + + + + +![Image](https://github.com/user-attachments/assets/4fe0811b-3d21-4a8c-b05f-7a8ab1b4eb47) + + + +# Background and References + + + + +1. [Open Meshed Anatomy Project Description - NA-MIC Project Week 39 in Montreal (2023)](https://projectweek.na-mic.org/PW39_2023_Montreal/Projects/OpenMeshedAnatomy/) +2. 
[KitwareMedical/trame-slicer: Bring the capabilities of 3D Slicer to the web with modern UI using the trame framework!](https://github.com/KitwareMedical/trame-slicer) + diff --git a/PW43_2025_Montreal/Projects/PhysicsInformedNeuralNetworksToImproveRegistrationAccuracyForImageGuidedNeurosurgery/README.md b/PW43_2025_Montreal/Projects/PhysicsInformedNeuralNetworksToImproveRegistrationAccuracyForImageGuidedNeurosurgery/README.md new file mode 100644 index 000000000..6cf4a490e --- /dev/null +++ b/PW43_2025_Montreal/Projects/PhysicsInformedNeuralNetworksToImproveRegistrationAccuracyForImageGuidedNeurosurgery/README.md @@ -0,0 +1,122 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Physics-informed neural networks to improve registration accuracy for image-guided + neurosurgery +category: Quantification and Computation + +key_investigators: + +- name: Benjamin Zwick + affiliation: University of Western Australia + +- name: Mostafa Jamshidian + affiliation: University of Western Australia + +- name: Sajjad Arzemanzadeh + affiliation: University of Western Australia + +- name: Karol Miller + affiliation: University of Western Australia + +- name: Adam Wittek + affiliation: University of Western Australia + +- name: Paul Parizel + affiliation: University of Western Australia + +- name: Ron Kikinis + affiliation: Harvard Medical School + country: USA + +- name: Michael Bynevelt + affiliation: Department of Health WA + country: Australia + +- name: Alexandra Golby + affiliation: Harvard Medical School + country: USA + +--- + +# Project Description + + + + +Brain tumour surgery relies on preoperative images for planning and guidance. But during surgery the brain shifts, reducing navigation accuracy. This project will develop an artificial intelligence-based technique that combines biomechanics and machine learning in a physics-informed neural network to track brain shift during surgery. 
This will help surgeons remove tumours more precisely while preserving healthy tissue, reducing adverse effects and follow-up surgeries for better patient outcomes. + +This project aims to develop a biomechanics-guided physics-informed neural network (PINN) to correct preoperative images for intraoperative brain shift, enhancing the precision, accuracy, and efficiency of neuronavigation in brain tumour surgery. + +Surgical outcomes depend on precise navigation, but once surgery begins, the brain deforms due to cerebrospinal fluid drainage, gravity, and resection. This brain shift renders preoperative images inaccurate, compromising localisation of tumour boundaries and identification of critical neural structures. + +Existing solutions are limited. Intraoperative imaging lacks the resolution of preoperative scans and can be costly, slow, or unavailable. Purely physics-based methods that predict brain shift using patient-specific biomechanical models require time-consuming geometry reconstruction and mechanical property description of brain tissues. We propose a novel approach that integrates physics-based modelling with data-driven machine learning. Our specific aims are to: +1. develop and train a PINN to correct preoperative images for intraoperative brain shift +2. integrate the PINN into a non-rigid image registration framework using open-source software; and +3. evaluate the PINN’s performance against existing techniques. + +The PINN will incorporate biomechanics-based constraints to ensure deformation fields conform to brain tissue mechanics. Training will use retrospective data from Harvard Medical School. Performance will be assessed by comparing predicted tumour and ventricle contours with actual positions identified on intraoperative magnetic resonance images. 
+ +By combining the strengths of physics-based and data-driven methods, the proposed PINN-based approach has the potential to provide intraoperative images with accuracy and resolution comparable to preoperative images. This would enable more precise tumour localisation, reducing incomplete resections and preserving healthy tissue, and ultimately improving patient outcomes. + +Ultimately, we aim to implement these methods into 3D Slicer extension. + + + + +## Objective + + + + +1. Objective A. Develop and validate a novel PINN-based non-rigid image registration technique that accurately tracks large brain deformations during surgery in real time. +2. Objective B. Perform validation using a dataset of clinical cases from the Advanced Multimodality Image Guided Operating (AMIGO) suite, demonstrating the technique's applicability in a clinical setting. +3. Objective C. Implement real-time brain shift correction by combining preoperative MRI images with intraoperative sparse imaging data, providing an adaptable system for various surgical scenarios. +4. Objective D. Demonstrate the scalability of the technique by applying it to a broader patient cohort and comparing its performance to existing rigid and non-rigid registration methods. +5. Objective E. Disseminate the developed algorithms via the widely used 3D Slicer medical imaging platform (https://www.slicer.org/), ensuring its availability for clinical and research purposes. +6. Objective F. Publish high-impact journal articles in leading medical and engineering journals, such as Medical Image Analysis and NeuroImage. +7. Objective G. Secure additional research funding through ARC Linkage and NHMRC applications, leveraging the project’s outcomes to support future clinical trials and the expansion of the research to other imaging modalities, such as intraoperative ultrasound. + + + +## Approach and Plan + + + + +1. Discuss current state-of-the-art registration techniques in 3D Slicer. +2. 
Develop a workflow for integrating PINNs-based registration into 3D Slicer. + + + +## Progress and Next Steps + + + + +1. Describe specific steps you **have actually done**. + + + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +### SlicerCBM +We have previously developed the SlicerCBM “Computational Biophysics for Medicine in 3D Slicer” extension for biomechanics-based image registration. [SlicerCBM](https://github.com/SlicerCBM/SlicerCBM) is an extension for[ 3D Slicer](http://slicer.org/) that provides tools for creating and solving computational models of biophysical systems and processes with a focus on clinical and biomedical applications. Features include grid generation, assignment of material properties and boundary conditions, and solvers for biomechanical modeling and biomechanics-based non-rigid image registration. + diff --git a/PW43_2025_Montreal/Projects/RealTimePointCloudStreamingTo3DSlicerViaOpenigtlink/README.md b/PW43_2025_Montreal/Projects/RealTimePointCloudStreamingTo3DSlicerViaOpenigtlink/README.md new file mode 100644 index 000000000..7f58d3f7e --- /dev/null +++ b/PW43_2025_Montreal/Projects/RealTimePointCloudStreamingTo3DSlicerViaOpenigtlink/README.md @@ -0,0 +1,86 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Real-Time Point Cloud Streaming to 3D Slicer via OpenIGTLink +category: Infrastructure + +key_investigators: + +- name: Vitor Azevedo Padovani + affiliation: TELUQ University + country: Canada + +- name: Houssem Gueziri + affiliation: TELUQ University + country: Canada + + +--- + +# Project Description + + + + +This project explores the real-time streaming of dynamic 3D point clouds or surfaces to 3D Slicer using OpenIGTLink and OpenIGTLinkIO. The goal is to generate synthetic time-varying surface data (e.g., a deforming mesh or animated point cloud), package it as VTK PolyData, and transmit it to Slicer for visualization and possible downstream processing. 
This will also allow benchmarking the performance of OpenIGTLinkIO versus the traditional OpenIGTLink API for sending non-transform data types like surfaces. + + + +## Objective + + + + +1. **Objective A.** Implement a function to generate time-varying surface data (e.g., a sinusoidal wave or morphing geometry). +2. **Objective B.** Package this data into vtkPolyData and stream it to Slicer in real time using OpenIGTLinkIO. +3. **Objective C.** Test the same setup with the original OpenIGTLink library and compare performance. +4. **Objective D.** Evaluate the responsiveness, latency, and frame rates when visualizing dynamic surface data in Slicer. + + + + +## Approach and Plan + + + + +1. Create a C++ module that synthesizes a point cloud or surface that varies over time (e.g., a wave surface or a breathing-like deformation). +2. Use vtkPolyData to store the generated geometry at each time step. +3. Integrate OpenIGTLinkIO to send this PolyData to a 3D Slicer scene using an appropriate device type (e.g., Mesh or PolyData device). +4. Validate the rendering in Slicer, ensuring correct reception and visualization. +5. Repeat the above using the standard OpenIGTLink library and measure latency, throughput, and CPU usage. +6. Record performance differences and summarize findings. 
+ + + +## Progress and Next Steps + + + + +- Local development environment with OpenIGTLink, VTK, and 3D Slicer already set up +- Configurable plane dimensions and wave parameters +- Pre-calculated wave animation frames for smooth playback +- Point cloud as VTK Poly Data through OpenIGTLink (900 points at ~30 FPS) +- Point cloud as VTK Points through OpenIGTLink (900 points at ~20 FPS) +- Point cloud as OpenIGTLink Image (100,000 points at ~60 FPS) + + +# Illustrations + + +![PolyDataToSlicer](https://github.com/user-attachments/assets/bdc2cb51-08c4-467d-89a6-844b46e17796) +![PointsToSlicer](https://github.com/user-attachments/assets/23d49396-a136-4f65-8755-10e06c75bc5f) +![PointsAsImageToSlicer](https://github.com/user-attachments/assets/7c9dc5df-60ff-4137-bf7f-fe01ac8d2460) +![PointsAsImageUnpackedToSlicer](https://github.com/user-attachments/assets/3bd53216-38c6-451c-b534-89835d64fdde) + + +# Background and References + + + +[https://github.com/Vitor-Padovani/surfaceStreamer.git](https://github.com/Vitor-Padovani/surfaceStreamer.git) diff --git a/PW43_2025_Montreal/Projects/SegmentAwareCarvingOfVolumes/README.md b/PW43_2025_Montreal/Projects/SegmentAwareCarvingOfVolumes/README.md new file mode 100644 index 000000000..91fe927a2 --- /dev/null +++ b/PW43_2025_Montreal/Projects/SegmentAwareCarvingOfVolumes/README.md @@ -0,0 +1,96 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Segment-aware carving of volumes +category: VR/AR and Rendering + +key_investigators: + +- name: Andrey Titov + affiliation: ÉTS + country: Canada + +- name: Simon Drouin + affiliation: ÉTS + country: Canada + +- name: Liam O'Connor + affiliation: Concordia University + country: Canada + +--- + +# Project Description + + + + +The goal of this project is to implement a segment-aware carving mechanism that will make it possible to clip specific user-defined segments when rendering a medical 
volume. This will allow users to create very customized visualizations tailored for specific needs, like anatomy learning or preoperative planning. Eventually, the goal is to add VR interactions, as demonstrated in this video: + + + + + + +## Objective + + + + +1. Implement a segment-aware clipping occlusion management technique, which may be useful for anatomy learning or preoperative planning. + + + +## Approach and Plan + + + + +1. Create a basic architecture using the currently existing OpenGL that allows chaining multiple compute shaders and then visualizing the computed image +2. Implement segment-aware clipping visualization using the compute shader chaining pipeline + + + + +## Progress and Next Steps + + + + +1. This project has already been implemented in Unity, and the goal of this project is to port it to 3D Slicer. However, the Unity implementation relies a lot on compute shaders. +2. An approach to fill a volume in an OpenGL compute shader has been tested, and this result can then be displayed by writing the output to an existing multi-component volume. However, to access the texture ID of this multi-component volume, a loadable C++ module is required, which has already been made. +3. The different segments from the volume are rendered using volume rendering and the texels take in the proper color based on the segments' labels. +4. The editor makes it possible to add a 'clipping sphere' to the scene. This clipping sphere is created using a point markup and a radius value can be set in the editor. The clipping sphere is then used in a compute shader to clip into the different segments within the volume. If the sphere clips into a voxel it sets the intensity value of said voxel to 0. +5. The clipping sphere uses a 'mask' to only clip certain segments in the volume. Whether the segment will be clipped depends on the segment visibility value in the Segment Editor, which can be changed in real time. +6. 
The module was published online and is accessible in the Extension Manager: + - [https://github.com/andrey-titov/SlicerAnatomyCarve](https://github.com/andrey-titov/SlicerAnatomyCarve) + +Future work: +1. The rendering needs to be polished to reduce the antialiasing on the segments and make it look better. +2. Multiple clipping spheres should be possible to be created. This would then enable the ability to have each sphere clip different segments within the volume. + + + +# Illustrations + + + +![Screenshot 2025-06-26 153232](https://github.com/user-attachments/assets/d24714da-98f3-4215-a942-40092ff9d8e7) + + + + +# Background and References + + + + +Article describing the occlusion management technique was submitted to the ISMAR 2025 conference. + +Relevant technique for unsegmented data: A. Joshi, D. Scheinost, K. Vives, D. Spencer, L. Staib, and X. Papademetris, “Novel interaction techniques for neurosurgical planning and stereotactic navigation,” IEEE Trans. Vis. Comput. Graph., vol. 14, no. 6, pp. 1587–1594, Nov. 2008, doi: 10.1109/TVCG.2008.150. 
+ diff --git a/PW43_2025_Montreal/Projects/SegmentingAndQuantifyingFatHerniationAndMuscleConformationalChangeInFracturedOrbits/README.md b/PW43_2025_Montreal/Projects/SegmentingAndQuantifyingFatHerniationAndMuscleConformationalChangeInFracturedOrbits/README.md new file mode 100644 index 000000000..07ae4a201 --- /dev/null +++ b/PW43_2025_Montreal/Projects/SegmentingAndQuantifyingFatHerniationAndMuscleConformationalChangeInFracturedOrbits/README.md @@ -0,0 +1,92 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: 'Segmenting and quantifying fat herniation and muscle conformational change in fractured + orbits ' +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Chi Zhang + affiliation: Texas A&M College of Dentistry +- name: Caelan Ducommun + affiliation: Texas A&M College of Dentistry +- name: Gwen Tran + affiliation: Texas A&M College of Dentistry +- name: Andrew Read-Fuller + affiliation: Texas A&M College of Dentistry +--- + +# Project Description + +Segmentation of orbital soft tissue and bones are difficult because the structures are small and thin and boundaries are not well defined in medical CT scans. Fractured orbits introduced additional challenges, including cracked and isolated bones, muscle conformational changes (e.g., shape and size), fat herniation into adjacent sinuses, and hematoma. Detecting and delineating these conditions are crucial for surgical decision making and planning. + +Manual segmentation of orbit is laborious and technical. The available deep learning tools are scarce. TotalSegmentator recently included a model for segmenting extraocular muscles but it might only be based on about twenty manual segmentations with no peer-reviewed publications. Boundaries between some muscles (e.g., superiour rectus & levator palpebrae) cannot be validated due to CT image qualities. A company from Finland has a model for segmenting orbital tissue but they did not incorporate fractured conditions. 
Consequently, fat is indistinguishable from blood in herniation cases. + +-Segmentation of orbital fat (herniation into maxillary sinus) vs. blood. A Finnish commercial software includes blood as fat segmentation, thus fails to compute herniation.
+![](https://github.com/user-attachments/assets/444d7704-1965-41cb-a42d-621dc1fa88d6) +![](https://github.com/user-attachments/assets/e72d0d66-a01c-40da-a5cd-dd4b4ad58212)
+![](https://github.com/user-attachments/assets/e65d9afd-587a-427c-a414-bc0a0243a6b2) +![](https://github.com/user-attachments/assets/c54ffdc2-004d-46c0-a94a-6c6e8dcd37b5)
+ +
+-Conformational change in inferior rectus (segmented by TotalSegmentator):
+![](https://github.com/user-attachments/assets/0d8e2ae1-f45c-4a45-82bf-9b4a9a9505aa) +![](https://github.com/user-attachments/assets/658c39f1-91cc-486f-a466-0c8eebb81339) + + +## Objective +1. Create repeatable semi-automatic approaches for accurately segmenting orbital tissue, including thin bone & soft tissue, especially in fractured conditions +2. Creating repeatable metrics based on segmentation results to detect and quantify tissue changes to aid decision making +3. Stretching goal: validate TotalSegmentator results and initiate deep learning segmentation development. Detecting and segmenting fractures. + + + +## Approach and Plan + +1. Use TotalSegmentator as a starting point to correct its results, then add fat tissue with a focus of differentiating herniated fat vs. blood (Grow-from-Seeds appears to perform well). How to consistently delineate anterior boundary of orbital fat. +2. Focus on detecting and quantifying fat herniation and inferior rectus muscle (also medial rectus) conformational changes because they are most frequently used in decision making. +3. Fat herniation: register the contralateral side to the fractured side to quantify herniation using Hausdorff distance, distance map, etc. +4. Inferior rectus m.: perhaps connecting centroids of each slice to draw its basic shape and use cross section area to quantify sizes. +5. Consulting people at PW to prepare for deep learning segmentation model training for detection and segmentation, including how to efficiently create a training dataset. + + + + +## Progress and Next Steps + +1. Testing nnInteractive to prepare manual segmentation and potential training dataset. Continue developing a repeatable workflow for orbital fracture cases.
+![Screenshot from 2025-06-26 22-57-11](https://github.com/user-attachments/assets/df1d8512-6fdb-4090-895e-2fdcd2c509ef) + +![Screenshot from 2025-06-26 23-41-36](https://github.com/user-attachments/assets/26bc4612-22cf-4400-a99e-816356722af6) +![Screenshot from 2025-06-26 23-42-56](https://github.com/user-attachments/assets/cf9dc043-0c9a-4323-87ee-6b5db2fd5a0f) + + +3. Using the centroid of each slice of inferior rectus to create a curve (subsampled to 20 points). The shape of the curve, such as maximum curvature, might be able to detect muscle conformational change related to surgical decision, such as simple logistic regression.
+Unfractured side
+![Screenshot from 2025-06-27 01-00-41](https://github.com/user-attachments/assets/d4046e22-d9de-4090-a159-0e37105db479) +![Screenshot from 2025-06-27 01-02-50](https://github.com/user-attachments/assets/9400ace4-b85c-469d-a0ec-fb72d93778c4) + +4. Still difficult to quantify fat herniation, though nnInteractive and Grow from Seeds can capture it well. The reason is because the anterior boundary of the fat tissue is arbitrarily delineated and asymmetry of orbits. + + + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +_No response_ diff --git a/PW43_2025_Montreal/Projects/SimulateOrbitSurgeryUsingSlicersofa/README.md b/PW43_2025_Montreal/Projects/SimulateOrbitSurgeryUsingSlicersofa/README.md new file mode 100644 index 000000000..938207eeb --- /dev/null +++ b/PW43_2025_Montreal/Projects/SimulateOrbitSurgeryUsingSlicersofa/README.md @@ -0,0 +1,128 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Simulate orbit surgery using SlicerSOFA +category: VR/AR and Rendering + +key_investigators: + +- name: Chi Zhang + affiliation: Texas A&M College of Dentistry + country: US + +- name: Rafael Palomar + affiliation: Oslo University Hospital + country: Norway + +- name: Steve Pieper + affiliation: Isomics + country: US + +- name: Paul Baksic + affiliation: INRIA + country: France + +- name: Andrew Read-Fuller + affiliation: Texas A&M College of Dentistry + +--- + +# Project Description + + + + +Simulating orbital fracture repair process using SlicerSOFA. + +

+![](https://github.com/user-attachments/assets/5c46f298-f059-4c4c-8114-4f21906f9dd2)

+ +Surgical Guidance using MatrixOrbital preformed plates from Johnson&Johnson DePuy Synthesis: [https://www.jnjmedtech.com/en-US/product/matrixorbital-preformed-orbital-plates](https://www.jnjmedtech.com/en-US/product/matrixorbital-preformed-orbital-plates) + + + +## Objective + + + + +1. Learn programing using SlicerSofa. Deform a simple orbital tissue model. +2. Load and deform multiple models and create a simple simulation of lifting orbital tissue using a scoope-like tool. +3. Stretching goal: create a volumetric model of the meshed plate and bend it using SlicerSOFA + + + +## Approach and Plan + + + +Currently, I was able to do a simple simulation in SOFA. Models are prepared in Gmsh. My goal is to transfer all of these processes, including model preparation, in Slicer and SlicerSOFA. +1. Segmentation using TotalSegmentator and DentalSegmentator. Orbital fat tissue and maxillary sinus also need to be added to the model. + +

+![](https://github.com/user-attachments/assets/8cf719a4-304b-4ac8-a010-23bd5f6b91b8)
+![](https://github.com/user-attachments/assets/90be9429-97d6-451e-b9c7-5f9a85c7d32c) +

+ +2. Volumetric model preparation using Gmsh. Plate geometry is too complicated and can only be created in Gmsh but not in SegmentMesher. + +

+![](https://github.com/user-attachments/assets/334e7775-2bdf-437e-862e-06465ebb1f42)
+![](https://github.com/user-attachments/assets/c950c1df-d0be-4a2d-a1e6-c9d80ed9c50f) +

+ +3. Did a simple orbital tissue retraction in SOFA. The retractor model is created using Fiducial to Model module in Slicer. + +

+![](https://github.com/user-attachments/assets/15d09c71-796f-4a31-9087-afc68fade26d)
+![](https://github.com/user-attachments/assets/f8ca83fe-7308-4d63-8e35-405ba2aa5f25) +![](https://github.com/user-attachments/assets/096e04fa-53be-4f08-b786-4e4b5ffcc7bf) +

+ + + +## Progress and Next Steps + +1. General plan is to first learn and design simulation and SOFA, followed by doing simulation using basic SlicerSOFA function in a Slicer scene by simple scripting, and then learn simulation at the SlicerSofa module level. +2. Move & rotate objects using keybaord. Simplify simulation.
+ + +[Watch the video](https://www.youtube.com/watch?v=QgoJxyJ06to): + + + + +3. Using SlicerHeart Baffle Planner to create a 2D model roughly capture the shape of the plate and use Shell plugin for simulate plate bending:
+![Screenshot from 2025-06-27 02-43-11](https://github.com/user-attachments/assets/fd6b900b-9900-4c83-b0d7-1adf572208e1)
+ +[![Watch the video](https://youtu.be/QgoJxyJ06to)](https://youtu.be/iLCJW_BHsg8)
+ +4. Testing simple simulation via import Sofa in Slicer Scene
+[![Watch the video]([https://youtu.be/3xOu1HUu0Uc)](https://youtu.be/3xOu1HUu0Uc) + +# Illustrations + + + +![](https://github.com/user-attachments/assets/5c46f298-f059-4c4c-8114-4f21906f9dd2) + +Surgical Guidance using MatrixOrbital preformed plates from DePuy (see below for reference). + + +![](https://github.com/user-attachments/assets/66a054ca-7751-4fe7-8c82-94ab1da61509) + +Transconjunctival approach for retracting orbital tissue. From: [https://surgeryreference.aofoundation.org/cmf/pediatric-trauma/midface/orbital-floor/reconstruction](https://surgeryreference.aofoundation.org/cmf/pediatric-trauma/midface/orbital-floor/reconstruction) + + + + +# Background and References + + + + +Surgical Guidance using MatrixOrbital preformed plates from DePuy Synthesis: [https://www.jnjmedtech.com/en-US/product/matrixorbital-preformed-orbital-plates](https://www.jnjmedtech.com/en-US/product/matrixorbital-preformed-orbital-plates) diff --git a/PW43_2025_Montreal/Projects/SlicerBuildInstructionUpdates/README.md b/PW43_2025_Montreal/Projects/SlicerBuildInstructionUpdates/README.md new file mode 100644 index 000000000..7a4e49c24 --- /dev/null +++ b/PW43_2025_Montreal/Projects/SlicerBuildInstructionUpdates/README.md @@ -0,0 +1,85 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Slicer Build Instruction Updates +category: Infrastructure + +key_investigators: + +- name: Hans Johnson + affiliation: University of Iowa + +- name: Cavan Riley + affiliation: University of Iowa -- Working from Iowa + +- name: Benjamin Zwick + affiliation: University of Western Australia + +- name: Slicer Core Developers + affiliation: Jean-Christophe Fillion-Robin / Others ?? + +--- + +# Project Description + +Update the documentation for Slicer development to describe what should +be expected. Provide expanded guidance for how to identify and work through +common issues that arise in different environments. 
+ +## Improve Slicer build instructions +Building a custom version of Slicer has become increasingly complex. + - Identify how to install Qt 6 on an ARM-based Apple M4 computer. + - Identify how to install Qt 6 on an Ubuntu 24.04 Linux computer. + - Identify how to install Qt 6 on a Debian 12 Bookworm Linux computer. + +## Objective + +1. Improve documentation for new (not expert) developers to understand the environment necessary to build Slicer on Linux (Ubuntu 24.04), Mac (M4 new mac), Windows +2. To prepare Slicer for future versions of ITK, but configuring a stable build environment has been more challenging than expected (primarily regarding Qt 5 requirements). +3. Update BRAINSTools support in Slicer +4. Have Slicer build successfully with CMake 4.0 +5. Update [SlicerBuildEnvironment](https://github.com/Slicer/SlicerBuildEnvironment) instructions to configure, build and package Slicer for Linux. + +Stretch Goals +1. Stretch Goal -- Setup building a large number of extensions to facilitate preparation for ITKv6 C++17 (while maintaining backward compatibility). +2. Investigate updating TCLAP to build cleanly, perhaps update to the latest version of TCLAP. +3. Investigate the status of moving to Qt6. Provide support/testing environments for migration to Qt6. [https://projectweek.na-mic.org/PW43_2025_Montreal/Projects/TransitionSlicerDefaultBuildFromQt5ToQt6/](https://projectweek.na-mic.org/PW43_2025_Montreal/Projects/TransitionSlicerDefaultBuildFromQt5ToQt6/) + +## Approach and Plan + +1. Have Cavan follow instructions naively, and update instructions as necessary to get a build environment that allows building of Slicer +2. Find outdated build instructions via Google searching and update/remove documentation. + +## Progress and Next Steps + +1. Downloaded Slicer source code, tried to build on mac M4 computer. +2. 
Updates to allow for building with CMake 4+ [https://github.com/Slicer/Slicer/pull/8491](https://github.com/Slicer/Slicer/pull/8491) + +``` bash +# Install older version of cmake with homebrew on mac +brew uninstall cmake +# Find and download the formula for the cmake version you wish to install +curl -O https://raw.githubusercontent.com/Homebrew/homebrew-core/1976f46fc84ea7716722a067c0dcffb072a38388/Formu +la/c/cmake.rb +brew install ./cmake.rb +``` + + + +# Background and References + + + +The current instructions 2025-06-17 do not work on Arm64-based Mac, primarily due to the inability to install Qt5 on an Arm64 based mac. + +[Base Instructions](https://slicer.readthedocs.io/en/latest/developer_guide/build_instructions/index.html) + diff --git a/PW43_2025_Montreal/Projects/SlicerSOFA/README.md b/PW43_2025_Montreal/Projects/SlicerSOFA/README.md new file mode 100644 index 000000000..69644cf99 --- /dev/null +++ b/PW43_2025_Montreal/Projects/SlicerSOFA/README.md @@ -0,0 +1,135 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: 'Slicer-SOFA: Next Steps' +category: Infrastructure + +key_investigators: + +- name: Rafael Palomar + affiliation: Oslo University Hospital and NTNU + country: Norway + +- name: Paul Baksic + affiliation: Inria + country: France + +- name: Steve Pieper + affiliation: Isomics Inc. + country: USA + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware Inc. + country: USA + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +- name: Sam Horvath + affiliation: Kitware Inc + country: USA + +- name: Naomi Catwell + affiliation: ÉTS + country: Canada + +- name: Chi Zhang + affiliation: Texas A&M School of Dentistry + country: USA + +--- + +# Project Description + + + + +The SlicerSOFA project has already been integrated as a Slicer extension, providing core functionality including python bindings and many examples. 
+We'd now like to take the next steps to determine default functionality and determine how we want to enable applications that use the extension. In addition we will tackle some of the issues that need a fix, most notably, the MacOS extension packaging which to date is not available. + +## Objective + + + + +1. Objective A. Define what SOFA plugins should be enabled for the extension and update the packaged SOFA to the latest possible version (SOFA is currently in v25.06, while SlicerSOFA still uses v24.06). + +2. Objective B. See if we can build a template for SlicerSOFA-based extensions that can provide custom C++ SOFA plugins. +This would allow the SOFA community to leverage Slicer's existing infrastructure for cross-platform testing and distribution. + +3. Objective C. Discuss/prototype parallel processing architectures to optimize overlap of simulation and rendering for best interactive performance. + +4. Objective D. Bug fixing ([#44](https://github.com/slicer/slicersofa/issues/44)) and MacOS package fixing + +5. Objective E. Discuss other topics of interest to potential SlicerSOFA users. + +## Approach and Plan + + + +1. Meet to review existing SlicerSOFA build configuration and options. + +2. Discuss and possibly prototype a C++ SOFA plugin in a SuperBuild extension that depends on SlicerSOFA + +3. Discuss various client/server and message passing options, such as an http-based protocol, RPyC, or others. + +4. Improve the SlicerSOFA (and possibly SOFA) CMake infrastructure to enable MacOS packaging. Review SlicerSOFA python infrastructure in connection with the SoftTissueSimulation and [#44](https://github.com/Slicer/SlicerSOFA/issues/44) + +5. Reach out to other Project Week attendees who express interest. + + +## Progress and Next Steps + + +### macOS Packaging for SlicerSOFA + +Significant progress has been made toward enabling macOS packaging for SlicerSOFA. 
Most required infrastructure changes have been implemented and are pending final testing. The following contributions have been made: + +* A PR was opened on SOFA to globally define `OUTPUT_DIRECTORY` for runtime, archive, and library targets: [sofa-framework/sofa#5558](https://github.com/sofa-framework/sofa/pull/5558). +* Another PR was opened to globally set `RELOCATION` paths for plugins, applications, and projects: [sofa-framework/sofa#5562](https://github.com/sofa-framework/sofa/pull/5562). +* A PR was submitted to Slicer to introduce CMake infrastructure for `rpath` manipulation of third-party libraries on macOS: [Slicer/Slicer#8516](https://github.com/Slicer/Slicer/pull/8516). +* A committed fix aligned the SOFA fork used in Slicer with recent infrastructure improvements: [Slicer/sofa@f698e29](https://github.com/Slicer/sofa/commit/f698e29e66e24c702e665c9fb80822731dd31407). +* A PR has been merged in the SlicerSOFA repository to fix macOS packaging: [Slicer/SlicerSOFA#48](https://github.com/Slicer/SlicerSOFA/pull/48). + +> These updates not only benefit SlicerSOFA but also lay a solid foundation for packaging other SuperBuild-based extensions that rely on complex third-party libraries. + +### RPyC Integration + +* A working prototype was developed using [RPyC](https://rpyc.readthedocs.io/en/latest/) to enable remote procedure calls to a SOFA server. See proof-of-concept implementation here: [bakpaul/TestScenes](https://github.com/bakpaul/TestScenes) repository. +* Ongoing discussions have explored how SlicerSOFA could best leverage RPyC for distributed processing, but no final design decision has been made. + +### Review of current SlicerSOFA build options + +* The SOFA team has been actively refactoring their CMake infrastructure to improve modularity and facilitate integration into external projects like SlicerSOFA. 
+* Once macOS packaging support is finalized, we plan to review and refine how SOFA is built and deployed within SlicerSOFA, potentially simplifying the configuration process. + +### SlicerSOFA documentation + +* Community feedback has highlighted the urgent need for better documentation for SlicerSOFA. +* During Project Week 43, we created a [Read the Docs](https://readthedocs.org/) site for SlicerSOFA and began populating it with initial content to support onboarding and usage. + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +- [SlicerSOFA GitHub Repository](https://github.com/Slicer/SlicerSOFA) +- [SOFA Framework](https://www.sofa-framework.org/) +- [3D Slicer](https://www.slicer.org/) +- [SlicerSOFA PW 42 Project](https://projectweek.na-mic.org/PW42_2025_GranCanaria/Projects/SlicerSofaIntegrationOfSofaWith3DSlicerForAdvancedMedicalSimulations/) +- [Slicer-SOFA PW 41 Project](https://projectweek.na-mic.org/PW41_2024_MIT/Projects/SlicerSofa/) +- [Slicer-SOFA PW 40 Project](https://projectweek.na-mic.org/PW40_2024_GranCanaria/Projects/SlicerSofaIntegration/) diff --git a/PW43_2025_Montreal/Projects/Slicercart/README.md b/PW43_2025_Montreal/Projects/Slicercart/README.md new file mode 100644 index 000000000..810a61b56 --- /dev/null +++ b/PW43_2025_Montreal/Projects/Slicercart/README.md @@ -0,0 +1,96 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: SlicerCART +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Maxime Bouthillier + affiliation: Polytechnique Montreal + country: Canada + +- name: Delphine Pilon + affiliation: Polytechnique Montreal + country: Canada + +- name: Kuan Yi wang + affiliation: Polytechnique Montreal + country: Canada + +- name: An Ni Wu + affiliation: Polytechnique Montreal + country: Canada + +- name: Julien Cohen-Adad + affiliation: Polytechnique Montreal + country: Canada + +- name: Laurent Létourneau-Guillon + affiliation: FARQ + 
country: Canada + +--- + +# Project Description + + + + +Building a custmizable module to facilitate manual annotation (segmentation, classification, quality-control) in large datasets. + + + +## Objective + + + + +Objective A. 3D Slicer Module that faciliate manual segmentation, classification and quality-control, with project-specific configurations + + + + +## Approach and Plan + + + + +1. Resolve current bugs to make the module working +2. Get feedback + + + + +## Progress and Next Steps + + + + +1. See Demo + + + + +# Illustrations + + + + +![Image](https://github.com/user-attachments/assets/a3bbf55f-3cb3-451a-b1c3-1ac5d37129f2) + + + +# Background and References + + + + +[https://github.com/neuropoly/slicer-manual-annotation](https://github.com/neuropoly/slicer-manual-annotation) + +[https://drive.google.com/drive/folders/1xVPfhznA60xt0YCgDhgdMWWbDP8-3LTK?usp=sharing](https://drive.google.com/drive/folders/1xVPfhznA60xt0YCgDhgdMWWbDP8-3LTK?usp=sharing) + diff --git a/PW43_2025_Montreal/Projects/SlicersofaSlicerrosIntegration/README.md b/PW43_2025_Montreal/Projects/SlicersofaSlicerrosIntegration/README.md new file mode 100644 index 000000000..b63c134ae --- /dev/null +++ b/PW43_2025_Montreal/Projects/SlicersofaSlicerrosIntegration/README.md @@ -0,0 +1,104 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: SlicerSOFA - SlicerROS Integration +category: IGT and Training + +key_investigators: + +- name: Eléonore Germond + affiliation: IMT Atlantique + country: France + +- name: Rafael Palomar + affiliation: Oslo University Hospital + country: Norway + +- name: Steve Pieper + affiliation: Isomics Inc. 
+ country: USA + +- name: Paul Baksic + affiliation: Inria + country: France + +- name: Junichi Tokuda + affiliation: Brigham and Women's Hospital + country: USA + +- name: Anton Deguet + affiliation: Johns Hopkins + country: USA + +- name: Laura Connolly + affiliation: Queen's University + country: Canada + +--- + +# Project Description + + + + +SlicerSOFA and SlicerROS are two paradigm extensions integrating Slicer with external libraries for simulation and robotics. With this project we would like to explore the possibility to use Slicer as a means for briding SOFA and ROS with the medical-image computing capabilities of Slicer for medical robotics applications. + + + +## Objective + + + + +1. Objective A. Prototyping of a needle insertion example using SlicerSOFA with the Cosserat plugin and SlicerROS. This prototype will be used to test the feasibility of the integration and possible bottlenecks +2. Objective B. Document the installation process so it can be done easily. + + + +## Approach and Plan + + + + +1. Integration of the SOFA Cosserat plugin in SlicerSOFA. +2. Development of prototype application featuring deformation of models and propagation of deformation to medical images. The deformation will be driven by a Omni Phantom able to interact with a SOFA simulation + + + +## Progress and Next Steps + + + + +1. Wrote the scripts and put together a github repository ([https://github.com/eleonore2001/SlicerSOFA-SlicerROS](https://github.com/eleonore2001/SlicerSOFA-SlicerROS)) with the scripts / models / demos. + + + +# Illustrations + + + + + + + + In this video, the needle is represented by the fine yellow line. 
+ + +# Background and References + + + + diff --git a/PW43_2025_Montreal/Projects/SlicerultrasoundDicomAnonymizerTesting/README.md b/PW43_2025_Montreal/Projects/SlicerultrasoundDicomAnonymizerTesting/README.md new file mode 100644 index 000000000..3c8d7d6a0 --- /dev/null +++ b/PW43_2025_Montreal/Projects/SlicerultrasoundDicomAnonymizerTesting/README.md @@ -0,0 +1,114 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: SlicerUltrasound DICOM Anonymizer +category: DICOM + +key_investigators: + +- name: Tina Kapur + affiliation: BWH + country: USA + +- name: Tamas Ungi + affiliation: Queens + country: Canada + +- name: Fahimeh Fooladgar + affiliation: UBC + country: Canada + +- name: Shreyas Puducheri + affiliation: BWH + country: USA + +- name: Matt Alves + affiliation: BWH + country: USA + +- name: Caroline Schissel + affiliation: Lahey + country: USA + +- name: Maha Kesibi + affiliation: Queens + country: Canada + +- name: David Dinh + affiliation: SlicerUltrasound Team + country: USA + +- name: Atin Malaviya + affiliation: SlicerUltrasound Team + country: USA + +- name: Sam Horvath + affiliation: Kitware + country: USA + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware + country: USA + + + +--- + +# Project Description + + + +SlicerUltrasound is a 3D Slicer extension that currently includes two modules: Annotate, which supports expert labeling of image findings, and Anonymize, which removes both metadata-embedded PHI and burned-in text from DICOM images. The Anonymize module, the focus of this project, has been used at Brigham and Women’s Hospital (BWH), Lahey Hospital, and Indiana Methodist to remove PHI from over 1,000 patient exams. The Anonymizer module allows users to import DICOM ultrasound images from a local network folder and apply probe-specific masking templates to remove burned-in identifiers. Users specify the transducer type—curvilinear or phased array—which determines the expected fan shape. 
By marking three or four points on the image, the module interpolates the imaging sector and masks any visual PHI outside this region while preserving diagnostically relevant content. This flexible, semi-automated approach supports consistent anonymization across large datasets while accommodating variations in ultrasound geometry. + + + +## Objective + + + + +Our team members are onsite as well as working virtually to fix [open issues](https://github.com/SlicerUltrasound/SlicerUltrasound/issues) in the software to make it more user friendly, and to test AI models for automated fan segmentation. + + + +## Approach and Plan + + + + +We will be working on the open issues list and happy to discuss it with any PW43 participants who are interested. + + + +## Progress and Next Steps + + + + +During PW43 we completed manual de-identification (fan segmentation by clicking on the corners of the fan) for 100 patient exams, and tested our AI system on them. Some rough edges, no pun intended, were revealed, which we plan to complete and before PW44. See illustration in the lower figure below. + + +# Illustrations + + + +

+ +

+ +Results from PW43 - WIP: + +

+![](https://github.com/user-attachments/assets/b57695f1-1d8a-490f-8939-d265cfe98ecd) +

+ + +# Background and References + + + +- [https://github.com/SlicerUltrasound](https://github.com/SlicerUltrasound) diff --git a/PW43_2025_Montreal/Projects/SlicerultrasoundDicomAnonymizerTesting/SlicerAnonymizer.png b/PW43_2025_Montreal/Projects/SlicerultrasoundDicomAnonymizerTesting/SlicerAnonymizer.png new file mode 100644 index 000000000..7453853b4 Binary files /dev/null and b/PW43_2025_Montreal/Projects/SlicerultrasoundDicomAnonymizerTesting/SlicerAnonymizer.png differ diff --git a/PW43_2025_Montreal/Projects/SlicerultrasoundExtensionAddingUserRequestedFeaturesToAnnotateultrasound/README.md b/PW43_2025_Montreal/Projects/SlicerultrasoundExtensionAddingUserRequestedFeaturesToAnnotateultrasound/README.md new file mode 100644 index 000000000..76e784b30 --- /dev/null +++ b/PW43_2025_Montreal/Projects/SlicerultrasoundExtensionAddingUserRequestedFeaturesToAnnotateultrasound/README.md @@ -0,0 +1,190 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: SlicerUltrasound Extension Development - New features +category: DICOM + +key_investigators: + +- name: Maha Kesibi + affiliation: Queen's University + country: Canada + +- name: Tina Kapur + affiliation: BWH + country: USA + +- name: Tamas Ungi + affiliation: Queen's + country: Canada + +- name: Fahimeh Fooladgar + affiliation: UBC + country: Canada + +- name: Shreyas Puducheri + affiliation: BWH + country: USA + +- name: Matt Alves + affiliation: BWH + country: USA + +- name: Caroline Schissel + affiliation: Lahey + country: USA + +- name: David Dinh + affiliation: SlicerUltrasound Team + country: USA + +- name: Atin Malaviya + affiliation: SlicerUltrasound Team + country: USA + +- name: Sam Horvath + affiliation: Kitware + country: USA + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware + country: USA + +--- + +# Project Description + + + + +**AnnotateUltrasound** is a 3D Slicer extension that enables structured sector annotation of lung ultrasound video clips, 
focusing on features such as pleura lines and B-lines. It provides an intuitive interface for frame-by-frame annotation, supports multiple raters, and saves annotation data for future research and machine learning. + +We collaborate closely with several physicians who use AnnotateUltrasound in their clinical and research workflows. Their feedback directly shapes the module’s features and usability, ensuring a user-centered design. We regularly incorporate their suggestions for new features and improvements based on their real-world experiences. + +This week, we focused on incorporating user-requested features to improve efficiency, usability, and comparative analysis of annotations. + +More about the module: [https://github.com/SlicerUltrasound/SlicerUltrasound](https://github.com/SlicerUltrasound/SlicerUltrasound) + + +## Objective + + + + + +1. **Frame-by-Frame Pleura Percentage Comparison** + - Display pleura percentage per frame. + - Highlight the frame with the highest pleura coverage for each rater in the rater table. + +2. **Annotation Time Tracking** + - Track and display time each rater spends annotating, to support workload analysis and training evaluation. + +3. **Adjudication Mode** + - Introduce an adjudicator workflow for validating or invalidating rater annotations and generating final consensus annotations. + +4. **Label Annotation Comparison** + - Add label data to the rater table to allow comparison of both line and label annotations. + +5. **Improved Line Endpoint Visualization** + - Refine endpoint markers to support more precise annotations. + + +## Approach and Plan + + +- Update the AnnotateUltrasound logic to compute and store pleura percentages per frame and clip. +- Extend UI tables to display these values, with live updates as annotations change. +- Integrate a timer using `QTimer` and idle detection (from the UserStatistics module) to track annotation time and store it in JSON. 
+- Modify endpoint visuals for clearer frame annotation. +- Add an adjudicator mode for validating annotations across raters, including new UI tools and changes to the annotation file structure to store validated results + + + +## Progress and Next Steps + + + +**Completed** +- **Annotation Timer** + - Tracks time per rater, auto-pauses on inactivity + - Displays timer in MM:SS format + - Saves to annotation JSON + +- **Pleura Percentage Display** + - Shows per-frame pleura percentages in annotation table + - Highlights max pleura frame in rater table with frame index + - Values stored in annotation JSON + +- Added **Adjudicator Mode** for validating annotations across raters + - UI tools for validate/invalidate actions (+ keyboard shortcuts) + - Saves to `.adjudication.json` file with adjudicator metadata (status, timestamp) + - Updates schema to support final validated annotations + +**In Progress** +- Refine UI for adjudication visibility and toggles +- Add the Sandbox/UserStatistics module as a dependency +- Testing edge cases and fine-tuning timer and pleura calculations +- Adding the label annotation to the UI in the rater table +- Get user feedback from clinicians for final usability tweaks + +# Illustrations + + + + +Pleura percentage saving per frame and populating rater table: + + +Timer demo: + + + +Label annotation and populating rater table: + +![](https://github.com/user-attachments/assets/82875613-02be-4548-b1d6-46c52103ed35) +![](https://github.com/user-attachments/assets/496c3132-91d1-4512-849d-b49a0fdf05c3) + + +Adjudication video: + + + + +https://github.com/user-attachments/assets/d3cb237e-92d7-4a67-87ff-18c41fe003cd + + + +![](https://github.com/user-attachments/assets/c3d61cf3-a1bf-47ce-bbc7-f27af37c712f) + + +Current keyboard shortcuts: + +![keyboardShortcuts](https://github.com/user-attachments/assets/a05b65e1-8bd4-40f5-8ccd-3abf3efc2589) + + + + +# Background and 
References + + + +- Source Code: [https://github.com/SlicerUltrasound/SlicerUltrasound](https://github.com/SlicerUltrasound/SlicerUltrasound) +- Related Extension: [Sandbox – UserStatistics module](https://github.com/Slicer/SlicerSandbox) + +_No response_ diff --git a/PW43_2025_Montreal/Projects/Template/README.md b/PW43_2025_Montreal/Projects/Template/README.md new file mode 100644 index 000000000..f1696f21c --- /dev/null +++ b/PW43_2025_Montreal/Projects/Template/README.md @@ -0,0 +1,57 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Write full project title here +category: Uncategorized + +key_investigators: +- name: Person Doe + affiliation: University + +- name: Person2 Doe2 + affiliation: University2 + country: Spain +--- + +# Project Description + + + +## Objective + + + +1. Objective A. Describe **what you plan to achieve** in 1-2 sentences. +1. Objective B. ... +1. Objective C. ... + +## Approach and Plan + + + +1. Describe specific steps of **what you plan to do** to achieve the above described objectives. +1. ... +1. ... + +## Progress and Next Steps + + + +1. Describe specific steps you **have actually done**. +1. ... +1. ... 
+ +# Illustrations + + + +# Background and References + + diff --git a/PW43_2025_Montreal/Projects/Template/README.md.j2 b/PW43_2025_Montreal/Projects/Template/README.md.j2 new file mode 100644 index 000000000..2dc08686c --- /dev/null +++ b/PW43_2025_Montreal/Projects/Template/README.md.j2 @@ -0,0 +1,55 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: {{ title | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +category: {{ category | regex_replace("\n$|\n\.\.\.\n$", "") }} + +key_investigators: +{% for investigator in investigators %} +- name: {{ investigator.name | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} + affiliation: {{ investigator.affiliation | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +{%- if investigator.country %} + country: {{ investigator.country | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +{%- endif %} +{% endfor %} +--- + +# Project Description + + + +{{ description }} + +## Objective + + + +{{ objective }} + +## Approach and Plan + + + +{{ approach }} + +## Progress and Next Steps + + + +{{ progress }} + +# Illustrations + + + +{{ illustrations }} + +# Background and References + + + +{{ background }} diff --git a/PW43_2025_Montreal/Projects/Tracked2DUltrasoundFor3DLiverImagingAndSegmentation/README.md b/PW43_2025_Montreal/Projects/Tracked2DUltrasoundFor3DLiverImagingAndSegmentation/README.md new file mode 100644 index 000000000..69fd9bc48 --- /dev/null +++ b/PW43_2025_Montreal/Projects/Tracked2DUltrasoundFor3DLiverImagingAndSegmentation/README.md @@ -0,0 +1,77 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Tracked 2D Ultrasound for 3D Liver Imaging and Segmentation +category: IGT and Training + +key_investigators: + +- name: Hassan Rivaz + affiliation: Concordia University + country: Canada + +- name: Hamza Rasaee + affiliation: Concordia University + country: Canada + +--- + +# Project Description + + + + +We want to image the liver using 2D ultrasound tracked with Optotrak and 
generate 3D ultrasound from the sweep. We then want to segment the 2D slices and the 3D volume to reconstruct the liver. + + + +## Objective + + + + +1. Objective A. Describe **what you plan to achieve** in 1-2 sentences. +We aim to generate a 3D ultrasound volume of the liver by performing a freehand sweep of 2D ultrasound images tracked using the Optotrak system. The final result will include both 2D slice-level and 3D volume-level liver segmentations to enable accurate liver reconstruction. + + + +## Approach and Plan + + + + +1. Describe specific steps of **what you plan to do** to achieve the above described objectives. + + + + +## Progress and Next Steps + + + + +1. Describe specific steps you **have actually done**. + + + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +_No response_ + diff --git a/PW43_2025_Montreal/Projects/TractographyVr/README.md b/PW43_2025_Montreal/Projects/TractographyVr/README.md new file mode 100644 index 000000000..cb5a30e37 --- /dev/null +++ b/PW43_2025_Montreal/Projects/TractographyVr/README.md @@ -0,0 +1,93 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Tractography-VR +category: VR/AR and Rendering + +key_investigators: + +- name: Tina Nantenaina + affiliation: ETS + country: Canada + +- name: Sylvain Bouix + affiliation: ETS + country: Canada + +- name: Simon Drouin + affiliation: ETS + country: Canada + +--- + +# Project Description + + + + +This project aims to work on VR interaction with tractograms in 3D Slicer. The goal is to develop tools that allow users to clean and annotate fiber bundles more accurately and efficiently in an immersive environment. This could support tasks such as cluster selection, labeling, or removal of outlier fibers during tractography analysis. + + + +## Objective + + + + +1. Objective A. Improve VR interaction with tractography data +2. Objective B. Improve VR interaction with the ROI box + + + + +## Approach and Plan + + + + +1. 
Identify the source of the positional mismatch between the controller and the selected object +2. Fix VR laser-object interaction offset: Ensure the object follows the controller movement precisely without lag or offset + + + + +## Progress and Next Steps + + + + +1. Users can now select and resize the ROI box in VR using the controller, which enables manual cleaning of fibers by adjusting the region of interest. +1. Different bugs were identified + 1. It was determined that the ROI is only moved relative to the controller translation, which explains why the box is not following the laser. This could be fixed by taking into account the rotation of the controller + 2. Usability issues and rendering artifacts come from a very low framerate that results from an incompatibility between the new Markup system in Slicer and SlicerVirtualReality +1. The short-term solution to the bug is to use a simple polygonal model as a ROI for fiber clipping instead of a ROI markup. +2. A script that illustrates how this can be done in Python is available [here](https://gist.github.com/drouin-simon/e2b5ecf77d53697e2e20c1d8fd016ea3). 
+ +# Illustrations + + + + + + + + + + + + +# Background and References + + + + +_No response_ + diff --git a/PW30_2019_GranCanaria/Projects/Data-glove_for_virtual_operations/20190201_095221.gif b/PW43_2025_Montreal/Projects/TractographyVr/demo3red.mp4 similarity index 65% rename from PW30_2019_GranCanaria/Projects/Data-glove_for_virtual_operations/20190201_095221.gif rename to PW43_2025_Montreal/Projects/TractographyVr/demo3red.mp4 index d32a0e3ca..126c183a8 100644 Binary files a/PW30_2019_GranCanaria/Projects/Data-glove_for_virtual_operations/20190201_095221.gif and b/PW43_2025_Montreal/Projects/TractographyVr/demo3red.mp4 differ diff --git a/PW43_2025_Montreal/Projects/TransitionSlicerDefaultBuildFromQt5ToQt6/README.md b/PW43_2025_Montreal/Projects/TransitionSlicerDefaultBuildFromQt5ToQt6/README.md new file mode 100644 index 000000000..a1fa4894a --- /dev/null +++ b/PW43_2025_Montreal/Projects/TransitionSlicerDefaultBuildFromQt5ToQt6/README.md @@ -0,0 +1,157 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Transition Slicer Default Build from Qt5 to Qt6 +category: Infrastructure + +key_investigators: + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware + country: USA + +- name: Sam Horvath + affiliation: Kitware + country: USA + +- name: James Butler + affiliation: Revvity + country: USA + +- name: Hans Johnson + affiliation: University of Iowa + country: USA + +--- + +# Project Description + + + + +This project focuses on updating Slicer's build system, dependencies, and related infrastructure to support building, packaging, and distributing Slicer with Qt6. + +This effort lays the groundwork for supporting native builds on macOS ARM systems. + + + +## Objective + + + + +1. Ensure all Slicer dependencies can be built with Qt6. +2. Enable building Slicer itself against Qt6. +3. _Tentative_: Update infrastructure and build environments to support packaging and continuous integration with Qt6 builds. 
+ + + + +## Approach and Plan + + + + +1. **Identify Suitable Qt6 Version**: Evaluate supported Qt6 versions to determine the most compatible and stable version for Slicer (tentatively targeting Qt 6.9). + +2. **Test Qt6 Compatibility of Dependencies**: Build Slicer’s external dependencies with Qt6, document any issues encountered, and work toward resolving them. + +3. **Enable Qt6 Build of Slicer**: Support configuring Slicer with `Slicer_REQUIRED_QT_VERSION=6.9` (or selected version) to enable Qt6-based builds. + +4. **Update Infrastructure and CI for Qt6** (_Tentative_) + - Update packaging scripts and build environments (e.g., Docker images, GitHub Actions runners) to support Qt6-based builds. + - Add CI jobs to test, build, and package Slicer against Qt6 on supported platforms (Linux, Windows, macOS ARM/x86_64). + - Validate the creation of functioning Slicer packages built with Qt6. + + + + +## Progress and Next Steps + + + +* Target Qt Version: Qt 6.9 +* Initial focus is on enabling Slicer and its dependencies to build on macOS arm64 with Qt6. Work is ongoing across the main Slicer repository, key dependencies, and related infrastructure. 
+ + +### Status of Qt6 & macOS arm64 support + +| | Status | Progress | What | +|----------------|--------|----------|-------------------| +| PythonQt | ⏳ | 95% | Qt6 & macOS arm64 | +| qRestAPI | ✅ | 100% | Qt6 & macOS arm64 | +| QtTesting | ⏳ | 95% | Qt6 & macOS arm64 | +| CTK | ⏳ | 5% | Qt6 & macOS arm64 | +| CTKAPPLAUNCHER | | | Qt6 & macOS arm64 | +| VTK | ✅ | 100% | Qt6 & macOS arm64 | +| Slicer | | | Qt6 & macOS arm64 | +| teem | ✅ | 100% | macOS arm64 | +| rapidjson | ✅ | 100% | macOS arm64 | +| OpenSSL | ✅ | 100% | macOS arm64 | + + +### Slicer Updates + +* CMake and Build System + * [PR-8501](https://github.com/Slicer/Slicer/pull/8501): ✅ Update minimum required CMake version from 3.16.3 to 3.20.6 + * [PR-8491](https://github.com/Slicer/Slicer/pull/8491): ⏳ Build cmake4 WORK IN PROGRESS WIP + +* Dependency Updates + * teem + - [PR-8500](https://github.com/Slicer/Slicer/pull/8500): ✅ Update teem from r6245 to r7265 + - [PR-8503](https://github.com/Slicer/Slicer/pull/8503): ✅ Update teem to fix windows build + * rapidjson + - [PR-8502](https://github.com/Slicer/Slicer/pull/8502): ✅Update RapidJSON to latest revision + - [PR-8508](https://github.com/Slicer/Slicer/pull/8508): ✅ Fix configuration on Windows by updating RapidJSON project installation + - Pull requests contributed to upstream `Tencent/rapidjson`: ⏳ [PR-2343](https://github.com/Tencent/rapidjson/pull/2343), ⏳ [PR-2344](https://github.com/Tencent/rapidjson/pull/2344) + * OpenSSL + - [PR-8504](https://github.com/Slicer/Slicer/pull/8504): ✅ OpenSSL 1.1.1w is needed to fix missing include + - [PR-8513](https://github.com/Slicer/Slicer/pull/8513): ✅ Fix OpenSSL 1.1.1w build on macOS with non-system zlib + +* Compiler Warning Fixes + * [PR-8509](https://github.com/Slicer/Slicer/pull/8509): ✅ Fix deprecated declarations related to vtkStdString + * 
[PR-8510](https://github.com/Slicer/Slicer/pull/8510): ✅ Fix unused variable warning in qMRMLSortFilterColorProxyModel + +* Identified issues + * [PR-8515](https://github.com/Slicer/Slicer/pull/8515): ✅ Ensure deepcopy is propagated in vtkMRMLSequenceBrowserNode::CopyContent + +### QtTesting Updates + +* ⏳ Support for Qt6 being finalized + +### PythonQt Updates + +* ⏳ [commontk/PythonQt PR-90](https://github.com/commontk/PythonQt/pull/90), ⏳ [MeVisLab/pythonqt PR-269](https://github.com/MeVisLab/pythonqt/pull/269) + - Qt6 Porting: Updated code to support Qt6, including replacing deprecated QVariant::Type with QMetaType and adding version checks for Qt5/Qt6 compatibility. + - C++17/20 Modernization: Refactored code to use modern C++ features as required by newer Qt versions (e.g., constexpr, noexcept, alignas, etc.). + - Compiler Detection Updates: Enhanced and updated compiler detection macros in qcompilerdetection.h for better support of recent compilers and platforms. + - Warning and Attribute Macros: Improved handling of compiler warnings and attributes, including support for new C++ attributes like [[nodiscard]], [[maybe_unused]], and [[deprecated]]. + - Platform and Feature Checks: Added or updated macros for platform-specific and feature-specific checks, ensuring better cross-platform compatibility. + - General Maintenance: Bug fixes, code cleanup, and improved documentation/comments throughout the codebase. 
+ +### simplecpp Updates (used by PythonQt wrapper generator) + +* ⏳ [PR-448](https://github.com/danmar/simplecpp/pull/448): ENH: Add support to find Headers in Apple Frameworks + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +* [Slicer Issue](https://github.com/Slicer/Slicer/issues/6388) +* [Build Instructions PW43](https://projectweek.na-mic.org/PW43_2025_Montreal/Projects/SlicerBuildInstructionUpdates/) + diff --git a/PW43_2025_Montreal/Projects/UniversalToothLabelingModule/README.md b/PW43_2025_Montreal/Projects/UniversalToothLabelingModule/README.md new file mode 100644 index 000000000..dfdea0e1b --- /dev/null +++ b/PW43_2025_Montreal/Projects/UniversalToothLabelingModule/README.md @@ -0,0 +1,99 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Universal Tooth Labeling Module +category: Segmentation / Classification / Landmarking + +key_investigators: + +- name: Enzo Tulissi + affiliation: University of Michigan + country: USA + +- name: Lucia Cevidanes + affiliation: University of Michigan + country: USA + +- name: Juan Prieto + affiliation: University of North Carolina + country: USA + +- name: Jonas Bianchi + affiliation: University of Pacific + country: USA + +--- + +# Project Description + + + + +The **Universal Tooth Labeling** module employs nnUNet to automatically label all teeth (including primary dentition) from CBCT scans. +It aims to deliver robust, anatomically correct labels for each tooth. + +Currently, some outputs exhibit left/right mirroring errors (e.g., both canines labeled as “right canine”). + + + +## Objective + + + + +1. **Resolve mirroring errors** in the labeling output. +2. **Optimize nnUNet training** (patch size, batch size, learning rate) and augmentations. +3. **Implement post‐processing checks** to verify and correct side‐specific labels. + + + +## Approach and Plan + + + + +1. 
Analyze cases with mirroring errors and identify their characteristics. +2. Tune nnUNet hyperparameters for optimal label accuracy. +3. Develop a post‐processing module to enforce correct left/right assignment. +4. Evaluate performance (DSC, IoU) on a dedicated test set. + + + +## Progress and Next Steps + + + + +1. Describe specific steps you **have actually done**. +**Completed:** +- Initial nnUNet pipeline and label export implemented. +- Prototype output integrated into 3D Slicer. + +**Next Steps:** +- Fix mirroring bugs in output labels. +- Conduct clinical validation and benchmarking. + + + +# Illustrations + + + + +![image](https://github.com/user-attachments/assets/5632a488-8b6d-45b3-946c-567c8d40b613) + +*Figure 1: Example output from Universal Tooth Labeling* + + + +# Background and References + + + + +- [Universal Tooth Labeling (in progress)](https://github.com/DCBIA-OrthoLab/SlicerAutomatedDentalTools/tree/main/BATCHDENTALSEG) + diff --git a/PW43_2025_Montreal/Projects/UpdateSlicerBuildInstructions b/PW43_2025_Montreal/Projects/UpdateSlicerBuildInstructions new file mode 100644 index 000000000..1275683ba --- /dev/null +++ b/PW43_2025_Montreal/Projects/UpdateSlicerBuildInstructions @@ -0,0 +1,14 @@ +# Improve Slicer build instructions +Building a custom version of Slicer has become increasingly difficult. + - Identify how to install Qt on an ARM-based Apple M4 computer. + - Identify how to install Qt on an Ubuntu 24.04 Linux computer. + + +# Investigate the status of moving to Qt6 +Provide support/testing environments for migration to Qt6. + +# Investigate updating TCLAP to build cleanly, perhaps update to latest version of TCLAP. + +# Motivation +I'd like to prepare Slicer for future versions of ITK, but configuring a stable build environment has been more challenging than expected (primarily regarding Qt 5 requirements). 
+ diff --git a/PW43_2025_Montreal/Projects/UsingIdcAndAiForHypothesesExplorationInTheNlstCohort/README.md b/PW43_2025_Montreal/Projects/UsingIdcAndAiForHypothesesExplorationInTheNlstCohort/README.md new file mode 100644 index 000000000..8c6a1cf78 --- /dev/null +++ b/PW43_2025_Montreal/Projects/UsingIdcAndAiForHypothesesExplorationInTheNlstCohort/README.md @@ -0,0 +1,127 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Using IDC and AI for hypotheses exploration in the NLST cohort +category: Quantification and Computation + +key_investigators: + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +- name: Fadwa Elfeituri + affiliation: Georgetown University + country: USA + +- name: Pari Shah + affiliation: Georgetown University + country: USA + +- name: Vamsi Thiriveedhi + affiliation: BWH + country: USA + +- name: Deepa Krishnaswamy + affiliation: BWH + country: USA + +- name: Yuriy Gusev + affiliation: Georgetown University + country: USA + +--- + +# Project Description + + + + +In 2024 [Imaging Data Commons](https://portal.imaging.datacommons.cancer.gov/explore) team processed CT images available in the National Lung Screening Trial (NLST) collection using TotalSegmentator and pyradiomics to segment anatomical organs and extract basic first order (e.g., mean intensity) and shape features (e.g., volume), and released all of the extracted quantitative features publicly in the IDC [`TotalSegmentator-CT-Segmentations` analysis results collection](https://portal.imaging.datacommons.cancer.gov/explore/filters/?analysis_results_id=TotalSegmentator-CT-Segmentations) [1,2]. A basic Looker dashboard for exploring this dataset is available [here](https://lookerstudio.google.com/reporting/c3e2965e-e615-4b4b-b523-1fc335dd9d43). 
+ +The motivation for that work was to utilize segmentation as a tool - as opposed as the objective of research endeavor - to enable downstream use of the data for exploration and hypothesis generation by the users who do not have expertise/time/resources to extract those quantitative features on their own. + +To date, one study was published utilizing liver mean attenuation measurements for this dataset to explore a novel biomarker of liver disease [3]. We also investigated detection of outliers/erroneous segmentation results in this dataset [4]. + +In this project we aim to improve materials/resources that accompany the dataset to make it more accessible to the community, and explore research directions where the extracted quantitative features can be useful. AI will facilitate and enable this project at multiple levels: for segmenting the regions of interest, extraction of radiomics features, analyzing relevant research literature for identifying hypotheses of interest, and facilitating exploration of those hypothesis on the real data. + + + +## Objective + + + + +1. Improve accessibility and usability of the dataset to simplify its use. +2. Demonstrate the utility of the dataset to inspire future users. +3. Prepare materials for a future publication to raise awareness of the dataset more broadly. + + + +## Approach and Plan + + + + +1. Develop tutorial notebooks explaining how to access and use the dataset. +2. Develop interactive dashboard(s) to simplify exploration of the dataset by non-technical users, collect feedback from potential/target users. +3. Identify and explore specific hypotheses that can be facilitated by the dataset, capture this exploration in notebooks. + + + +## Progress and Next Steps + + + + +1. Developed a [new tutorial notebook](https://tinyurl.com/y87pusa4) describing access to the TotalSegmentator-derived measurements, and demonstrating how to combine those with clinical data and image acquisition parameters. +2. 
Iterated on the development of the Tableau and Plotly notebooks; those are not yet ready for general availability. +3. Worked on exploring clinically-relevant questions using TotalSegmentator-derived measurements. + 1. Used Gemini to analyze the existing literature and suggest possible directions for exploration, as summarized in [this document](https://tinyurl.com/2vdsep3b). + 2. Selected investigation of longitudinal changes in the Lower Left Lung Lobe (LLLL) as the question of interest. + 3. Developed BigQuery queries to combine relevant attributes (measurements, demographics, acquisition) into a single table. Implemented rules to clean the data (remove patients with incomplete longitudinal history, select scans that have complete coverage of the lung based on availability of C7 and T12 vertebrae). + 4. Developed [notebook](https://tinyurl.com/3sfunh44) exploring the hypothesis and visualizing relevant data. No strong signal was identified for the selected question, but the data can now be used more easily for exploring any other similar hypotheses. + 5. Learned a bit about human anatomy, and found interesting cases that appear to be correctly segmented by TotalSegmentator but "highly unusual" based on the LLLL volume, per perplexity dot ai. + 6. Further clinical data can be requested from [CDAS](https://cdas.cancer.gov/nlst/) - unfortunately, some key demographics data, such as patient weight and height, is not included in the public offering. + +Check out [this notebook](https://tinyurl.com/3sfunh44) if you want to know more about the below! 
+ +![](https://github.com/user-attachments/assets/dc5cf7a0-ed13-4a66-bf94-4a5c8995287d) + +![](https://github.com/user-attachments/assets/091602a8-b786-4776-8cba-a9360c0e7998) + +![](https://github.com/user-attachments/assets/ffc9a1ea-35c3-48df-b9fd-bb0560e27c32) + + +# Illustrations + + + + +![Example of segmentation](https://github.com/user-attachments/assets/869d4b98-f30f-46a5-b2b4-8654d243e931) +Individual images/segmentations can be loaded in Slicer using [SlicerIDCExplorer](https://github.com/imagingdatacommons/sliceridcbrowser) extension. + +![T8 vertebra volume distribution](https://github.com/user-attachments/assets/0632004b-a481-427d-994b-6b110e396844) +A basic Looker dashboard for exploring this dataset is available [here](https://lookerstudio.google.com/reporting/c3e2965e-e615-4b4b-b523-1fc335dd9d43), screenshot above displays distribution of volume of the T8 vertebra. + +![Vertebrae volume](https://github.com/user-attachments/assets/85b8fc85-3c7f-4738-9732-a8bf1867a20e) +Plot from the [Krishnaswamy et al. 2024](http://arxiv.org/abs/2406.14486) study summarizing concordance between the trends observed for the volume measurements of the total vertebrae volume derived using TotalSegmentator on a cohort of ~20,000 subjects vs a published study [Limthongkul et al. 2010](https://pubmed.ncbi.nlm.nih.gov/20142072/) measuring vertebral body volume conducted on the data from 40 subjects. + + + +# Background and References + + + + +[1] Thiriveedhi, V. K., Krishnaswamy, D., Clunie, D., Pieper, S., Kikinis, R. & Fedorov, A. Cloud-based large-scale curation of medical imaging data using AI segmentation. Research Square (2024). [https://doi.org/10.21203/rs.3.rs-4351526/v1](https://doi.org/10.21203/rs.3.rs-4351526/v1) + +[2] Thiriveedhi, V. K., Krishnaswamy, D., Clunie, D. & Fedorov, A. 
TotalSegmentator segmentations and radiomics features for NCI Imaging Data Commons CT images. (2024). [https://doi.org/10.5281/zenodo.8347012](https://doi.org/10.5281/zenodo.8347012) + +[3] Weiss, J., Bernatz, S., Johnson, J., Thiriveedhi, V., Mak, R. H., Fedorov, A., Lu, M. T. & Aerts, H. J. W. Opportunistic assessment of steatotic liver disease in lung cancer screening eligible individuals. J. Intern. Med. (2025). [https://doi.org/10.1111/joim.20053](https://doi.org/10.1111/joim.20053) + +[4] Krishnaswamy, D., Thiriveedhi, V. K., Ciausu, C., Clunie, D., Pieper, S., Kikinis, R. & Fedorov, A. Rule-based outlier detection of AI-generated anatomy segmentations. arXiv [eess.IV] (2024). at [http://arxiv.org/abs/2406.14486](http://arxiv.org/abs/2406.14486) diff --git a/PW43_2025_Montreal/Projects/VisualizingBrainDeformation/README.md b/PW43_2025_Montreal/Projects/VisualizingBrainDeformation/README.md new file mode 100644 index 000000000..2e668da5f --- /dev/null +++ b/PW43_2025_Montreal/Projects/VisualizingBrainDeformation/README.md @@ -0,0 +1,106 @@ +--- +layout: pw43-project + +permalink: /:path/ + +project_title: Visualizing Brain Deformation +category: Quantification and Computation + +key_investigators: + +- name: Isabel Frolick + affiliation: McGill University + country: Canada + +- name: Elise Donzselmann-Lund + affiliation: McGill University + country: Canada + +- name: Étienne Léger + affiliation: McGill University + country: Canada + +--- + +# Project Description + + + + +This project introduces a 3D Slicer plug-in for visualizing and quantifying non-linear brain deformation fields as seen during MRI-ultrasound registration. 
+ + +While existing tools such as Transform Visualizer can display vector fields or deformation grids, they have several limitations for the purposes of non-linear MRI-ultrasound registration — notably, limited compatibility with ultrasound-derived data (you must specify the deformation as either rigid, affine, or B-spline, none of which will work on ultrasound out of the box) and the inability to express deformation magnitudes in real-world physical units. + +Our module addresses these gaps by enabling quantitative visualization of deformation in two ways: 1) dense displacement magnitude in millimetres and 2) determinant of the Jacobian magnitude to quantify the local stretching or squishing of space, to provide intuitive visual feedback on tissue expansion and compression. + +This module can be used with any non-linear registration method. Testing data was developed using a published and validated MRI-ultrasound registration pipeline from our lab [Rivaz, 2014] as a basis for non-rigid registration. We then use the resulting transformation to quantify how much brain shift (deformation) is occurring at each voxel and convert this deformation to millimetres. + +This module allows researchers to better understand the extent and spatial distribution of brain shift occurring during neurosurgical interventions. + + + +## Objective + + + + +1. Objective A. Quantitative Deformation Visualization: Compute voxel-wise deformation in millimetres and visualize both displacement magnitude and Jacobian-based expansion/compression as an out-of-the-box solution. Introduce intuitive colour maps and functionality to enable users to examine quantitative deformation using the cursor. +2. Objective B. Simplified Landmark-Based Analysis: Introduce functionality for .tag to .fcsv (Slicer compatible) landmark registration. Explicitly show the Euclidean distance between corresponding landmarks within the module, for improved visualization. + + + + +## Approach and Plan + + + + +1. 
Develop Slicer module that accepts a moving image (US), source image (MRI), and transformation (output from Step 1) to visualize the deformation field intuitively. +2. Compute physical displacements from voxel-wise transformation fields, converting deformation to millimetres in real-world space. Additionally, calculate the Jacobian determinant to characterize local tissue expansion or compression. +3. Develop an intuitive visualization interface that overlays deformation maps on the MRI or ultrasound volumes, supporting both default and custom colour maps, opacity adjustment, and threshold control. +4. Incorporate landmark handling and interactive displacement queries: + - Convert .tag files to Slicer-compatible .fcsv format. + - Display Euclidean distances between corresponding landmarks. + - Track cursor position and dynamically display displacement in millimetres. + + +## Progress and Next Steps + + + + +Completed: +- Implemented displacement visualization in millimetres +- Added Jacobian magnitude visualization for local expansion/compression. +- Integrated custom and default colour map options. +- Added cursor tracking, showing displacement magnitude in real time. +- Implemented landmark conversion from .tag → .fcsv and visualization of Euclidean distances between corresponding landmarks. +- Added opacity and threshold controls for flexible visualization. + +Next Steps: +- Refine user interface for smoother workflow integration in Slicer. +- Validate deformation quantification on additional datasets. 
+- Developing test cases and debugging + +# Illustrations + + + +[https://mcgill-my.sharepoint.com/:v:/g/personal/etienne_leger_mcgill_ca/EYxMVat9qgxHnoQ-7JhEq_0BCcxIrsJj4NhBqJVAEP02cQ?e=57a12U&nav=eyJyZWZlcnJhbEluZm8iOnsicmVmZXJyYWxBcHAiOiJTdHJlYW1XZWJBcHAiLCJyZWZlcnJhbFZpZXciOiJTaGFyZURpYWxvZy1MaW5rIiwicmVmZXJyYWxBcHBQbGF0Zm9ybSI6IldlYiIsInJlZmVycmFsTW9kZSI6InZpZXcifX0%3D](https://mcgill-my.sharepoint.com/:v:/g/personal/etienne_leger_mcgill_ca/EYxMVat9qgxHnoQ-7JhEq_0BCcxIrsJj4NhBqJVAEP02cQ?e=57a12U&nav=eyJyZWZlcnJhbEluZm8iOnsicmVmZXJyYWxBcHAiOiJTdHJlYW1XZWJBcHAiLCJyZWZlcnJhbFZpZXciOiJTaGFyZURpYWxvZy1MaW5rIiwicmVmZXJyYWxBcHBQbGF0Zm9ybSI6IldlYiIsInJlZmVycmFsTW9kZSI6InZpZXcifX0%3D) + + + + + +# Background and References + + + + +The registration method being implemented on testing data: Rivaz H, Karimaghaloo Z, Fonov VS, Collins DL. Nonrigid registration of ultrasound and MRI using contextual conditioned mutual information. IEEE Trans Med Imaging. 2014 Mar;33(3):708-25. doi: 10.1109/TMI.2013.2294630. PMID: 24595344. + +For use of testing data, please contact isabel.frolick@mail.mcgill.ca + diff --git a/PW43_2025_Montreal/README.md b/PW43_2025_Montreal/README.md new file mode 100644 index 000000000..eb6467dd3 --- /dev/null +++ b/PW43_2025_Montreal/README.md @@ -0,0 +1,164 @@ +--- +permalink: /:path/ +redirect_from: +- /PW43_2025_Montreal/README.html +- /PW43_2025_Montreal/Readme.html + +project_categories: +- DICOM +- IGT and Training +- VR/AR and Rendering +- Segmentation / Classification / Landmarking +- Quantification and Computation +- Registration +- Cloud / Web +- Infrastructure +- Other +--- + +# Welcome to the web page for the 43rd Project Week! + + +Summary: +[This event](https://projectweek.na-mic.org/PW43_2025_Montreal/README.html) took place June 23rd - June 27th, 2025 in Montreal, Canada. 56 participants worked on 41 projects. 
+ +## Location + +École de Technologie Supérieure (ETS), Montreal, Canada - **E Building** + +The images below show how to get to the Project Week 43 conference room. If you Google "ETS", it will take you to the main building of the university (1). You need to walk about 200 meters to get to the building called "Maison des étudiants" (2). Enter the building and either climb the stairs or take the elevator on the right to reach the second floor(3). From there, you should easily find the registration desk. + +Venue entrance on Google Maps: [https://goo.gl/maps/xNedgMBt4C6jwiCu5](https://goo.gl/maps/xNedgMBt4C6jwiCu5) + + + +## Wifi +**Network name**: ETS-NAMIC-Invite +**User name**: ets-namic-invite@etsmtl.ca +**password**: EY5VRT + +## How to participate + +* We hold weekly online preparation meetings before the workshop (Tuesdays 10am ET, from April 29 until the start of the workshop, [zoom link](https://etsmtl.zoom.us/j/82098172682?pwd=OHXPL5SlaLt817CzeBu8C1Pu21PvqW.1) +* These meetings are an opportunity to introduce yourself, find a project you want to participate in during the workshop or propose one yourself and find collaborators. You will also find out more about how the workshop works. +* If you have a project in mind already, you can create a draft of your project page in just a few minutes using the form [here](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=sjh26&labels=project%2Cevent%3APW43_2025_Montreal&projects=&template=project.yml&title=Project%3A+). The project description will help drive the discussion during preparation meetings. +* **Register as soon as possible** to help us plan the number of attendees (see instructions below) + +## Registration +You can register for PW43 using the form [here](https://thepointofsale.com/tickets/namic-project-week-43). The registration fee is 400 CAN$ (approx. 290 US$ or 255 Euros) + 21.71 $CAN payment platform fees. 
It covers lunch for the 5 days of the workshop plus coffee and snacks throughout the day. + +## Remote participation +For members of the community that are unable to attend Project Week in person this time, it will be possible to watch the main sessions that will be broadcast on Zoom: +1. Project introduction (Monday 10am ET) +2. What's new in Slicer breakout session (Tuesday 10am ET) +3. Project results presentation (Friday 10am ET) +Use this [zoom link](https://etsmtl.zoom.us/j/82098172682?pwd=OHXPL5SlaLt817CzeBu8C1Pu21PvqW.1) to join. + +A smaller conference room will be available throughout the week for other breakout sessions and meetings between on-site teams and remote participants. The room can be joined using the following [zoom link](https://etsmtl.zoom.us/j/98174045874?pwd=WTy1O4Q24f1jaanNelkZssymq6af2b.1). + +## Discord +The **Discord** application is used to communicate between team members and organize activities before and during Project Week. Please join the Project Week [Discord server](https://discord.gg/AkxzKvqMBp) as soon as possible and explore its functionality before the workshop. For more information on the use of Discord before and during Project Week, please visit [this page](../common/Discord.md). + +## Agenda + +{% include calendar.md from="2025-06-23" to="2025-06-27"%} + +## Breakout sessions + +[Day 1 - Slicer Workflow Breakout](BreakoutSessions/Workflows/README.md) + +[Day 2 - Slicer Update Breakout](BreakoutSessions/Slicer/README.md) + +[Day 3 - Neuroanatomy Breakout](BreakoutSessions/Neuroanatomy/README.md) + +[Day 4 - AR-VR and Rendering Breakout](BreakoutSessions/ARVRRendering/README.md) + +## Projects + +Begin creating your project page [here](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=sjh26&labels=project%2Cevent%3APW43_2025_Montreal&projects=&template=project.yml&title=Project%3A+)!
+ +To learn how to create or update project pages, please refer to the [contributing project pages](ContributingProjectPages.md) section. + +{% include projects_noloc.md %} + +## Registrants + +Do not add your name to this list below. It is maintained by the organizers based on your registration. + +List of registered participants so far (names will be added here after processing registrations): + + + +1. Simon Drouin, ETS, Canada +1. Rafael Palomar, Oslo University Hospital, Norway +1. Ron Kikinis, Brigham and Women’s Hospital and Harvard Medical School, United States +1. Steve Pieper, Isomics, Inc., United States +1. Adam Wittek, UWA, Australia +1. Sajjad Arzemanzadeh, UWA, Australia +1. Liam O'Connor, Concordia University, Canada +1. Andrey Fedorov, Brigham and Women’s Hospital and Harvard Medical School, United States +1. Ivan Johnson-Eversoll, University of Iowa, United States +1. Hans Johnson, University of Iowa, United States +1. Chi Zhang, Texas A&M College of Dentistry, United States +1. Martin Bellehumeur, Bellehumeur Engineering, Germany +1. Mostafa Jamshidian, The University of Western Australia (UWA) , Australia +1. Ihssene Brahimi, ETS, Canada +1. Andrey Titov, ETS, Canada +1. Jarrett Rushmore, Boston University, United States +1. Vitor Azevedo Padovani, ETS, Canada +1. Benoît Verreman, ETS, Canada +1. Mauricio Juárez, ETS, Canada +1. Andras Lasso, Queen's University, Canada +1. Deepa Krishnaswamy, Brigham and Women’s Hospital and Harvard Medical School, United States +1. Étienne Léger, McGill University, Canada +1. Nicholas Kawwas, Concordia University, Canada +1. Amoon Jamzad, Queen's University, Canada +1. Lena Giebeler, RWTH Aachen University and Brigham and Women’s Hospital, United States +1. Maxime Bouthillier, Université de Montréal, Canada +1. Ahmed Rekik, ETS, Canada +1. Tina Nomena Herimino Nantenaina, ETS, Canada +1. Isabel Frolick, McGill University, Canada +1. Elise Donszelmann-Lund, McGill University, Canada +1. 
Hyung Tae Park, Truabutment, inc., United States +1. Junichi Tokuda, Brigham and Women's Hospital, United States +1. Enzo Tulissi, University of Michigan, United States +1. Alban Gaydamour, University of Michigan, United States +1. Lucia Cevidanes, University of Michigan, United States +1. Kyle Sunderland, Queen's University, Canada +1. Naomi Catwell, ETS, Canada +1. Jean-Christophe Fillion-Robin, Kitware, United States +1. Samantha Horvath, Kitware, United States +1. Houssem Gueziri, TÉLUQ University, Canada +1. Rui Li, New York University, United States +1. Tina Kapur, Brigham and Womens Hospital, United States +1. Sylvain Bouix, ETS, Canada +1. Hamze Rasaee, Concordia University, Canada +1. Tamas Ungi, ClaroNav Kolahi Inc, Canada +1. Sean Chen, ClaroNav Kolahi Inc, Canada +1. Juntae Park, AIRS Inc., South Korea +1. Benjamin Zwick, UWA, Australia +1. Raphaël Christin, McGill University, Canada +1. Taeyoung Ted Park, Truabutment, inc., South Korea +1. Maha Kesibi, Queen's University, Canada +1. Lipeng Ning, Brigham and Women’s Hospital and Harvard Medical School, United States +1. Kaveh Moradkhani, ETS, Canada +1. Daniel Haehn, UMass Boston, United States +1. Paul Baksic, Centre Inria de l'Université de Lorraine, France +1. 
Mauro Ignacio Dominguez, Independent, Argentina + + +## Statistics + + + +## Organizers + +* [@tkapur](https://github.com/tkapur) ([Tina Kapur, PhD](http://www.spl.harvard.edu/pages/People/tkapur)), +* [@drouin-simon](https://github.com/drouin-simon) ([Simon Drouin, PhD](https://drouin-simon.github.io/ETS-web//)) +* [@rafaelpalomar](https://github.com/rafaelpalomar) ([Rafael Palomar, PhD](https://www.ntnu.edu/employees/rafaelp)) +* [@piiq](https://github.com/piiq) ([Theodore Aptekarev](https://discourse.slicer.org/u/pll_llq)) +* [@sjh26](https://github.com/sjh26) ([Sam Horvath, PhD](https://www.kitware.com/samantha-horvath/)) + +## History +Please read about our experience in running these events since 2005: [Increasing the Impact of Medical Image Computing Using +Community-Based Open-Access Hackathons: the NA-MIC and 3D Slicer Experience](http://perk.cs.queensu.ca/sites/perkd7.cs.queensu.ca/files/Kapur2016.pdf). diff --git a/PW43_2025_Montreal/images/PW43-venue.png b/PW43_2025_Montreal/images/PW43-venue.png new file mode 100644 index 000000000..16908a80e Binary files /dev/null and b/PW43_2025_Montreal/images/PW43-venue.png differ diff --git a/PW43_2025_Montreal/images/pw39-event-photos.jpg b/PW43_2025_Montreal/images/pw39-event-photos.jpg new file mode 100644 index 000000000..59b87ac76 Binary files /dev/null and b/PW43_2025_Montreal/images/pw39-event-photos.jpg differ diff --git a/PW43_2025_Montreal/images/readme.txt b/PW43_2025_Montreal/images/readme.txt new file mode 100644 index 000000000..a6e4a119c --- /dev/null +++ b/PW43_2025_Montreal/images/readme.txt @@ -0,0 +1 @@ +Please use this directory to store all images included in the PW43 page diff --git a/PW44_2026_GranCanaria/ContributingProjectPages.md b/PW44_2026_GranCanaria/ContributingProjectPages.md new file mode 100644 index 000000000..609e5bef0 --- /dev/null +++ b/PW44_2026_GranCanaria/ContributingProjectPages.md @@ -0,0 +1,84 @@ +--- 
+--- +{% comment %}Extract event name (e.g "PW39_2023_Montreal"). {% endcomment %} +{%- assign event_name = page.path | split: '/' | first -%} + +# Contributing Project Pages + +## Creating new project pages + +With the [Project Week GitHub Issue page](https://github.com/NA-MIC/ProjectWeek/issues/new/choose), you have two options to create your Project Page: + + +1. [Create a Project](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=sjh26&labels=project%2Cevent%3A{{ event_name }}&projects=&template=project.yml&title=Project%3A+) issue: If you are ready to create your page, you can simply create a “Project” issue. This issue will allow you to fill out a convenient form to provide the necessary details. The Project Week website team will then review the issue and trigger the page creation pull request. + +2. [Create the project page yourself using the template](Projects/Template/README.md): If you prefer to create the Project Page yourself, you can still do so by using the provided template and submitting a pull request. + +## Project Creation Tips + +- Get your project pages created early! The day before is best to make sure everything you need for your presentation is available. The ProjectWeek site will be closed to edits for the ***10 minutes before*** both the opening and closing presentation session to ensure site generation. After this 10 minute period edits will be re-enabled. + +- If you are [creating the project page yourself using the template](Projects/README.md), **don't reuse a project page template from a previous year.** We have made significant updates to the template to support auto-generation of project pages, so previous years' templates will not function properly.
+ + - When naming the file, **please ensure there are no spaces/special characters in the folder or file name** + - Make sure to fill out / update all of the information at the top of the README file (title, category, location, etc) + +- Remember to fill out the title for your project when using the [project creation issue](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=drouin-simon%2Cpiiq%2Crafaelpalomar%2Csjh26%2Ctkapur&labels=project%2Cevent%3A{{ event_name }}&projects=&template=project.yml&title=Project%3A+) + +- Check the formatting on the Key Investigators list when creating a [project issue](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=drouin-simon%2Cpiiq%2Crafaelpalomar%2Csjh26%2Ctkapur&labels=project%2Cevent%3A{{ event_name }}&projects=&template=project.yml&title=Project%3A+) (this is critical for page generation): + + `- Firstname Lastname (Affiliation, Country)` + +## Updating existing project pages + +Here are the steps using the GitHub web interface: + +1. Navigate to your project's `README.md` on the GitHub website. For instance, if you want to update a project called **YourProjectName**, visit the URL like the following: + + ``` + https://github.com/NA-MIC/ProjectWeek/blob/master/{{ event_name }}/Projects/YourProjectName/README.md + ``` + +2. Click the edit button, as shown in this screenshot: ![Screenshot 2023-06-12 10 43 35](https://github.com/NA-MIC/ProjectWeek/assets/25040869/ab01a7bf-c1e4-4c23-9aca-e2c6421ca530) + +3. You can now edit the page, add images by dragging and dropping, and more. + +4. Once done, click "Commit Changes", and follow the instructions to create a fork and a pull request to add your changes to the webpage. 
See this screenshot for reference: ![Screenshot 2023-06-12 10 50 50](https://github.com/NA-MIC/ProjectWeek/assets/25040869/180e81bb-d4f9-4f65-8569-a93192b2828e) + +## Videos in project pages + +Here are some steps to make sure all of your awesome videos render correctly: + +1. Videos added by drag and drop will render correctly when viewed through GitHub, but need some extra tweaks to work in the final generated website. + + + In your `README.md`, if you have a video link that looks like this: + + ``` + https://github.com/NA-MIC/ProjectWeek/assets/66890913/8f257f29-fa9c-4319-8c49-4138003eba27 + ``` + + Update it to: + + ```html + + ``` + +2. Links to externally hosted videos (such as YouTube) will need an iframe. + + Replace: + + ``` + https://youtu.be/ZWxE5QcGvE8 + ``` + + with + + ````html + + ```` diff --git a/PW44_2026_GranCanaria/Photos/PW44.gif b/PW44_2026_GranCanaria/Photos/PW44.gif new file mode 100644 index 000000000..65d402a6e Binary files /dev/null and b/PW44_2026_GranCanaria/Photos/PW44.gif differ diff --git a/PW44_2026_GranCanaria/Photos/PW44_01.jpg b/PW44_2026_GranCanaria/Photos/PW44_01.jpg new file mode 100644 index 000000000..38e262c50 Binary files /dev/null and b/PW44_2026_GranCanaria/Photos/PW44_01.jpg differ diff --git a/PW44_2026_GranCanaria/Photos/PW44_02.jpg b/PW44_2026_GranCanaria/Photos/PW44_02.jpg new file mode 100644 index 000000000..7eb591cc9 Binary files /dev/null and b/PW44_2026_GranCanaria/Photos/PW44_02.jpg differ diff --git a/PW44_2026_GranCanaria/Photos/PW44_03.jpg b/PW44_2026_GranCanaria/Photos/PW44_03.jpg new file mode 100644 index 000000000..ad5d8a2cc Binary files /dev/null and b/PW44_2026_GranCanaria/Photos/PW44_03.jpg differ diff --git a/PW44_2026_GranCanaria/Photos/PW44_04.jpg b/PW44_2026_GranCanaria/Photos/PW44_04.jpg new file mode 100644 index 000000000..774f0ab73 Binary files /dev/null and b/PW44_2026_GranCanaria/Photos/PW44_04.jpg differ diff --git a/PW44_2026_GranCanaria/Photos/PW44_Group.jpg 
b/PW44_2026_GranCanaria/Photos/PW44_Group.jpg new file mode 100644 index 000000000..ede36e773 Binary files /dev/null and b/PW44_2026_GranCanaria/Photos/PW44_Group.jpg differ diff --git a/PW44_2026_GranCanaria/Projects/AModularPipelineForQualityControlledMultiModelBoneSegmentationInCtImaging/README.md b/PW44_2026_GranCanaria/Projects/AModularPipelineForQualityControlledMultiModelBoneSegmentationInCtImaging/README.md new file mode 100644 index 000000000..7ce975d03 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/AModularPipelineForQualityControlledMultiModelBoneSegmentationInCtImaging/README.md @@ -0,0 +1,117 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: A Modular Pipeline for Quality-Controlled Multi-Model Bone Segmentation in CT Imaging +category: Segmentation / Classification / Landmarking +presenter_location: + +key_investigators: + +- name: Hamid Alavi + affiliation: University of Twente + country: The Netherlands + +--- + +# Project Description + + + + +This project focuses on developing a pipeline for large-scale bone segmentation from CT images, with a focus on quality control and human correction. + +State-of-the-art segmentation models such as TotalSegmentator, MOOSE, nnU-Net, and MONAI Auto3DSeg provide powerful tools for anatomical segmentation. However, their outputs vary in label definitions, formats, and performance across anatomical structures. Moreover, pretrained models do not cover all structures (e.g., individual tarsal bones), and segmentation errors can occur due to anatomical variability, pathology, or imaging artifacts. + +Manual verification of all segmentations is impractical for large datasets. Therefore, this project proposes a system that integrates multiple segmentation models, standardizes their outputs, automatically evaluates segmentation quality using consistency-based metrics, and enables human-in-the-loop correction only for unreliable cases. 
+ +The system is structured into three independent stages: +(1) Multi-model segmentation, +(2) Automatic quality control (QC), and +(3) Human-in-the-loop correction. + + + +## Objective + + + + +The main objective is to build a pipeline that: + +- Integrates multiple pretrained and custom bone segmentation models +- Converts all model outputs into a common label schema +- Automatically detects unreliable segmentations by calculating the inconsistency between segmentations +- Assigns a quality score per bone per scan +- Minimizes expert workload by sending only low-quality segmentations for manual correction +- Supports interactive correction using 3D Slicer and SlicerNNInteractive + +Ultimately, the goal is to produce an open-access, high-quality, standardized bone segmentation dataset through [https://www.bonehub.eu/](https://www.bonehub.eu/) + + + +## Approach and Plan + + + + +The system is divided into three separate stages. + +**Stage 1 — Multi-Model Segmentation** +Goal: Generate and store segmentation outputs in a unified format. + +- Wrap pretrained tools (e.g., TotalSegmentator, MOOSE) and custom models (nnU-Net, Auto3DSeg) using a standardized interface. +- Convert all outputs into a common label schema +- Use a segmentation pipeline capable of: + - Running multiple models + - Applying optional input augmentations (rotation, noise, intensity variations) + - Produce multiple segmentation outputs per case (different models × augmentations). + - Store all segmentation results for later analysis. +- This stage performs no quality evaluation. + +**Stage 2 — Automatic Quality Control (QC)** +Goal: Detect unreliable segmentations + +- This stage analyzes only the segmentations produced in Stage 1. +- QC is based on: Compare different segmentations on the same image (e.g., Dice score). +- For each bone and scan, QC metrics are combined into a final QC score, which is stored for later use. No segmentation models are executed in this stage.
+ +**Stage 3 — Human-in-the-Loop Correction** +Goal: Efficiently correct only unreliable segmentations. + +- Select segmentations with QC scores beyond a defined threshold. +- Extract a region of interest (ROI) around the target bone to reduce volume size. +- Load ROI image and mask into 3D Slicer with SlicerNNInteractive. +- Allow users to correct segmentations via prompt-based interactions. +- Map corrected masks back to full-volume space and replace the original segmentation artifacts. + + + +## Progress and Next Steps + + + +We will make the codes available at [https://github.com/BoneHub/BoneHub-Segmentation](https://github.com/BoneHub/BoneHub-Segmentation) + + + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +_No response_ + diff --git a/PW44_2026_GranCanaria/Projects/AddingDicomSupportInformationToExtensionManager/README.md b/PW44_2026_GranCanaria/Projects/AddingDicomSupportInformationToExtensionManager/README.md new file mode 100644 index 000000000..ab7a98b89 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/AddingDicomSupportInformationToExtensionManager/README.md @@ -0,0 +1,94 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Adding DICOM Support Information To Extension Manager +category: DICOM +presenter_location: + +key_investigators: + +- name: Kyle Sunderland + affiliation: Queen's University + country: Canada + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +--- + +# Project Description + + + + +Currently DICOM support in extensions is written to a json file in the slicer repo ([DICOMExtensions.json](https://github.com/Slicer/Slicer/blob/main/Modules/Scripted/DICOM/DICOMExtensions.json)), however this information is not frequently updated as new extensions are added, unmaintained extensions removed, and additional modality support added to extensions. 
+ +## Objective + + + + +1. Add additional metadata to the extension index to describe DICOM support implemented in each extension. +2. During DICOM import if no DICOM plugins are able to load the modality, then query the list of available extensions to find an extension that can import the data. +3. Also add additional metadata information (multiple categories, soft dependencies, etc). + + + +## Approach and Plan + + + + +1. Update Slicer ExtensionsIndex to include DICOM related metadata, update schema ([See branch here](https://github.com/Sunderlandkyl/ExtensionsIndex/tree/dicom_support_rule)) +2. Update [Slicer Package Manager](https://github.com/girder/slicer_package_manager) to handle additional DICOM support metadata + - This can be done by specifying a [rule-engine](https://pypi.org/project/rule-engine/) string that can be used to define extension compatibility with Modality and SOPClassUID. + - Ex: + ```"(Modality == 'SEG') or (SOPClassUID == '1.2.840.10008.5.1.4.1.1.30') or (Modality == 'SR' and (SOPClassUID == '1.2.840.10008.5.1.4.1.1.88.22' or SOPClassUID == '1.2.840.10008.5.1.4.1.1.88.33'))"``` +3. Update 3D Slicer core to query the list of available extensions to find an extension that can handle the current modality. ([See branch here](https://github.com/Sunderlandkyl/Slicer/tree/dicom_plugin_extension_check)) +4. Integrate this PR with other efforts to update the extension manager (extensions tiers, multiple categories, etc. - see [https://github.com/girder/slicer_package_manager/pull/124](https://github.com/girder/slicer_package_manager/pull/124)) +5. Update SlicerIDCBrowser? +6. Need to revisit QuantitativeReporting DICOM plugin - it won't be able to load segmentation of a SM image! + + +## Progress and Next Steps + + + +1. 
Support for additional metadata types in json/s4ext and Girder: + - dicom_support_rule: Logical expression that can be evaluated to suggest extensions that are able to handle a specific DICOM file. + - keywords: Additional terms that can be searched when filtering extensions + - tier: Rebased version of Jc's branch that adds extension support "tier" + - reccomendations: List of reccomended extensions that should be installed by the user. Not including build dependencies + +2. Evaluation of dicom_support_rule to suggest extensions from within Slicer + - Added logic to the DICOM module so that when the loading of an extension fails, we suggest that the user install an extension that can load their data. + +# Illustrations + + + +### Extension suggestion +![](https://github.com/user-attachments/assets/23334375-e646-4e1b-9437-091c561410c0) + +### Girder contents +![](https://github.com/user-attachments/assets/6dace467-46a2-4ee6-bd02-3029f130c8d0) + +# Background and References + + + +### Development branches +- [Sunderlandkyl/slicer_package_manager - additional_metadata](https://github.com/Sunderlandkyl/slicer_package_manager/tree/additional_metadata) +- [Sunderlandkyl/Slicer - additional_extension_metadata](https://github.com/Sunderlandkyl/Slicer/tree/additional_extension_metadata) + +### Other links +- [https://pypi.org/project/rule-engine/](https://pypi.org/project/rule-engine/) diff --git a/PW44_2026_GranCanaria/Projects/CastAStandardForRealTimeFrontEndIntegrationOfHealthcareApplication/README.md b/PW44_2026_GranCanaria/Projects/CastAStandardForRealTimeFrontEndIntegrationOfHealthcareApplication/README.md new file mode 100644 index 000000000..7fc76895e --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/CastAStandardForRealTimeFrontEndIntegrationOfHealthcareApplication/README.md @@ -0,0 +1,229 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Cast - A Standard for Real-Time Front-End 
Integration of Healthcare Application +category: Infrastructure +presenter_location: + +key_investigators: + +- name: Martin Bellehumeur + affiliation: Radical Imaging + country: Germany + +- name: Steve Pieper + affiliation: Isomics + country: USA + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +--- + +# Project Description + + + + +Standardize Real-Time Front-End Integration of Healthcare Application + + + +## Objective + + + + +1. Continue the development of the front-end integration of OHIF and Slicer. +2. Use the standard FHIRcast websocket hub messaging infrastructure for non-FHIR related data/events and real-time front-end integration. +3. Enable multi-user workflows. +4. Invite and support 3D Slicer developers who want to connect to Cast/FHIRCast. + + +## Approach and Plan + +1. Add Cast hub API to Slicer Web Server with a [AI prompt that generates the hub](https://github.com/mbellehumeur/cast/blob/main/cast-hub-ai-prompt). + +2. Add a Cast client to slicer with a [AI prompt that generates the client service](https://github.com/mbellehumeur/cast/blob/main/cast-hub-ai-prompt). + +3. Implement events: + * patient-open/close + * imagingstudy-open/close + * annotation-update (measurements,markups,...) + * sceneview-update + +4. Do some scene mirroring using scene-update OHIF/OHIF and OHIF/3DSlicer + +5. Make a small tutorial. + * Use [Test bench for Project Week 44](https://cast-hub-g6abetanhjesb6cx.westeurope-01.azurewebsites.net/api/hub/admin) and [client](https://na-mic-projectweek44-g0g4a5c5dgc5dcf3.westeurope-01.azurewebsites.net/) + * Invite and support developers to connect their application. + + + + +## Introduction + +Cast is a standard protocol for real-time client to client event communication across healthcare applications.
Built upon the foundational architecture of FHIRCast, Cast extends beyond FHIR-specific data and context management to support a wide range of healthcare data formats, user interactions, controller inputs and event types. +DICOM, DICOMweb, FHIR and HL7v2 are server to server and client to server protocols. + +Cast is a client to client protocol. Client to client protocols differ because they often deal with temporary objects such as user interactions. Even when FHIR or DICOM data exchange is exchanged, it is usually to refer an existing object or initiate a new object that may or may not be saved to the server. For example, a DICOM annotation can be communicated but may or may not become part of a DICOM structured report. + +Cast serves as an umbrella standard that encompasses specialized variants such as FHIRCast (for FHIR context management), DICOMCast (for DICOM data exchange), NAVICast (for surgical navigation), and other domain-specific implementations. All variants share the same core infrastructure while defining specialized event types for their domains (see [Cast Ecosystem](#cast-ecosystem) below). + + + + + + + +## Progress and Next Steps + + + + +1. Getting started: + - Open the [OHIF client](https://na-mic-projectweek44-g0g4a5c5dgc5dcf3.westeurope-01.azurewebsites.net/) and note your user name in the top right corner. + - In the sprocket icon, open the Cast admin portal. Note the subscription for your subscriber (application) and the topic matching your user. + - In the sprocket icon, open the Cast test client. The test client will have the topic prefilled with your user. + - Click the Subscribe button. The subscription will appear in the admin portal. + - Click the Publish button. The imagingstudy-open event will cause the OHIF client to open the study in the message. + - Change the event to imagingstudy-close and Publish. The OHIF client will close the study. + + - Open a second OHIF client on the same computer or on a tablet.
It will show up in the admin portal with its user. + - Open the conference portal in the sprocket icon. Choose a title and choose both the PC and tablet user. + - Check that all three clients receive and send imaging-study open/close messages. + +Regarding authentication: Most 3D slicer integrations may not need user management. In that case, you can set the Cast API in single-user mode using the reset button on the bottom right. This will make the mock authentication to always return user id/topic 'SINGLE-USER'. In this way, all applications work on the same topic. + + +![](https://github.com/user-attachments/assets/1c38e1f3-c415-44f6-9888-f370684bb29c) + + +# Illustrations + +Cast API in Web Server module: + + ![](https://github.com/user-attachments/assets/9b4eff43-739f-4785-8ce1-3c0c1c3a8a53) + +Accessing the admin and test client pages from the test workbench: +![](https://github.com/user-attachments/assets/4b261445-c33e-465e-b3c3-096cbe5dd2af) + + +Cast admin page: + +![](https://github.com/user-attachments/assets/5e6b77d3-3400-4efd-a271-f42863ff115f) + + + + + + +Test client: + + +![](https://github.com/user-attachments/assets/4f7a6121-2864-47f2-a877-81f18eb501a2) + + + + +Conference portal: + +![](https://github.com/user-attachments/assets/0b7894dd-697a-4444-8d47-5f8ebd92d950) + + + + + + + + Three users working on the same annotation: + + + + + + + Loading a 3D Slicer scene view into OHIF with the 'GET' feature: + + + + + + Cast components: + + + + ![](https://github.com/user-attachments/assets/fa278c4d-4199-4a19-807e-41aa19acb59a) + + + + +_No response_ + + + +# Background and References + +## Background + +Healthcare environments sometimes involve multiple specialized applications working together to support clinical workflows.
These applications need to communicate and coordinate in real-time, sharing events such as user interactions, data exchanges, state changes, and workflow transitions. A typical scenario is radiology reporting where a worklist, viewer, reporting and EMR integrate to produce the diagnostic report. This workflow is defined in the IHE Integrated Reporting Applications profile. + +FHIRcast provides a solid foundation for FHIR-based context management, focusing specifically on synchronizing FHIR resource context across applications. However, the healthcare ecosystem includes many non-FHIR data formats, such as DICOM, openEHR and use cases that extend beyond context management such as navigation controllers, VR controllers, joysticks and footswitches. Cast addresses this by providing a flexible, extensible framework that supports: + +- **User Interaction Events**: Mouse clicks, keyboard input, 6DOF controller input, navigation, UI state changes +- **Data Exchange Events**: FHIR, DICOM data synchronization (potentially called DICOMCast), HL7 V2 messages, proprietary formats +- **Workflow Events**: Task assignments, status updates, notifications +- **Any Custom Event Types**: Domain-specific events defined by applications + +Cast supports **bi-directional WebSocket communication**. This enables low-latency, "gaming style" interactions where applications can exchange events in real-time with minimal delay, supporting use cases such as collaborative viewing, synchronized navigation, and interventional workflows that require immediate feedback and coordination. + +Cast also supports **collaborative multi-user workflows** through the hub's ability to group users together within sessions. The hub can coordinate multiple users, allowing them to share events and synchronize their applications in real-time.
This enables scenarios such as tumor board meetings, where multiple radiologists and clinicians can simultaneously view and interact with the same DICOM study, with measurements, annotations, and navigation synchronized across all participants own viewers. + + +The hub-based architecture provides **flexible integration** because applications do not need to connect directly to each other—they only need to reach the hub. This enables applications running on different platforms and locations to seamlessly participate in the same workflow. For example, a 3D Slicer application running on trame in the cloud can communicate with a mobile device application, a web-based viewer, or local camera control , all through the hub without requiring direct network connections between them. + +### Cast Ecosystem + +Cast serves as an umbrella standard that encompasses specialized "Cast" variants for different healthcare domains and use cases: + +``` +┌──────────────────────────────────────────────────────────────────────────────────────────┐ +│ │ +│ Cast │ +│ (Core Standard) │ +│ │ +│ ┌──────────────────┐ ┌──────────────────────┐ ┌──────────────────┐ ┌────────────┐ │ +│ │ │ │ │ │ │ │ │ │ +│ │ FHIRCast │ │ DICOMCast │ │ IGT Cast │ │ Other │ │ +│ │ │ │ │ │ │ │ Cast │ │ +│ │ FHIR Context │ │ DICOM Data Exchange │ │ Surgical │ │ Variants │ │ +│ │ Management │ │ (Front-end) │ │ Navigation │ │ │ │ +│ │ │ │ │ │ │ │(Extensible)│ │ +│ └──────────────────┘ └──────────────────────┘ └──────────────────┘ └────────────┘ │ +│ │ +└──────────────────────────────────────────────────────────────────────────────────────────┘ +``` + + + + + +## References + +[https://projectweek.na-mic.org/PW33_2020_GranCanaria/Projects/OHIFSlicerBridge/](https://projectweek.na-mic.org/PW33_2020_GranCanaria/Projects/OHIFSlicerBridge/) + + +- **FHIRcast**: [http://hl7.org/fhir/fhircast.html](http://hl7.org/fhir/fhircast.html) - The foundational standard upon which Cast is based. 
Note: FHIRcast focuses on FHIR context management, while Cast extends beyond context to support any type of event including user interactions and DICOM data exchange (DICOMCast). +- **HL7 FHIR**: [http://hl7.org/fhir/](http://hl7.org/fhir/) - Fast Healthcare Interoperability Resources +- **WebSocket**: [RFC 6455](https://tools.ietf.org/html/rfc6455) - The WebSocket Protocol +- **DICOM**: [https://www.dicomstandard.org/](https://www.dicomstandard.org/) - Digital Imaging and Communications in Medicine + + + + +_No response_ diff --git a/PW44_2026_GranCanaria/Projects/ClaudeScientificSkillForImagingDataCommons/README.md b/PW44_2026_GranCanaria/Projects/ClaudeScientificSkillForImagingDataCommons/README.md new file mode 100644 index 000000000..0205adaa7 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/ClaudeScientificSkillForImagingDataCommons/README.md @@ -0,0 +1,123 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: claude-scientific-skill for Imaging Data Commons +category: AI +presenter_location: + +key_investigators: + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +- name: Lalith Kumar Shiyam Sundar + affiliation: LMU + country: Germany + +- name: Andras Lasso + affiliation: Queen's U + country: Canada + +- name: Leonard Nürnberg + affiliation: AIM Lab + country: USA + +- name: Michael Halle + affiliation: SPL/BWH + country: USA + +- name: Justin Kirby + affiliation: Frederick National Lab + country: USA + +- name: Leonard Nürnberg + affiliation: AIM Lab + country: USA + +--- + +# Project Description + + + + +Agent Skills are folders of instructions, scripts, and resources that agents can load when relevant to perform specialized tasks. 
[claude-scientific-skills](https://github.com/K-Dense-AI/claude-scientific-skills) introduces a development pattern to describe such skills for tools and resources usable in scientific research via a human-readable document, accompanied by code samples and recipes covering key functionality of the resource. Further, the company maintaining that repository makes the resulting skills accessible via MCP server, which could be connected with an agentic dev platform to improve quality of responses. The goal of this project is to add a new skill to the aforementioned repo to cover [Imaging Data Commons](https://learn.canceridc.dev/).
+
+
+
+## Objective
+
+
+
+
+1. IDC skill is available
+2. Feedback and use cases collected from the community
+
+
+
+## Approach and Plan
+
+
+
+
+1. Analyze existing skills, understand best practices.
+2. Develop the IDC skill.
+3. Submit a PR with the IDC skill.
+4. Compare responses of LLM with and without using the `claude-scientific-skills` MCP server.
+5. Evaluate usability of the skill using the questions from IDC forum, or any other questions from the community.
+
+
+
+## Progress and Next Steps
+
+
+
+**It is best to use the standalone skill available at [https://github.com/ImagingDataCommons/idc-claude-skill](https://github.com/ImagingDataCommons/idc-claude-skill) to reduce LLM confusion!**
+
+1. Set up Claude.AI with the `claude-scientific-skills` MCP server, experiment.
+2. Started setting up the skill layout and deciding what should be covered.
+3. Submitted PR with the initial skill: [claude-scientific-skills PR #35](https://github.com/K-Dense-AI/claude-scientific-skills/pull/35) (this has now been merged!)
+4. Published standalone skill [https://github.com/ImagingDataCommons/idc-claude-skill](https://github.com/ImagingDataCommons/idc-claude-skill)
+5. 
Discussed with Mike (who merged it with his own skill for IDC!); suggestions for improvement:
+    * keep the main skill small, break out details into references
+    * look into Mike's skill for managing skill versioning
+    * need to investigate improvements to how IDC BigQuery parquet files are organized, noted in [https://github.com/ImagingDataCommons/etl_flow/issues/130](https://github.com/ImagingDataCommons/etl_flow/issues/130)
+    * need to work on idc-index improvements: publish indices in GCS bucket ([idc-index #229](https://github.com/ImagingDataCommons/idc-index/issues/229)), add radiomics features table ([idc-index #230](https://github.com/ImagingDataCommons/idc-index/issues/230)), support search via remote parquet file ([idc-index #231](https://github.com/ImagingDataCommons/idc-index/issues/231)).
+    * it is a known issue that Claude struggles dealing with too many skills (incorrect skill matching etc)
+6. Tested to address use case from Leo (how many CT scans does NLST have, how many of those are segmented with TotalSegmentator); lessons learned:
+    * GPT-4o model is useless (eg, within the same response corrects its own mistake in one code snippet but not the other)!
+    * GPT-5-codex was able to answer the questions correctly, supported by correct python code, from the first try
+7. 
Discussed usage issues with K-Dense-AI devs (via slack) + * they are aware of Claude struggling when number of skills grows over 300 (although in my experience, even 140+ seems to be too much already, at least when accessed via their MCP) + * discussed issues related to managing skill independently vs as part of their repo + * see [https://k-densecommunity.slack.com/archives/C09RL3JRBSB/p1769554262310839](https://k-densecommunity.slack.com/archives/C09RL3JRBSB/p1769554262310839) to join the discussion and learn more + + +# Illustrations + + + + +_No response_ + + + +# Background and References + +* standalone skill: [https://github.com/ImagingDataCommons/idc-claude-skill](https://github.com/ImagingDataCommons/idc-claude-skill) +* [IDC forum post on finding fractional DICOM SEG in IDC](https://discourse.canceridc.dev/t/locating-fractional-dicom-segmentations-in-idc/776) put together using the developed skill + +Coding agents learning materials: +* [agents.md](https://github.com/agentsmd/agents.md) +* [How to write a great agents.md: lessons from over 2,500 repositories](https://github.blog/ai-and-ml/github-copilot/how-to-write-a-great-agents-md-lessons-from-over-2500-repositories/) +* [everything-claude-code](https://github.com/affaan-m/everything-claude-code) + +Mike Halle's IDC skill (works with Claude Code and Claude platform/web/mobile) + * [https://github.com/mhalle/idc-skill/](https://github.com/mhalle/idc-skill/) + * [https://github.com/mhalle/idc-skill/releases/latest/download/idc-skill.skill](https://github.com/mhalle/idc-skill/releases/latest/download/idc-skill.skill) + + diff --git a/PW44_2026_GranCanaria/Projects/CollaborativeArApplicationUsingOpenigtlink3DSlicerAndMagicLeap2/README.md b/PW44_2026_GranCanaria/Projects/CollaborativeArApplicationUsingOpenigtlink3DSlicerAndMagicLeap2/README.md new file mode 100644 index 000000000..ae7854a86 --- 
/dev/null +++ b/PW44_2026_GranCanaria/Projects/CollaborativeArApplicationUsingOpenigtlink3DSlicerAndMagicLeap2/README.md @@ -0,0 +1,113 @@ +--- +layout: pw44-project
+
+permalink: /:path/
+
+project_title: Collaborative AR application using OpenIGTLink, 3D Slicer and Magic Leap 2
+category: VR/AR and Rendering
+presenter_location:
+
+key_investigators:
+
+- name: Alicia Pose Díez de la Lastra
+  affiliation: Universidad Carlos III de Madrid
+  country: Spain
+
+- name: Javier Pascau
+  affiliation: Universidad Carlos III de Madrid
+  country: Spain
+
+- name: Steve Pieper
+  affiliation: Isomics
+  country: USA
+
+- name: Ron Kikinis
+  affiliation: BWH
+  country: USA
+
+---
+
+# Project Description
+
+
+
+
+In previous Project Weeks, we showed a way to connect Microsoft HoloLens 2 with 3D Slicer using OpenIGTLink protocol. The current solution is implemented in a 3-element system. It is composed of a Microsoft HoloLens 2 headset, the Unity software, and the 3D Slicer platform. The HoloLens 2 application is not directly built on the device, but remotely transferred from Unity in real time using Holographic Remoting. The communication workflow is represented in the following image:
+
+![image](https://github.com/NA-MIC/ProjectWeek/assets/66890913/6be8aff6-c4e8-48f1-a5ce-dfebff0dc0df)
+
+The results of that work are publicly available at [this GitHub repository](https://github.com/BSEL-UC3M/HoloLens2and3DSlicer-PedicleScrewPlacementPlanning).
+
+
+In more recent Project Weeks we evaluated the transferability of the aforementioned project to other AR devices. Specifically, we focused on the VARJO XR-3 headset, obtaining fast and high-quality results:
+
+
+
+
+
+This time, we aim to follow a similar approach with Magic Leap 2, with the ultimate goal of creating a collaborative application between multiple headsets communicating through OpenIGTLink.
+
+
+
+## Objective
+
+
+
+
+1. 
Objective A: Adapt previous Unity apps to Magic Leap 2. +2. Objective B: Exchange information between Magic Leap 2 and 3D Slicer. + + + +## Approach and Plan + + + + +1. Adapt previous Unity apps to Magic Leap 2. +2. Test Holographic Remoting in Magic Leap 2. +3. Test the exchange of information between Magic Leap 2 and 3D Slicer. +4. Add a new headset to the system and try to exchange information between HoloLens 2 and Magic Leap 2 + + + +## Progress and Next Steps + + + + +1. Bring both a HoloLens 2 and a Magic Leap 2 headsets to PW. +2. Also tested with Viture Luma Ultra with Neckband - confirmed that Unity SDK can be used to develop Viture apps and should be able to use core code as Magic Leap 2 + * Viture [native SDK](https://www.viture.com/developer) released on Thursday + + + + +# Illustrations + + +## Alicia with Viture Luma Ultra and Pro Neckband + +![](https://github.com/user-attachments/assets/b52efeb7-17c6-466e-a74a-4b0d3ed2c2b2) + + +_No response_ + + + +# Background and References + + + + +_No response_ diff --git a/PW44_2026_GranCanaria/Projects/DebugParemeterNodeAndQtmrmlcomboboxSetToNoneAfterAddingToASubjectHierarchy/README.md b/PW44_2026_GranCanaria/Projects/DebugParemeterNodeAndQtmrmlcomboboxSetToNoneAfterAddingToASubjectHierarchy/README.md new file mode 100644 index 000000000..dfc96ade9 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/DebugParemeterNodeAndQtmrmlcomboboxSetToNoneAfterAddingToASubjectHierarchy/README.md @@ -0,0 +1,135 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Subject hierarchy combobox selection bug +category: Infrastructure +presenter_location: + +key_investigators: + +- name: Chi Zhang + affiliation: Texas A&M University College of Dentistry + country: USA + +- name: Csaba Pintér + affiliation: Ebatinca SL + country: Spain + +--- + +# Project Description + + + + +Bug description: +1. Sync a parameter node to a qtMRLComboBox node. +2. Add the parameter node to a subjectHierarchy folder. 
Both the parameter Node and the qtMRMLComboBox node set to 'None'. + + [Slicer Discourse thread](https://discourse.slicer.org/t/slicer-5-10-qmrmlsubjecthierarchycombobox-turned-to-none-but-no-issue-with-5-8-1/45203/5) + + + +## Objective + + + + +1. Fix the bug + + + + +## Approach and Plan + + + + +Here is a sample script that connect a qtMRMLComboBox node to a parameter node, and creating a subjectHierarchy folder to add the parameter node to it. Both will be set to 'None' + +``` +import slicer, qt + +dialog = qt.QDialog(slicer.util.mainWindow()) +layout = qt.QVBoxLayout(dialog) + +shComboBox = slicer.qMRMLSubjectHierarchyComboBox() +shComboBox.nodeTypes = ["vtkMRMLMarkupsFiducialNode"] +shComboBox.noneEnabled = True +shComboBox.setMRMLScene(slicer.mrmlScene) + +layout.addWidget(qt.QLabel("Select a fiducial (SubjectHierarchyComboBox):")) +layout.addWidget(shComboBox) + +runButton = qt.QPushButton("Move selected node into folder (triggers reset)") +layout.addWidget(runButton) + +shNode = slicer.vtkMRMLSubjectHierarchyNode.GetSubjectHierarchyNode(slicer.mrmlScene) + +def printState(tag): + print(f"\n[{tag}]") + print(" shComboBox.currentItem():", shComboBox.currentItem()) + print(" shComboBox.currentNode():", repr(shComboBox.currentNode())) + +# Add the parameter node to a folder +def onRun(): + currentItemID = shComboBox.currentItem() + if currentItemID == 0: + print("No selection (currentItemID==0)") + return + + printState("BEFORE MOVE") + + folderItemID = shNode.CreateFolderItem(shNode.GetSceneItemID(), "SHCombo_TestFolder") + shNode.SetItemParent(currentItemID, folderItemID) + + printState("AFTER MOVE (post SetItemParent)") + +runButton.clicked.connect(onRun) + +dialog.show() +``` + +![](https://github.com/user-attachments/assets/fb1dc7c1-f782-49a3-9688-9da18e58a1c1) + +``` +[AFTER MOVE (post SetItemParent)] +shComboBox.currentItem(): 0 +shComboBox.currentNode(): None +parameter.selectedFiducial: None +``` + +If qt combobox signal is blocked 
using `qt.QSignalBlocker`, then parameter node will not be set to 'None'
+
+
+
+## Progress and Next Steps
+
+
+
+1. First finding is that the bug is simpler than seemed originally, and the parameter node is not needed, self-contained test script simplified
+2. Actual bug confirmed: when an SH item is reparented in the logic that is selected in a view (tree view or combobox), then that selection is cleared. The reason is that when reparenting, the item is first removed then re-inserted under the new parent
+3. [PR](https://github.com/Slicer/Slicer/pull/9009) opened about possible solution - behavior turns out to be wrong in a different way
+4. Proper reparenting seems to involve wrapping the steps between `beginMoveRows` and `endMoveRows`. This will probably be the correct solution if we can make it work (first attempt is not robust, something is missing)
+
+
+
+# Illustrations
+
+
+
+
+_No response_
+
+
+
+# Background and References
+
+
+
+
+_No response_ diff --git a/PW44_2026_GranCanaria/Projects/DefacingHeadAndNeckCtScansWhilePreservingLymphNodes/README.md b/PW44_2026_GranCanaria/Projects/DefacingHeadAndNeckCtScansWhilePreservingLymphNodes/README.md new file mode 100644 index 000000000..50fd39848 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/DefacingHeadAndNeckCtScansWhilePreservingLymphNodes/README.md @@ -0,0 +1,129 @@ +--- +layout: pw44-project
+
+permalink: /:path/
+
+project_title: Defacing head and neck CT scans while preserving lymph nodes
+category: Segmentation / Classification / Landmarking
+presenter_location:
+
+key_investigators:
+
+- name: Steve Pieper
+  affiliation: Isomics, Inc.
+  country: USA
+
+- name: Ron Kikinis
+  affiliation: BWH
+  country: USA
+
+---
+
+# Project Description
+
+
+
+
+For the Lymph Node Quantification project we want to analyze and share head and neck scans but don't want to share PHI.
+
+
+
+## Objective
+
+
+
+
+1. Use generic segmentation tools (CADS, SuperSynth, etc.)
+2. 
Identify facial feature landmarks. Find existing models or examples that identify features like corners of eyes and mouth
+3. Figure out which features can be reliably detected that define a reasonable identity removal but leaving the nodes in the cheeks and jaw
+4. Decide what blurring or removal to do in the identified face area
+
+
+
+## Approach and Plan
+
+
+
+
+* Figure out if existing MR defacing tools can be used for this, perhaps by mapping the CT to MR
+* Explore the state of the art in [Facial animation systems](https://research.nvidia.com/labs/amri/projects/gaia/)
+* A more recent paper referencing the Milchenko paper [Deidentification of CT head images](https://pmc.ncbi.nlm.nih.gov/articles/PMC10406725/pdf/12021_2023_Article_9631.pdf)
+
+
+## Progress and Next Steps
+
+* Created mockups of the areas to be obscured
+* Discussed the core issues with several project week attendees (Thank you Andras, David, Alexandra!)
+* Created plan to test the nnU-Net based landmark detection system on the lymph node preserving face and ear definition
+  * Help test the new extension on private data
+* Test existing public data on David's Zenta defacing model for CT data
+* Consider a custom version of Zenta's model that preserves lymph nodes (submandibular and submental)
+
+### Mockups
+
+### Area to be defaced
+
+#### Closed curve snapped to model surface
+
+![](https://github.com/user-attachments/assets/bb1f2597-e68e-416f-a87f-619629f95674)
+
+#### Dynamic modeling applied to surface wrap solidify model
+
+![](https://github.com/user-attachments/assets/95876794-4d8e-414b-8e9e-f9361d6fe97a)
+
+#### Facial blur
+
+![](https://github.com/user-attachments/assets/b3c02fc2-2d83-4b3d-b2ce-e89b3186f67a)
+
+#### Extra blur
+
+![](https://github.com/user-attachments/assets/acb70748-22ba-45fd-a85e-954d9ac39d9c)
+
+#### Zenta's current solution
 
+![](https://github.com/user-attachments/assets/6f07147c-cb90-4fd1-8816-1b0946b5f8f4) + +![](https://github.com/user-attachments/assets/9917273d-7fe0-4654-94d7-49060f919b3e) + +### Open Questions + +* What are the state-of-the-art definitions for facial deidentitication and how do they intersect with our requirements? +* What primitives exist in the Slicer ecosystem that can be used as building blocks, such as landmark detection, image manipulation, and segmentation to make a robust pipeline. +* Can we use exising infrastructure or is it better to build something custom. + + + + + +# Illustrations + + + + +![](https://github.com/user-attachments/assets/b93bf4ce-485d-46c6-af8b-bda41197eb1b) + +We will use the [CTHead](https://drive.google.com/file/d/1a0tt9_Uu7whrYs2VKbBezwKi7gJGG823/view?usp=sharing) sample data for experiments. + +![](https://github.com/user-attachments/assets/86c88b58-2874-435f-90ef-111bb53fb808) + + +# Background and References + + + +* [Facial lymph nodes - Wikipedia](https://en.wikipedia.org/wiki/Facial_lymph_nodes) +* [Paper describing defacing by adding variable thickness epidermis](https://pmc.ncbi.nlm.nih.gov/articles/PMC3538950/pdf/nihms407349.pdf) + +## Inspirations + +### "Phantom of the Opera" deidentification: + +![](https://github.com/user-attachments/assets/9cf6fd85-43c6-4e12-9de7-a7ded3f9c485) + +### "Venetian Masquerade" deidentification: + +![](https://github.com/user-attachments/assets/c7aad156-f783-47bb-8e3a-125bbb1edb97) + +### "Traditional medical publication" deidentification: + +![](https://github.com/user-attachments/assets/f6d69bb7-3356-4334-813b-e6eea7862bb4) diff --git a/PW44_2026_GranCanaria/Projects/DeformviewQuantitativeVisualizationOfNonLinearDeformationFieldsForUseInImageGuidedNeurosurgery/README.md 
b/PW44_2026_GranCanaria/Projects/DeformviewQuantitativeVisualizationOfNonLinearDeformationFieldsForUseInImageGuidedNeurosurgery/README.md new file mode 100644 index 000000000..fbde4d1b8 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/DeformviewQuantitativeVisualizationOfNonLinearDeformationFieldsForUseInImageGuidedNeurosurgery/README.md @@ -0,0 +1,388 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: 'DeformView: Quantitative Visualization of Non-Linear Deformation Fields for Use in + Image-Guided Neurosurgery' +category: Quantification and Computation +presenter_location: + +key_investigators: + +- name: Isabel Frolick + affiliation: McGill University + country: Canada + +- name: Elise Donszelmann-Lund + affiliation: McGill University + country: Canada + +- name: D. Louis Collins + affiliation: McGill University + country: Montreal Neurological Institute, Canada + +--- + +# Project Description + + + + +We have been developing DeformView, a visualization module for 3D Slicer that improves the interpretation of non-linear brain deformation (“brain shift”) during image-guided neurosurgery and as a training tool for inexperienced surgeons and researchers. +DeformView provides two dense, intuitive visualization maps: (1) a dense displacement magnitude map (mm), and (2) +a Jacobian determinant magnitude map representing local tissue expansion and compression (%). + +The proposed module combines scientifically derived, intuitive colour maps and voxel-wise +cursor pointer that directly displays displacement values on hover, a function not available in existing +Slicer tools, to improve user understanding and confidence. + +### Updated Our Module README + +We'll make our GitHub public for interested parties in the coming days. 
See GitHub Repo here: **[https://github.com/elisedl1/BrainshiftModule](https://github.com/elisedl1/BrainshiftModule)**
+
+If you have any questions/issues, contact us at isabel.frolick@mail.mcgill.ca and/or elise.donszelmannlund@mail.mcgill.ca
+
+## Objective
+
+
+
+
+1. **_Objective A._ Improve user experience** and stability by identifying and fixing bugs, refining interactions, and ensuring reliable performance across datasets.
+2. **_Objective B._ Gather user feedback** from researchers and clinicians to guide the design of additional features, including potentially adding features to visualize registration error and uncertainty within the module.
+3. **_Objective C._ Integrate transform grid/glyph** visualizations directly into DeformView to provide complementary spatial context alongside dense deformation maps.
+
+
+
+## Approach and Plan
+
+
+
+
+1. We will systematically test DeformView across representative datasets (focusing on IGNS-focused data - ReMIND, RESECT, BITE, etc.) and use cases to identify and resolve software bugs. We will ask attendees to use the module to identify common workflows, areas of improvement. We will also perform stress and destructive testing.
+2. User-centered design and feedback: We will conduct structured feedback sessions with expert users, non-expert users, and clinicians, using our targeted questionnaires and short tasks to identify desired features and usability gaps. We will lead discussions with attendees to identify areas of improvement and feature prioritization.
+
+### Plan for Project Week
+We have implemented the core functionality of the DeformView module, including dense deformation visualization, Jacobian-based expansion/compression maps, and voxel-wise readout on cursor hover. Initial testing confirms that primary visualization goals have been achieved, with only minor usability and stability issues remaining. 
+ +We conducted a user study with 10 non-expert participants (average 2.9 years of imaging research experience) to evaluate module functionality. Participants compared DeformView to the existing 3D Slicer Transform Visualizer across four attributes: helpfulness in comprehension, interpretability, intuitiveness, and user confidence, using Likert ratings and the System Usability Scale. On average, DeformView was rated higher across all categories (mean: 4.1/5.0 vs 3.2/5.0), with statistically significant improvements in helpfulness (p=0.008) and intuitiveness (p=0.027). Overall, 80% of participants preferred DeformView over the existing module, confirming the value of our visualization approach. + +We plan to address the remaining bugs (that we know of), get user feedback, and engage in discussions with Slicer developers to optimize our module: +1. Colour Map and Legend Modifications +- Fix legend scale reload bug +- Resolve remaining default colour map behaviour, consistent default colour levels when loading new maps +- Interaction between legend and colour level/window controls +- Clarity of colour map loading and switching- add descriptive text under the “Color Map” selector + +2. Jacobian-Specific Visualization Controls +- Jacobian colour legend labels +- Set Jacobian window and level to constant values to ensure consistent interpretation + +3. User Interface and Readability Improvements +- Adjust cursor text size for improved readability +- Implement a full reset of default settings, not limited to window/level + + +## Progress and Next Steps + + + + + + +### Introduced the 'Increment Transform' feature + +The transformation is incrementally applied to the moving image over 10 discrete steps. This creates a sliding scale of the transform applied to the underlying image at discrete quantities (ie: 0.1x full transform, 0.2x final transform, etc.) + + + + + + + +### Fixed Remaining (Known) Bugs and Functional Errors + +1. 
Colour Map and Legend Modifications +- Fix legend scale reload bug +- Resolve remaining default colour map behaviour, consistent default colour levels when loading new maps +- Interaction between legend and colour level/window controls +- Clarity of colour map loading and switching- add descriptive text under the “Color Map” selector + +2. Jacobian-Specific Visualization Controls +- Jacobian colour legend labels +- Set Jacobian window and level to constant values to ensure consistent interpretation + +3. User Interface and Readability Improvements +- Adjust cursor text size for improved readability +- Implement a full reset of default settings, not limited to window/level + + + + + +### Started Integration of DeformView and Transforms Visualizer Module + +Local UI changes have begun, no functionality is attached yet. Implementing a 'sparse' tab (current functionality) and a 'dense' tab (added functionality) - but open to feedback. + +| Sparse Visualization Tab UI | Dense Visualization Tab UI | +|---|---| +| ![](https://github.com/user-attachments/assets/b0cc065d-97f5-4f22-a7b6-7b2c2ff61e64)
**Sparse Visualization Tab UI (current Transforms Visualizer functionality)** | ![](https://github.com/user-attachments/assets/8a12f53f-4d10-481e-8b64-d89fff406e24)
**Dense Visualization Tab UI (DeformView added functionality)** | + + + +### Slicer Build on Mac (Silicon) + Documentation + +Worked with Steve to get a local build of 3D Slicer on OSX (Silicon). Also wrote documentation to configure, build, run, and debug 3D Slicer from source on macOS (ARM64) using Qt6 and Xcode. + +![](https://github.com/user-attachments/assets/56d00342-6869-43ad-8035-f480e3ae3f6e) + + +#### Current Recipe for Slicer + +##### My config: +``` +frolick@IsabelMacBook % sw_vers +ProductName: macOS +ProductVersion: 15.7.3 +BuildVersion: 24G419 + +frolick@IsabelMacBook % qmake --version +QMake version 3.1 +Using Qt version 6.10.1 in /opt/homebrew/lib + +frolick@IsabelMacBook % cmake --version +cmake version 3.31.5 + +CMake suite maintained and supported by Kitware (kitware.com/cmake). + +``` + +##### Prerequisites + +###### Install Qt6 + +Verify Qt6 Install +``` +qmake --version +``` + +1. If not installed, Install Qt6 (via Homebrew) +``` +brew install qt@6 +``` +2. Add Qt6 to shell environment: +``` +echo 'export PATH="/opt/homebrew/opt/qt@6/bin:$PATH"' >> ~/.zshrc +echo 'export CMAKE_PREFIX_PATH="/opt/homebrew/opt/qt@6:$CMAKE_PREFIX_PATH"' >> ~/.zshrc +``` +3. Reload shell: +``` +source ~/.zshrc +``` + +4. Verify Qt6 installation: +``` +qmake --version +``` + +###### Install Xcode Command Line Tools +``` +xcode-select --install +``` + +###### Install XCode (not strictly necessary (?) but I couldn't do it without XCode Desktop) + +If installing full Xcode from the App Store: + +1. Install Xcode from App Store + +2. Set it as the active developer directory: +``` +sudo xcode-select --switch /Applications/Xcode.app/Contents/Developer +``` +3. Accept the license: +``` +sudo xcodebuild -license accept +``` + +4. 
Verify: +``` +xcodebuild -version +xcode-select -p +``` + +Else, check the SDK path used by Xcode: +``` +xcrun --show-sdk-path +``` +--- + +##### Directory Setup + +Create build directories and set ownership: +``` +sudo mkdir -p /opt/scmake /opt/scd +sudo chown -R $(whoami) /opt/scmake /opt/scd +``` + +##### Create Build Script + +``` +vim build_slicer.sh +``` + +Paste: +``` +#!/bin/bash + +# Configuration +SLICER_SOURCE_DIR="$HOME/slicer/latest/Slicer" +SLICER_BUILD_DIR="/opt/scd" +SLICER_SUPERBUILD_DIR="/opt/scmake" + +# Create directories +mkdir -p "$SLICER_BUILD_DIR" +mkdir -p "$SLICER_SUPERBUILD_DIR" + +# Clone Slicer source if it doesn't exist +if [ ! -d "$SLICER_SOURCE_DIR" ]; then + echo "Cloning Slicer repository..." + mkdir -p "$(dirname "$SLICER_SOURCE_DIR")" + git clone https://github.com/Slicer/Slicer.git "$SLICER_SOURCE_DIR" +fi + +cd "$SLICER_SUPERBUILD_DIR" + +# Configure with CMake +cmake \ + -DCMAKE_OSX_ARCHITECTURES=arm64 \ + -DSlicer_REQUIRED_QT_VERSION="6.10" \ # CHANGEME: Qt version (qmake --version) + -DCMAKE_BUILD_TYPE:STRING=Debug \ + -DSlicer_USE_SimpleITK:BOOL=OFF \ + -DSlicer_BUILD_I18N_SUPPORT:BOOL=OFF \ + -DSlicer_BUILD_DICOM_SUPPORT:BOOL=OFF \ + -DSlicer_VTK_SMP_IMPLEMENTATION_TYPE:STRING=Sequential \ + -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=15.7 \ # CHANGEME: OSX version (About this Mac -> macOS) + -DCMAKE_OSX_SYSROOT:STRING="$(xcrun --show-sdk-path)" \ + "$SLICER_SOURCE_DIR" + +# Build (use -j for parallel jobs, -k to keep going on errors) +make -j10 -k +``` + + +##### Run Build + +``` +cd /opt/scmake +chmod +x ./build_slicer.sh +./build_slicer.sh +``` + + +--- + +##### Running Slicer Locally + +###### Option 1: Navigate to the build directory, make, launch: +Executable path: /opt/scmake/Slicer-build/Slicer + +``` +cd /opt/scmake/Slicer-build +make -j8 #Optional rebuild to show local changes +./Slicer +``` + +###### Option 2: Run on XCode with Debugging + +1. 
Create a Dummy Xcode Project + Xcode requires an open project in order to attach a debugger. + 1. Open Xcode + 2. File → New → Project + 3. Create a Command Line Tool project (macOS) + 4. Enter an Organization Name (required!) + +2. Get the Slicer Process ID (PID) + 1. Open local Slicer through the build directory (following the steps in Option 1) + 2. In Python Interactor: + ``` + import os + os.getpid() + ``` + Copy the PID +3. Attach the Debugger + In Xcode: + 1. Debug → Attach to Process by Name or PID… + 2. Paste the PID + 3. Now you can attach process and use debugger functionality (breakpoints, etc.) as usual + + + +# Illustrations + + + + +**Displacement Magnitude Map** +Voxel-wise magnitude of non-linear deformation between preoperative T2-FLAIR MRI and intraoperative tumour resection T2-FLAIR, from Case 50 of the ReMIND dataset. Warmer colours indicate larger tissue displacement. + +![](https://github.com/user-attachments/assets/72cd8330-0b6c-4e1b-bed4-29fcaef86351) + +--- + +**Overlay of Displacement Magnitude (Colour Map) & Current 3D Slicer Transform Visualizer Module (Glyphs) + +The current Transform Visualizer module (core 3D Slicer module) visualizes deformation as glyphs (arrows), grid, and contour. When integrated together, it is more intuitive where deformation has occurred (DeformView) and the direction of deformation (Transform Visualizer). + +![](https://github.com/user-attachments/assets/7f059022-35c2-41d7-ae36-b223d832a4a6) + + +--- +**Jacobian determinant magnitude map** +Visual of the Jacobian map, where red indicates tissue expansion and blue is tissue compression, as a percentage. This is the same data as the above displacement magnitude example. 
+
+![](https://github.com/user-attachments/assets/4b01baf2-ddc9-44da-b8c2-9a5b81b702b8)
+
+
+---
+
+**User Study Results**
+Comparison of **DeformView** with the existing Transform Visualizer module (n=10) across four attributes: helpfulness, interpretability, intuitiveness, and user confidence (1–5 scale; higher scores indicate better performance). DeformView is rated higher across all categories, with significant improvements in helpfulness and intuitiveness.
+
+![](https://github.com/user-attachments/assets/22094efd-4df5-4a33-b21e-ed3e88a6c897)
+
+
+# Contact
+Any issues, questions, or inquiries - email us at isabel.frolick@mail.mcgill.ca and/or elise.donszelmannlund@mail.mcgill.ca
+
+
+# Background and References
+
+
+
+
+Miner, R. C. (2017). Image-guided neurosurgery. Journal of Medical Imaging and Radiation Sciences, 48(4), 328–335.
+
+Abhari, K., Baxter, J. S., Chen, E. C., Khan, A. R., Peters, T. M., De Ribaupierre, S., & Eagleson, R. (2014). Training for planning tumour resection: augmented reality and human factors. IEEE Transactions on Biomedical Engineering, 62(6), 1466–1477.
+
+King, F., Lasso, A., & Pinter, C. (2015, August 4). TransformVisualizer (Documentation/Nightly/Modules). 3D Slicer Wiki. [Link](https://www.slicer.org/wiki/Documentation/Nightly/Modules/TransformVisualizer)
+
+Vlachogianni, P., & Tselios, N. (2022). Perceived usability evaluation of educational technology using the System Usability Scale (SUS): A systematic review. Journal of Research on Technology in Education, 54(3), 392–409.
+
+Drouin, S., Kochanowska, A., Kersten-Oertel, M., Gerard, I. J., Zelmann, R., De Nigris, D., … & Collins, D. L. (2017). IBIS: an OR ready open-source platform for image-guided neurosurgery. International Journal of Computer Assisted Radiology and Surgery, 12(3), 363–378.
+
+Chung, M. K., Worsley, K. J., Paus, T., Cherif, C., Collins, D. L., Giedd, J. N., … & Evans, A. C. (2001). 
A unified statistical approach to deformation-based morphometry. NeuroImage, 14(3), 595–606. + +Juvekar, P., Dorent, R., Kögl, F., Torio, E., Barr, C., Rigolo, L., … & Kapur, T. (2024). REMiND: The brain resection multimodal imaging database. Scientific Data, 11(1), 494. + +Crameri, F., & Hason, S. (2024). Navigating color integrity in data visualization. Patterns, 5(5), 100972. doi:10.1016/j.patter.2024.100972 diff --git a/PW44_2026_GranCanaria/Projects/DeformviewQuantitativeVisualizationOfNonLinearDeformationFieldsForUseInImageGuidedNeurosurgery/incrementVideo.mov b/PW44_2026_GranCanaria/Projects/DeformviewQuantitativeVisualizationOfNonLinearDeformationFieldsForUseInImageGuidedNeurosurgery/incrementVideo.mov new file mode 100644 index 000000000..52e2f5bda Binary files /dev/null and b/PW44_2026_GranCanaria/Projects/DeformviewQuantitativeVisualizationOfNonLinearDeformationFieldsForUseInImageGuidedNeurosurgery/incrementVideo.mov differ diff --git a/PW44_2026_GranCanaria/Projects/DeformviewQuantitativeVisualizationOfNonLinearDeformationFieldsForUseInImageGuidedNeurosurgery/increment_project_page.gif b/PW44_2026_GranCanaria/Projects/DeformviewQuantitativeVisualizationOfNonLinearDeformationFieldsForUseInImageGuidedNeurosurgery/increment_project_page.gif new file mode 100644 index 000000000..925415862 Binary files /dev/null and b/PW44_2026_GranCanaria/Projects/DeformviewQuantitativeVisualizationOfNonLinearDeformationFieldsForUseInImageGuidedNeurosurgery/increment_project_page.gif differ diff --git a/PW44_2026_GranCanaria/Projects/DeformviewQuantitativeVisualizationOfNonLinearDeformationFieldsForUseInImageGuidedNeurosurgery/trimmed_demo.mov b/PW44_2026_GranCanaria/Projects/DeformviewQuantitativeVisualizationOfNonLinearDeformationFieldsForUseInImageGuidedNeurosurgery/trimmed_demo.mov new file mode 100644 index 000000000..eea284fc6 Binary files /dev/null and 
b/PW44_2026_GranCanaria/Projects/DeformviewQuantitativeVisualizationOfNonLinearDeformationFieldsForUseInImageGuidedNeurosurgery/trimmed_demo.mov differ diff --git a/PW44_2026_GranCanaria/Projects/DmriPopulationAnalysisLibraryBridge/README.md b/PW44_2026_GranCanaria/Projects/DmriPopulationAnalysisLibraryBridge/README.md new file mode 100644 index 000000000..252582ffd --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/DmriPopulationAnalysisLibraryBridge/README.md @@ -0,0 +1,91 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: DMRI population analysis library bridge +category: Quantification and Computation +presenter_location: + +key_investigators: + +- name: Ebrahim Ebrahim + affiliation: Kitware + country: USA + +- name: Arthur Chakwizira + affiliation: BWH + country: USA + +--- + +# Project Description + + + + +We've been working on a python library, currently (badly) named [abcdmicro](https://github.com/brain-microstructure-exploration-tools/abcd-microstructure-pipelines/), for diffusion MRI population analysis. Its goal is to make it easy to have the tools you need for processing population brain diffusion MRI in one convenient-to-set-up python package, with normally disparate processing steps getting linked together nicely. Slicer isn't where one would typically do large population analysis, but it is an excellent tool for visualizing examples while putting together a pipeline, and it's excellent for interacting with results. For this reason, I'd like to look into bridging abcdmicro with Slicer. + + + +## Objective + + + + +* Make it easy for someone who is building a dmri processing pipeline with abcdmicro to try out the steps of their pipeline in Slicer. 
+ + + +## Approach and Plan + + + +* Create vtk-mrml-based versions of some of the [`Resources`](https://github.com/brain-microstructure-exploration-tools/abcd-microstructure-pipelines/blob/e1ca05eed77a9fcc3e934c4de7f6f43fbcf8bc1f/src/abcdmicro/resource.py) in abcdmicro and conversion utilities that allow them to be created and used. + + + +## Progress and Next Steps + + + + +[Added two example notebooks](https://github.com/brain-microstructure-exploration-tools/abcd-microstructure-pipelines/pull/117) to abcdmicro. This helps introduce the functionality. + +One notebook works up through NODDI estimation and tract segmentation for a particular subject: + +![NODDI estimate for a particular subject](noddi-example.png) + +![Tract segmentation example](tractseg-example.png) + +The other demonstrates multimodal population template construction: + +![FA and MD population templates](multimodal-template-example.png) + +In a discussion with Arthur, here are some future directions we identified as valuable: + +- Adding more pre-processing such as motion correction, gibbs ringing correction, etc. +- Incorporating structural mri processing and handling co-registration +- Adding more microstructure models +- Support DICOM as an on-disk format, and consider having dicom-nifti conversion +- Adding (optional?) experimental parameters to DWI class (or a subclass?) to support things like time-dependent diffusion mri + +## Final state of things + +The outcome of this project was mostly discussions and ideas; here are a couple of last-minute updates to wrap things up: + +- The abcdmicro notebooks now include built-in downloading of suitable example data. Now anybody can try them without having ABCD data! 
[Here is a snapshot you can try right now](https://github.com/brain-microstructure-exploration-tools/abcd-microstructure-pipelines/tree/6da13394a10e912d50cdc9ea26dccc5cae0f6b65/notebooks), but once they are merged you should refer to the main branch. +- After some back-and-forth on different ways of approaching the Slicer bridge, [here is an (AI generated) way of approaching a `SlicerVolumeResource`](https://gist.github.com/ebrahimebrahim/57d4f7f2999b29138a9ec4146febb7f3). + + +Side story: Trying to install abcdmicro into the Slicer python environment immediately raised a problem: abcdmicro depends on TractSeg which depends on fury which depends on a version of vtk that conflicts with Slicer's. To install it we had to install abcdmicro with `--no-deps`, then install the deps manually, except for TractSeg. In the case of TractSeg it has to be installed with `--no-deps`, and then its deps have to be installed manually, with the exception of fury. We don't need fury in Slicer. This whole story could have been averted if TractSeg made fury an optional dependency; abcdmicro doesn't really need fury after all. But we can't rely on package maintainers to do things the exact way we want. This story relates to my other PW44 project! It would be useful to support some way of just forcibly skipping a `Requirement`. + + +# Background and References + + + +- [abcdmicro](https://github.com/brain-microstructure-exploration-tools/abcd-microstructure-pipelines/) is the library we have currently been working on for which I'd like to experiment with bridging with Slicer. 
+ diff --git a/PW44_2026_GranCanaria/Projects/DmriPopulationAnalysisLibraryBridge/multimodal-template-example.png b/PW44_2026_GranCanaria/Projects/DmriPopulationAnalysisLibraryBridge/multimodal-template-example.png new file mode 100644 index 000000000..a84517a5e Binary files /dev/null and b/PW44_2026_GranCanaria/Projects/DmriPopulationAnalysisLibraryBridge/multimodal-template-example.png differ diff --git a/PW44_2026_GranCanaria/Projects/DmriPopulationAnalysisLibraryBridge/noddi-example.png b/PW44_2026_GranCanaria/Projects/DmriPopulationAnalysisLibraryBridge/noddi-example.png new file mode 100644 index 000000000..f6fc66da9 Binary files /dev/null and b/PW44_2026_GranCanaria/Projects/DmriPopulationAnalysisLibraryBridge/noddi-example.png differ diff --git a/PW44_2026_GranCanaria/Projects/DmriPopulationAnalysisLibraryBridge/tractseg-example.png b/PW44_2026_GranCanaria/Projects/DmriPopulationAnalysisLibraryBridge/tractseg-example.png new file mode 100644 index 000000000..e63f9bce1 Binary files /dev/null and b/PW44_2026_GranCanaria/Projects/DmriPopulationAnalysisLibraryBridge/tractseg-example.png differ diff --git a/PW44_2026_GranCanaria/Projects/ExplorationOfFoundationModelsAndTheirEmbeddingsForOtherTasksUsingTheCloud/README.md b/PW44_2026_GranCanaria/Projects/ExplorationOfFoundationModelsAndTheirEmbeddingsForOtherTasksUsingTheCloud/README.md new file mode 100644 index 000000000..981a4dcb6 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/ExplorationOfFoundationModelsAndTheirEmbeddingsForOtherTasksUsingTheCloud/README.md @@ -0,0 +1,121 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Exploration of foundation models and their embeddings for other tasks using the cloud +category: Cloud / Web +presenter_location: + +key_investigators: + +- name: Deepa Krishnaswamy + affiliation: Brigham and Women's Hospital + country: USA + +- name: Andrey Fedorov + affiliation: Brigham and Women's Hospital + country: USA + +- name: Steve Pieper + affiliation: 
Isomics, Inc. + country: USA + +- name: Mike Halle + affiliation: Brigham and Women's Hospital + country: USA + +- name: Suraj Pai + affiliation: Brigham and Women's Hospital + country: USA + + +--- + +# Project Description + + + + +The popularity and use of foundation models (FMs) have exploded in recent years. Within the medical imaging field alone, numerous models have been developed to support various downstream tasks, including classification and segmentation. + +However, as a user, it's hard to understand the embeddings that the models produce. Also, it's hard to figure out: +1) which model to use, and +2) what tasks the model supports. + +In this project, we plan to explore how the cloud can help us understand these embeddings from various FMs. Recently, we have extracted embeddings from lung cancer tumors in the National Lung Screening Trial (NLST) CT dataset from 9 different models. We will use the latest features in the Google Cloud Platform to help us explore and understand these embeddings. + +We are most interested in: +1) how these embeddings can be visualized, and if clusters are visible, +2) if these embeddings can be used to find similar patients, and +3) if they can actually be used for other tasks + +Possible extensions to this work: +1) We could explore the embeddings that Google has provided from their pathology foundation model [here](https://research.google/blog/health-specific-embedding-tools-for-dermatology-and-pathology/) and [here](https://github.com/Google-Health/imaging-research/tree/master/path-foundation). +2) We could extend this exploration to lung ultrasound images, where visualizing the embedding space could help us choose representative and diverse images for expert annotation + +How could it relate to Slicer and Imaging Data Commons (IDC)? Given a sample patient image, we could retrieve the k-closest patients in IDC. + + + +## Objective + + + + +1. 
We will figure out how to store these embeddings in the cloud to enable a quick search and comparison. +2. Next, we will explore and visualize these embeddings. +3. Then, we will use these embeddings to perform image retrieval -- finding similar patients in the NLST collection. +4. Lastly, we will show how to use these embeddings for a downstream task. + + + + +## Approach and Plan + + + + +1. We will see if Google Cloud Platform BigQuery (BQ) can be used to store the embeddings: [vector-search-intro](https://docs.cloud.google.com/bigquery/docs/vector-search-intro) and [vector-index](https://docs.cloud.google.com/bigquery/docs/vector-index) +2. Next, we will create an interactive plot to explore embeddings and any clustering in a low-dimensional space. We will let the user click on points to open up an OHIF link with the original image data. +3. We will investigate whether vector search or a similarity search can be performed to find similar patients. + + + +## Progress and Next Steps + + + +1. We first explored the embeddings in a low-dimensional space using UMAP, but we couldn't see any visible clusters. +2. Then, we used BigQuery vector search from Google Cloud to try content-based image retrieval. This worked, but was not efficient. +3. Then, we decided to precompute distances between these embeddings. +4. We used Apache e-charts to show results from querying a patient and finding the top 5 matches. 
+ +This project was further developed - see the following: +* https://github.com/ImagingDataCommons/nlst-sybil-connectome +* https://imagingdatacommons.github.io/nlst-sybil-connectome/ + +# Illustrations + + + +Overview of project: +![](https://github.com/user-attachments/assets/477f71aa-07b9-428b-b246-a3941e0733d6) + +Sample of connectome plot and showing the query image: +![](https://github.com/user-attachments/assets/cdee6e22-2c54-4395-a07f-1e08a1a70d31) + +Demonstration of content-based image retrieval: + + +# Background and References + + + +[Try this out yourself!](https://storage.googleapis.com/pw_44_demo/FM/site/index_connectome_with_urls.html?query=100158&matches=122866,131423,119358,134408,124261&model=FMCIB&bucket=pw_44_demo/FM) diff --git a/PW44_2026_GranCanaria/Projects/ExtendingSlicertEbpModuleForwardPlanningCapabilitiesAndImportExportInfrastructure/README.md b/PW44_2026_GranCanaria/Projects/ExtendingSlicertEbpModuleForwardPlanningCapabilitiesAndImportExportInfrastructure/README.md new file mode 100644 index 000000000..7bd101eb1 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/ExtendingSlicertEbpModuleForwardPlanningCapabilitiesAndImportExportInfrastructure/README.md @@ -0,0 +1,121 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Extending SliceRT EBP Module - RTPlan recalculation capabilities and import-export infrastructure +category: Infrastructure +presenter_location: + +key_investigators: + +- name: Lina Bucher + affiliation: KIT & DKFZ + country: Germany + +- name: Maria Francesca Spadea + affiliation: KIT + country: Germany + +- name: Niklas Wahl + affiliation: DKFZ + country: Germany + +- name: Csaba Pintér + affiliation: EBATINCA + country: Spain + +--- + +# Project Description + + + + +Recent work on SlicerRT's EBP module has focused on its inverse planning infrastructure for dose calculation and plan optimization, integrating the multi-modality treatment planning toolkit 
pyRadPlan (see [https://github.com/e0404/pyRadPlan](https://github.com/e0404/pyRadPlan)). + +Now we want to further extend the module's forward planning capabilities as well as its export and import features. + +Our overall goal is to establish a well-integrated treatment planning tool in Slicer, that remains customizable and user-friendly. + + + +## Objective + + + + +1. **RTPlan recalculation:** Advance the pyRadPlan dose engine for forward calculations on loaded plans. +2. **Import/Export Features:** Build infrastructure for saving and loading dose influence matrices and objectives. + + + +## Approach and Plan + + + + +1. **RTPlan recalculation:** Implement handling of loaded MLC shapes (photon plan), spot positions (ion plan) and machine specifications in the pyRadPlan dose engine. +2. **Import/Export Features:** Revise storing of the dose influence matrices (currently beam-wise) and develop method for exporting and reloading into plan. Include saving/exporting of the user-specified objectives table in the optimization workflow. + + + +## Progress and Next Steps + + + +### RTPlan recalculation: + +![](https://github.com/user-attachments/assets/f01bee65-8d97-4062-b1c1-10c09011938a) + +
+
+ +**pyRadPlan:** +1. Enabled **field-based forward dose calculation (PHOTONS)** from loaded **field shapes**. + +![](https://github.com/user-attachments/assets/c425fdf7-2139-4770-9104-f65f963105d7) +![](https://github.com/user-attachments/assets/b54964b0-4331-44e8-a678-087d0bf7eede) + (coronal view) + +
+
+ +**SlicerRT:** +
+ +2. Added *CreateMLCAperturePolyData* in *MLCPositionLogic* --> Extracts **fieldshape** from beam and MLCTableNode. (Currently only MLCs!) + +3. Locally integrated forward calculation in **pyRadPlanDoseEngine**. + +4. Started testing on **loaded RTPlans**. + +
+ +**Next steps:** +1. Solve orientation problem. +2. Further test on loaded Plans (**TROTS**, **matRad**). +3. Refine fieldshape by **jaws** & other **beam-limiting devices**. + + +### Import/Export Features: +No progress so far + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +_No response_ diff --git a/PW44_2026_GranCanaria/Projects/GetfemSlicerIntegrationForwardAndInverseLungDeformationModeling/README.md b/PW44_2026_GranCanaria/Projects/GetfemSlicerIntegrationForwardAndInverseLungDeformationModeling/README.md new file mode 100644 index 000000000..a03465024 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/GetfemSlicerIntegrationForwardAndInverseLungDeformationModeling/README.md @@ -0,0 +1,107 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: GetFEM Slicer integration Forward and Inverse Lung Deformation Modeling +category: Quantification and Computation +presenter_location: + +key_investigators: + +- name: Domenico Riggio + affiliation: Karlsruhe Institute of Technology + country: Germany + +- name: Johannes Pfeil + affiliation: Karlsruhe Institute of Technology + country: Germany + +- name: Florian Schulte + affiliation: Karlsruhe Institute of Technology + country: Germany + +- name: Maria Francesca Spadea + affiliation: Karlsruhe Institute of Technology + country: Germany + +- name: Steve Pieper + affiliation: Isomics + country: USA + +- name: Ron Kikinis + affiliation: BWH + country: USA + +--- + +# Project Description + + + + +This project focuses on the integration of 3D Slicer with the GetFEM library to enable forward and inverse finite element (FEM) simulations directly on anatomical models derived from medical imaging. By combining Slicer’s strengths in segmentation, visualization, and user interaction with GetFEM’s FEM capabilities, the project provides a unified environment for biomechanical modeling. 
+ +The integrated framework is applied to the study of lung deformation, enabling both direct FEM simulations under prescribed mechanical conditions and inverse FEM analyses aimed at estimating unknown parameters by fitting simulated deformations to a target anatomy. + + + +## Objective + + + + +The objectives of the project are to: + • Integrate GetFEM-based FEM simulations into the 3D Slicer environment. + • Enable both forward and inverse FEM analyses on segmented anatomical models. + • Provide tools to estimate mechanical parameters through inverse modeling. + • Support interactive definition of boundary conditions and loads. + • Apply the framework to lung deformation modeling as a representative use case. + + + +## Approach and Plan + + + + +The project starts by establishing a connection between 3D Slicer and GetFEM, allowing segmented anatomical models to be used directly as inputs for FEM simulations, after a proper volumetrization. Users define the deformable model, boundary conditions, and simulation parameters through a graphical interface in Slicer. + +For forward FEM, simulations are performed using predefined material properties and pressure values to generate sequences of deformed models. For inverse FEM, a target model is provided, and mechanical parameters are iteratively optimized by comparing the simulated deformation to the target anatomy. Both workflows share common inputs and are accessible through a unified interface. + +The plan is to maintain a modular and extensible architecture, enabling future improvements without altering the core integration. + + + +## Progress and Next Steps + + + + +The integration between 3D Slicer and GetFEM has been successfully established. A functional Slicer extension has been developed, providing a graphical interface for defining models, constraints, and simulation parameters. 
Both forward and inverse FEM workflows have been implemented and validated on lung models, demonstrating the feasibility of parameter estimation and deformation analysis within the integrated environment. + +Next Steps +Future developments will focus on: + • Debugging and optimization. + • Improving result management and output model handling. + • Adding quantitative metrics for evaluating simulation accuracy. + • Enhancing visualization of deformation and pressure regions. + + + +# Illustrations + + + + +![](https://github.com/user-attachments/assets/a1dc8d4e-cb4a-4e91-a47a-bb5522055a2f) + + + +# Background and References + + + +- Steve Pieper, SlicerGetFEM, [https://github.com/pieper/SlicerGetFEM](https://github.com/pieper/SlicerGetFEM) diff --git a/PW44_2026_GranCanaria/Projects/GradientWaveformOptimisationForMicrostructureMappingWithDiffusionMri/README.md b/PW44_2026_GranCanaria/Projects/GradientWaveformOptimisationForMicrostructureMappingWithDiffusionMri/README.md new file mode 100644 index 000000000..246c69120 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/GradientWaveformOptimisationForMicrostructureMappingWithDiffusionMri/README.md @@ -0,0 +1,93 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Gradient waveform optimisation for microstructure mapping with diffusion MRI +category: Quantification and Computation +presenter_location: + +key_investigators: + +- name: Arthur Chakwizira + affiliation: Brigham and Women's Hospital + country: Harvard Medical School, USA + +- name: Carl-Fredrik Westin + affiliation: Brigham and Women's Hospital + country: Harvard Medical School, USA +--- + +# Project Description + + + + +Time-dependent diffusion MRI offers sensitivity to brain tissue microstructure, but has limited specificity. Multiple features of the tissue microstructure tend to map onto the same signal contrast. 
Multi-dimensional experiment designs using freely modulated gradient waveforms have been proposed as a remedy, with previous work demonstrating the ability to disentangle features such as cell size, cell shape and membrane permeability. However, the waveforms used in previous studies were stochastically generated and a rigorous optimiser remains an unmet need. + + + +## Objective + + + + +Develop a gradient waveform optimiser that allows targeting specific tissue characteristics (such as cell size) while respecting hardware constraints and maximising diffusion encoding efficiency + + + + +## Approach and Plan + + + + +1. Parameterise gradient waveforms using a set of control points in the Cartesian plane, together with cubic spline interpolation +2. Define a cost function predicting sensitivity to various microstructural properties, using the gradient waveform and analytical microstructure models +3. Set up constraints to account for hardware and time limitations. Enforce a minimum b-value. +4. Choose an appropriate solver + + + +## Progress and Next Steps + + + + +1. Parameterised waveforms using control points and cubic spline interpolation +2. Defined a cost function evaluating sensitivity using the gradient waveform and microstructure models +3. Imposed constraints to account for hardware (slew rate, gradient amplitude) and echo time. Enforced a minimum b-value of 4000 s/mm2. +4. Chose the patternsearch solver with randomised initial conditions + + + +# Illustrations + + + + +Example waveforms from the new optimiser, illustrating both the gradient in time and the encoding power spectrum. These waveforms are optimised for specificity to restricted diffusion (cell size) and they are designed for the MAGNUS MRI scanner with a maximum gradient strength of 300 mT/m. 
+ + + +![](https://github.com/user-attachments/assets/9bddf89c-8539-4f7e-bf7c-61957deb83ba) + + + + +# Background and References + + + + +[GitHub repository](https://github.com/arthur-chakwizira/waveform-optimisation) + + + +Previous work presenting the idea of time-dependent diffusion MRI with non-standard waveforms: + +- Chakwizira, A., Zhu, A., Foo, T., Westin, C.-F., Szczepankiewicz, F. & Nilsson, M. 2023. Diffusion MRI with free gradient waveforms on a high-performance gradient system: Probing restriction and exchange in the human brain. NeuroImage. 283: 120409 + +- Chakwizira, A., Westin, C.-F., Brabec, J., Lasič, S., Knutsson, L., Szczepankiewicz, F. & Nilsson, M. 2022. Diffusion MRI with pulsed and free gradient waveforms: Effects of restricted diffusion and exchange. NMR in Biomedicine. n/a(n/a): e4827. diff --git a/PW44_2026_GranCanaria/Projects/HandsOnTutorialsExtension/README.md b/PW44_2026_GranCanaria/Projects/HandsOnTutorialsExtension/README.md new file mode 100644 index 000000000..b99938619 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/HandsOnTutorialsExtension/README.md @@ -0,0 +1,126 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Hands-on tutorials extension +category: IGT and Training +presenter_location: + +key_investigators: + +- name: Alejandro Rodríguez Moreno + affiliation: Ebatinca SL + country: Spain + +- name: Csaba Pinter + affiliation: Ebatinca SL + country: Spain + +- name: Interested people welcome! + affiliation: Andriy + country: Tina, Andras? 
+ +--- + +# Project Description + + + + +Unfortunately the issue of useful and up-to-date tutorials is quite difficult, because +- Slide-based tutorials need to be updated manually (see that many tutorials are pre-5.0 in the [training material](https://training.slicer.org)) +- Video tutorials are impossible to update +- In the above two cases it is next to impossible to offer them in different languages +- In-repo markdown files are quite limited in format and usefulness + +We have developed a tutorials infrastructure and some basic tutorials for a commercial project, which could be repurposed for Slicer core. Basically it consists of a curriculum that is described by a JSON file, with dependencies among the tutorials, and a set of hands-on tutorials that can be started from this home screen. + +The hands-on tutorials guide the users through a certain sequence of steps using targeted tooltips and a mechanism detecting if the current step has been completed successfully. This way we could offer some basic tutorials for Slicer core in multiple languages, which is easier to maintain than the current modalities. Of course maintenance will remain an issue, because if API changes the tutorials will break, but the basic functions of Slicer have not really changed in the last decade, and hopefully there won't be much maintenance necessary. + + + +## Objective + + + + +1. Objective A: Reach a common understanding about the necessity of this in general +2. Objective B: Get started with the extension + + + +## Approach and Plan + + + + +1. Discuss the proposal, hopefully in a breakout session, but in any case involving the interested people + a. Decide if the basics are sensible, feasible, and useful, or not + b. Define an initial set of tutorials +2. Start to adapt the tutorials infrastructure to the proposed goals + + + +## Progress and Next Steps + + + + +1. 
Development of the tutorial extension was initiated and is currently hosted in the following repository: https://github.com/xskere/SlicerTutorial. + +2. A dependency system was used to control tutorial accessibility and to build the tutorial tree, ensuring that tutorials which depend on others (e.g., `Tutorial_002` depending on `Tutorial_001`) can only be accessed once their prerequisites are completed. + +3. The tutorial system is built around a state machine structure; initially, all step logic was implemented inside the `enter()` function with setup and validator functions defined elsewhere, which made maintenance and updates difficult. + +4. Based on feedback (notably from Andras), the tutorial authoring model was refactored to improve readability, sustainability, and maintainability while preserving the original state machine design. + +5. Each tutorial is now defined as a fixed sequence of steps written linearly, where every step has a dedicated setup function and a validator function. + +6. For each step, the setup function prepares the tutorial state, followed by a validator function that determines whether the step has been successfully completed. + +7. Each step is appended to a `steps` array as a dictionary that contains required metadata describing how the step behaves and how it is validated: +Each step dictionary includes a pointer to the setup function, a pointer to the validator function, a `completed` boolean flag (defaulting to `False`), and a short textual `description` used to guide the user if validation fails. +An optional `module` field can also be included in the step dictionary to restrict tooltip visibility to a specific module, which is only necessary when the tooltip is attached to widgets that exist in that module. + +8. 
Each tutorial step operates in one of two validation modes: continuous validation, where the validator function runs repeatedly until the condition is met, or manual validation, where the validator is only triggered when the user clicks “Go next step”. + +9. Continuous validation is enabled by calling `self.timerCheck()` at the end of the step’s setup function. + +10. At present, two simple mock tutorials are fully functional: one demonstrates continuous validation and the other demonstrates manual, user-triggered validation. + +11. Three additional tutorials currently exist as placeholders, containing three steps each where the validator always returns `True`. + +12. Tutorial progress is persisted locally in the file `%LOCALAPPDATA%/NA-MIC/Slicer/Tutorials/tutorialProgress.json`. + +13. The progress file stores a JSON object where each tutorial has a `completed` state and an `enabled` flag that controls whether the tutorial appears in the tutorial tree. + +14. If a tutorial is disabled, it is hidden from the tutorial tree, and any tutorials that depended on it will instead inherit its dependency chain. + +15. Investigation was done to augment the functionality of these tutorials, for example: + +- The TutorialMaker extension could be extended to automatically generate review slides after each step once validation succeeds, allowing users to revisit completed steps without rerunning the entire tutorial. +- Tutorials are currently linear and do not support navigating back to previous steps. +- Non-linear navigation could be implemented in the future using scene views to save and restore the application state at each step. +- Scene views can be created and restored programmatically, enabling step-by-step state restoration by index or by name. 
+ + +# Illustrations + + + + +![](https://github.com/user-attachments/assets/92e1aa2e-c6ab-4ed5-affc-285a2336cb72) +Part of the curriculum tree in the commercial app that we propose to adapt to Slicer core + + + +# Background and References + + + + +* The commercial app in question: [https://ebatinca.com/productos/start](https://ebatinca.com/productos/start) +* The current training material for Slicer core: [https://training.slicer.org/](https://training.slicer.org/) diff --git a/PW44_2026_GranCanaria/Projects/HelpingLlmsViewIdcImages/README.md b/PW44_2026_GranCanaria/Projects/HelpingLlmsViewIdcImages/README.md new file mode 100644 index 000000000..c8b2cb29b --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/HelpingLlmsViewIdcImages/README.md @@ -0,0 +1,78 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Helping LLMs view IDC images +category: Cloud / Web +presenter_location: + +key_investigators: + +- name: Michael Halle + affiliation: BWH + country: USA + +--- + +# Project Description + + + + +The Imaging Data Commons is filled with imaging data, but idc-index and database searches only return textual data/metadata to LLMs. Modern LLMs have vision capabilities and can view selected images. I will complete a standalone non-interactive imaging utility that will give LLMs an image viewing capability for IDC data. + + + +## Objective + + + + +1. A standalone python CLI for displaying an image or mosaic from an IDC series. +2. A Claude Skill (or an extension of an existing skill) that teaches an LLM to look at IDC images. + + + +## Approach and Plan + + + + +1. Finish the CLI +2. Get feedback +3. Finish the Skill + + + +## Progress and Next Steps + + + + +1 Prototype "idc-series-preview" is written but needs knowledgable eyes on the project. It also needs some features like annotations added. This project could well be done using fancy DICOM viewer code, but for right now Python works fine for the prototype. 
1. During Project Week, I added annotations to the python module and made it into a Claude Skill.
b/PW44_2026_GranCanaria/Projects/ImprovementsAndBugFixesForSlicervirtualreality/README.md new file mode 100644 index 000000000..7550062c7 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/ImprovementsAndBugFixesForSlicervirtualreality/README.md @@ -0,0 +1,108 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Improvements and Bug Fixes for SlicerVirtualReality +category: VR/AR and Rendering +presenter_location: + +key_investigators: + +- name: Kyle Sunderland + affiliation: Queen's University + country: Canada + +- name: Simon Drouin + affiliation: École de Technologie Supérieure + country: Canada + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +- name: Csaba Pinter + affiliation: Ebatinca SL + country: Spain + +--- + +# Project Description + + + +![SlicerVirtualReality Logo](https://github.com/KitwareMedical/SlicerVirtualReality/raw/master/SlicerVirtualReality.png) + + +Currently SlicerVR is usable with OpenVR/OpenXR, however there are some pending issues: + +- Nightly dashboard + - Errors in the nighlty dashboard ([SlicerPreview Dashboard](https://slicer.cdash.org/viewBuildError.php?buildid=4075074)) + +- Performance + - Adding markups to the scene causes an immediate drop in framerate and rendering artifacts + - Visualization of interaction handles results in performance issues + - Volume rendering with multi-component images/sequences causes a drop in framerate when the camera is close to the volume + +- Visualization + - Investigate "washed-out" appearance in VR view + +- Interaction + - Not all controller interactions are recognized + - Investigate remappable controller bindings from python + +- Etc. + + + +## Objective + + + +Bring SlicerVR to stable, usable performance and complete basic interaction support. + +## Approach and Plan + + + +1. Make SlicerVirtualReality available from the extension manager again +2. Profile and baseline performance (CPU, GPU, FPS in representative scenes) +3. 
Fix highest-impact performance issues (markups, widgets, volume rendering) +4. Add missing OpenXR controller input mapping +5. Iterate with testing in real scenes + +## Progress and Next Steps + + + +1. SlicerVirtualReality is again available on the extension index ([SlicerPreview Dashboard](https://slicer.cdash.org/builds/4080509)) +2. Greatly improved performance for rendering Markups in VR by bypassing depth check for visible points ([Slicer#8979](https://github.com/Slicer/Slicer/pull/8979)) ([SlicerVirtualReality#185](https://github.com/KitwareMedical/SlicerVirtualReality/pull/185)) +3. Washed-out colors in virtual reality appear to be resolved. + +# Illustrations + + + +### Color issue + +#### Before + +![](VRBadColors.png) +Left: Slicer view using OpenXR without shadows Right: VR view (with back lights / without two sided lighting) + +#### Now + +In latest Slicer Preview Release: colors are the same in desktop rendering and VR headset. + +![](LatestSlicerVRCorrectColors.png) + + +# Background and References + + + +Previous project week page on color isues in virtual reality: [Previous Project Week](https://projectweek.na-mic.org/PW42_2025_GranCanaria/Projects/LightingProblemsWithLatestSlicervr/) +Extension repository: [SlicerVirtualReality](https://github.com/KitwareMedical/SlicerVirtualReality/) + diff --git a/PW44_2026_GranCanaria/Projects/ImprovementsAndBugFixesForSlicervirtualreality/VRBadColors.png b/PW44_2026_GranCanaria/Projects/ImprovementsAndBugFixesForSlicervirtualreality/VRBadColors.png new file mode 100644 index 000000000..628b6d2b4 Binary files /dev/null and b/PW44_2026_GranCanaria/Projects/ImprovementsAndBugFixesForSlicervirtualreality/VRBadColors.png differ diff --git a/PW44_2026_GranCanaria/Projects/Integrating3DLandmarkDetectionModels/README.md b/PW44_2026_GranCanaria/Projects/Integrating3DLandmarkDetectionModels/README.md new file mode 100644 index 000000000..a1fce557c --- /dev/null +++ 
b/PW44_2026_GranCanaria/Projects/Integrating3DLandmarkDetectionModels/README.md @@ -0,0 +1,74 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Integrating 3D Landmark Detection Models +category: Segmentation / Classification / Landmarking +presenter_location: + +key_investigators: + +- name: Alexandra Ertl + affiliation: German Cancer Research Center DKFZ + country: Germany + +--- + +# Project Description + + + + +We developed a framework for 3D medical landmark detection, based on a U-Net and heatmap regression. The method was evaluated on several public datasets. We want to make these trained models available within a slicer extension. + + +## Objective + + + + +Within the extension, users should be able to: +1) Choose and run a model for prediction. +2) Visualize predicted landmarks. + + + +## Approach and Plan + + + + +_No response_ + + + +## Progress and Next Steps + + + +We have adjusted the SLicerNNUnet extension for landmark detection. + +![](https://github.com/user-attachments/assets/fa47cf3d-482b-4752-9245-883dfb58dff1) + + + + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +The respective publication is currently under review for MIDL 2026 ([OpenReview submission](https://openreview.net/forum?id=G2Dpy3hayS)) diff --git a/PW44_2026_GranCanaria/Projects/InteractiveRhombicuboctahedronVolumeOrientationMarker/README.md b/PW44_2026_GranCanaria/Projects/InteractiveRhombicuboctahedronVolumeOrientationMarker/README.md new file mode 100644 index 000000000..6a0d75567 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/InteractiveRhombicuboctahedronVolumeOrientationMarker/README.md @@ -0,0 +1,103 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Interactive rhombicuboctahedron volume orientation marker +category: VR/AR and Rendering +presenter_location: + +key_investigators: + +- name: Martin Bellehumeur + affiliation: Radical Imaging + country: Germany + +--- + +# Project 
Description + + + + +Make an orientation marker for OHIF whose 26 surfaces (6 faces, 8 corners and 12 edges) can be clicked to reorient the volume in 3D/VRT viewport. + +![](https://github.com/user-attachments/assets/d9a50e00-74ba-4549-93a7-6d34229c4c18) + + + +## Objective + + + + +Enhanced usability of 3D viewport. +Provide volume orientation control when the rotate tool is rotating the clipping planes. + + + + +## Approach and Plan + + + + +Seek advice on implementation. + + + +## Progress and Next Steps + +Implementing a cornerstone tool named "Orientation controller' for volume 3D viewports. +It uses a VTK.js-based widget architecture. + +1. **`vtkOrientationControllerWidget`** (`packages/tools/src/utilities/vtkjs/OrientationControllerWidget/`) + - Core widget managing VTK actors and interaction + - Creates and manages `vtkActor` instances for the polyhedron + - Handles mouse picking via `vtkCellPicker` + - Manages actor lifecycle (add/remove from viewports) + - Handles positioning and sizing relative to viewport corners + +2. **`AnnotatedRhombicuboctahedronActor`** (`packages/tools/src/utilities/vtkjs/AnnotatedRhombicuboctahedronActor/`) + - Generates the 26-faced polyhedron geometry + - Creates VTK actors with textured faces and labels + - Supports main faces (6), edge faces (12), and corner faces (8) + - Applies anatomical labels (L/R, A/P, S/I) using LPS convention + +3. 
**`OrientationController` Tool** (`packages/tools/src/tools/OrientationController.ts`) + - Cornerstone3D tool extending `BaseTool` + - Wraps the VTK widget for integration + - Manages tool lifecycle (enable/disable) + - Handles configuration updates and viewport synchronization + + + + + + + + + + +# Illustrations + + + + + +# Acknowledgements + +This project is supported by [Freie Universität Berlin](https://www.fu-berlin.de/en/index.html) + + +_No response_ diff --git a/PW44_2026_GranCanaria/Projects/KidneynavRealTimeKidneyUltrasoundSegmentationAnd3DReconstructionIn3DSlicer/README.md b/PW44_2026_GranCanaria/Projects/KidneynavRealTimeKidneyUltrasoundSegmentationAnd3DReconstructionIn3DSlicer/README.md new file mode 100644 index 000000000..4b5e8dd13 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/KidneynavRealTimeKidneyUltrasoundSegmentationAnd3DReconstructionIn3DSlicer/README.md @@ -0,0 +1,86 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: 'KidneyNav: Real-time kidney ultrasound segmentation and 3D reconstruction in 3D Slicer:' +category: IGT and Training +presenter_location: + +key_investigators: + +- name: Gabriella d'Albenzio + affiliation: Queen's University + country: Canada + +- name: Kyle Sunderland + affiliation: Queen's University + country: Canada + +- name: Emese Elkind + affiliation: Queen's University + country: Canada + +- name: Lily Morrell + affiliation: Queen's University + country: Canada + +- name: Ron Kikinis + affiliation: BWH + country: USA + +- name: Gabor Fichtinger + affiliation: Queen's University + country: Canada + +--- + +# Project Description + +KidneyNav is a 3D Slicer scripted module designed for real-time ultrasound navigation and intraoperative visualization. The module connects to a PLUS server via OpenIGTLink to stream live 2D ultrasound images and tracking transforms, and it supports live volume reconstruction using Slicer’s VolumeReconstruction infrastructure. 
The current implementation includes automatic node setup (input image, prediction volume, transforms, connectors, reconstruction node/ROI), a custom 2D/3D layout for simultaneous slice and 3D rendering, and tools to record synchronized sequences (ultrasound, predictions, transforms, and needle model) for later analysis. + +During this Project Week, we want to validate the module in a real live scanning setting and integrate real-time AI-based multiclass segmentation (kidney + calyces + fluid)). We also want to connect multiclass predictions to live volume reconstruction and discuss best practices for reconstructing volumes from two different (or complementary) prediction streams (e.g., kidney mask vs calyces mask, or two model outputs), including visualization, synchronization, and reconstruction strategies. + + + +## Objective + +1. **Objective A.** Validate the end-to-end live workflow by recording synchronized ultrasound, prediction, transform, and needle model sequences during real-time acquisition. +2. **Objective B.** Integrate real-time multiclass AI segmentation (kidney, calyx, fluid) streamed into 3D Slicer via OpenIGTLink and used directly for live volume reconstruction. +3. **Objective C.** Establish and compare reconstruction strategies for multiclass predictions, including single-volume and dual-volume approaches, and derive community-informed recommendations on synchronization, labeling, interpolation, and fusion. + + + +## Approach and Plan + +1. Record short test sequences (ultrasound image, prediction, transforms, needle model). +2. Stream multiclass prediction (kidney / calyx / fluid) into Slicer via OpenIGTLink as a label or scalar volume synchronized with the ultrasound stream. +3. Run live volume reconstruction directly from the prediction stream. +4. Review with the Slicer/IGT community best practices for synchronization, label consistency, interpolation, and reconstruction from dual prediction outputs. +5. 
Compare reconstruction strategies: + - **Creates and registers a volume rendering preset that visualizes segmentation labelmaps using a discrete transfer function derived from Segmentation_ColorTable.** + - **Visualizes multi-class predictions as a multi-component scalar volume, using Slicer’s Independent Multi-Component Volume Rendering mode** + + +## Progress and Next Steps + +1. Recorded five short ultrasound test sequences on a control group of participants. +2. Trained an nnU-Net–based multiclass segmentation model and obtained a lightweight pretrained model. +3. Streamed multiclass segmentation predictions (kidney, calyx, fluid) into 3D Slicer via OpenIGTLink as a label volume, synchronized with the live ultrasound stream. +4. Performed live volume reconstruction directly from the incoming prediction stream. +5. Implemented Rendering Strategy 1 only: discrete labelmap volume rendering, where each label value is mapped to its predefined color from the Segmentation Color Table. + + +# Illustrations + + +![KidneyNav](https://github.com/user-attachments/assets/25fa7367-8c2f-4506-8d56-5a2cd1663fe3) +![](https://github.com/user-attachments/assets/302e2f0a-ffdf-4219-a2f0-864fb112368f) + +# Background and References + + + + +_No response_ diff --git a/PW44_2026_GranCanaria/Projects/MakeClassannotationExtensionMultiLabelAndCollaborative/README.md b/PW44_2026_GranCanaria/Projects/MakeClassannotationExtensionMultiLabelAndCollaborative/README.md new file mode 100644 index 000000000..425c867d8 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/MakeClassannotationExtensionMultiLabelAndCollaborative/README.md @@ -0,0 +1,98 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Make ClassAnnotation Extension Multi-Label and collaborative +category: Segmentation / Classification / Landmarking +presenter_location: + +key_investigators: + +- name: Lorena Romeo + affiliation: Magna Graecia University of Catanzaro + country: Italy + +- 
name: Ciro Benito Raggio + affiliation: Karlsruhe Institute of Technology + country: Germany + +- name: Maria Francesca Spadea + affiliation: Karlsruhe Institute of Technology + country: Germany + +- name: Paolo Zaffino + affiliation: Magna Graecia University of Catanzaro + country: Italy + +--- + +# Project Description + + + + +ClassAnnotation is an extension designed to support users during medical image annotation process and it is able to generate a structured output for AI applications. Currently, the system does not support multiple label per patient; therefore, it's desiderable to implement a Multi-Label module to enable this feature. We would also to investigate an approach to make the annotation task collaborative. + + + + +## Objective + + + + +1. Create Multi-Label module +2. Try to implement a prototype for collaborative annotation + + + +## Approach and Plan + + + + +1. Gather feedback about ClassAnnotation +2. Create Multi-Label module +3. Discuss about the implementation of a collaboration between annotators + + + +## Progress and Next Steps + + + + +1. Extended ClassAnnotation from single-label to multi-label allowing multiple features per patient (still working in progress). +2. Added a clear multi-label user interface with automatically generated class buttons and consistent Single/Multi mode behavior. +3. Defined a structured multi-label output. +4. Talked to Andras and got some good advice from him. + +Next Steps + +- Perform thorough testing and validation of the multi-label mode. +- Add support for a collaborative configuration. 
+ + + + +# Illustrations + + + + +![ClassAnnotation logo](https://github.com/lorenaromeo/SlicerClassAnnotation/blob/main/ClassAnnotation.png?raw=true) +![ClassAnnotation pannel](https://raw.githubusercontent.com/lorenaromeo/SlicerClassAnnotation/refs/heads/main/ClassAnnotation_screenshot.png) + +![](https://github.com/user-attachments/assets/c52cdc71-2d16-40a4-a506-dcb2a6a3e56e) + + +# Background and References + + + + +- Source code: [https://github.com/lorenaromeo/SlicerClassAnnotation](https://github.com/lorenaromeo/SlicerClassAnnotation) +- Demo (video): [https://drive.google.com/file/d/1aquVa_120tXwTltVutA9tb2mD2o8SCwK/view?usp=sharing](https://drive.google.com/file/d/1aquVa_120tXwTltVutA9tb2mD2o8SCwK/view?usp=sharing) diff --git a/PW44_2026_GranCanaria/Projects/MakingLnqMoreFair/README.md b/PW44_2026_GranCanaria/Projects/MakingLnqMoreFair/README.md new file mode 100644 index 000000000..29a173aaf --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/MakingLnqMoreFair/README.md @@ -0,0 +1,72 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Making LNQ more FAIR +category: Quantification and Computation +presenter_location: + +key_investigators: + +- name: Michael Halle + affiliation: BWH + country: USA + +--- + +# Project Description + + + + +The Lymph Node Quantification Project currently has no web presence, pointer to its data, or source of reference material. I plan to explore a set of traditional (web) and new (LLM and Claude Skill) technologies to make the project more findable, accessible, interoperable and reusable. This includes developing a searchable citation tree to improve search for information about lymph node quantification, and a roadmap (skill) to help LLMs look for LNQ resources. + + + +## Objective + + + + +1. An LNQ web site/landing page +2. An SQLite database of citations in the citation tree rooted by seminal or otherwise important papers in the field +3. 
A Claude Skill that can search the database, the LNQ data stored in IDC, and the information from the MICCAI challenge + + + +## Approach and Plan + + + + +1. Github website +2. Identify publications +3. Build citation tree +4. Write skill + + + +## Progress and Next Steps + + + +1. Built the website (mkdocs with material theme) +2. Wrote llms.txt for the site +3. added citation database with datasette browser: [Browse LNQ Citations](https://lite.datasette.io/?url=https://raw.githubusercontent.com/mhalle/LNQ-citations/master/citations.db&metadata=https://raw.githubusercontent.com/mhalle/s2cli/main/citetree-metadata.yaml#/citations/papers) + + +# Illustrations + + + + +# Background and References + + + + +_No response_ + diff --git a/PW44_2026_GranCanaria/Projects/MhubAiMcpServerForModelProvisionAndExecution/README.md b/PW44_2026_GranCanaria/Projects/MhubAiMcpServerForModelProvisionAndExecution/README.md new file mode 100644 index 000000000..7f6665810 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/MhubAiMcpServerForModelProvisionAndExecution/README.md @@ -0,0 +1,77 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: MHub.ai MCP server for Model Provision and Execution +category: AI +presenter_location: + +key_investigators: + +- name: Leonard Nürnberg + affiliation: AIM Lab + country: USA + +--- + +# Project Description + + + + +MCP servers have significantly shaped and improved the abilities of agentic AI systems. They provide AI agents with structured, on-demand capabilities (tools) and information (resources / promts). The aim is, to enable AI systems to **find** suitable AI models, **answer questions** about specific models and upon request **run** suitable AI models. The secondary aim is to enable **automatic conversion of open source models into** the **MHub.ai** deployment format. + +In this project, we want to enable AI agents to interact with MHub.ai models by providing a (local) MCP server for MHub.ai. 
The public api, structured metadata and output-level information (e.g., anatomic structures segmented by each model) are already present and can be directly exposed through the server. The detailed technical documentation and step-by-step tutorials furthermore might already be a decent starting point to automatically generate MHub.ai project structure, Dockerfile, Module wrapper class, default workflow configuration and metadata json, to automate or speed-up the contribution process. + + + +## Objective + + + + +1. Local MCP server to interact with the MHub.ai model repository +2. Provision of tools and guidelines for automatic model deployment + + + +## Approach and Plan + + + + +1. Describe specific steps of **what you plan to do** to achieve the above described objectives. + + + + +## Progress and Next Steps + + + + +We developed a public PoC Skill that can discover, run and adapt MHub models, the skill can be found under +[https://github.com/MHubAI/MHubSkill](https://github.com/MHubAI/MHubSkill). 
+ + + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +_No response_ + diff --git a/PW44_2026_GranCanaria/Projects/MorphodepotDiscussionAndOptimization/README.md b/PW44_2026_GranCanaria/Projects/MorphodepotDiscussionAndOptimization/README.md new file mode 100644 index 000000000..9993f7d59 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/MorphodepotDiscussionAndOptimization/README.md @@ -0,0 +1,82 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: MorphoDepot discussion and optimization +category: Infrastructure +presenter_location: + +key_investigators: + +- name: Steve Pieper + affiliation: Isomics + country: USA + +- name: Murat Maga + affiliation: Seattle Children's + country: USA + +--- + +# Project Description + + + + +As part of the [MorphoCloud](https://morphocloud.org/) project, we have developed a [MorphoDepot](https://github.com/MorphoCloud/SlicerMorphoDepot), as 3D Slicer extension that is already available and in use. + +The goals are to help organize work related to 3D imaging, primarily for biological morphology applications. Initial target is segmentation of microCT scans of animals. In the future it could expand to applications like landmarking and other annotation / analysis tasks. + +The extension relies heavily on GitHub as the back-end, with each scan being associated with a repository, and segmentation tasks being managed by issues and pull requests. The system is working and is in the early stages of real-world usage and we are collecting feedback about usability. + +The system uses the Github CLI, gh, to interact with Github but has some performance issues and sometimes the API is throttled due to quota restrictions. + + + +## Objective + + + + +1. Make the Project Week community aware of this extension +2. Gather feedback about possible other use cases for similar technology and explore possible collaborations +3. 
Get ideas about how to improve performance to scale up to more users and repositories. + + + +## Approach and Plan + + + + +* Give demos to any interested parties +* Discuss github actions and other ways to optimize the gh data process + + + +## Progress and Next Steps + +* I had a productive discussion with Deepa about how the github repo approach might apply to the multisite ultrasound analysis use case +* I researched the github app approach to centralizing a database cache of repository status, and this seems feasible if still a bit clunky +* More research and refinement is needed to put an actual system in place, but the contraints space is getting clearer (what things can be done with the gh cli vs which things require manual interaction with the github web site) + + + +# Illustrations + +## An Example MorphoDepo specimen (Bumblebee microCT) + +image + + + + +# Background and References + + + + +Preprint describing the project: [https://arxiv.org/abs/2601.00030](https://arxiv.org/abs/2601.00030) + diff --git a/PW44_2026_GranCanaria/Projects/MultimodalTumorClassificationOfRiceInKapaanaIntegratingVlms/README.md b/PW44_2026_GranCanaria/Projects/MultimodalTumorClassificationOfRiceInKapaanaIntegratingVlms/README.md new file mode 100644 index 000000000..2e4a8fede --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/MultimodalTumorClassificationOfRiceInKapaanaIntegratingVlms/README.md @@ -0,0 +1,91 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Multimodal tumor classification of RICE in Kapaana - integrating VLMs +category: Segmentation / Classification / Landmarking +presenter_location: + +key_investigators: + +- name: Robin Peretzke + affiliation: DKFZ + country: Heidelberg + +- name: Maximilian Fischer + affiliation: DKFZ + country: Heidelberg + +--- + +# Project Description + + + + +- New contrast-enhancing lesions following treatment of intracranial tumors may reflect either true tumor recurrence or radiation-induced contrast enhancements 
(RICE). Distinguishing between these entities remains unreliable but is critical for subsequent treatment decisions. +- We have developed and trained a multimodal deep learning model (RICE-Net) that achieves good performance in differentiating RICE from tumor recurrence using longitudinal MRI data (e.g., post-intervention, post-pseudoprogression) in combination with radiation treatment plans. +- RICE-Net is currently integrated into Kapaana, providing a structured environment for processing imaging and radiotherapy data and enabling inference on these inputs within a clinical routine. +- However, relevant clinical information, including pathology and radiology reports as well as medication plans, is not yet integrated, although it could substantially improve model performance and clinical utility. Corresponding multimodal extensions are currently under development and planned for integration into a new Kapaana workflow.im + +![](https://github.com/user-attachments/assets/15a5ca20-6ef7-4e92-b605-220c263ef539) + +- However, such models cannot yet be integrated into Kapaana, as the platform currently does not support text processing or the integration of large language and vision–language models. + + + + +## Objective + + + + +- Extend Kapaana to support inference with vision-language models. +- Enable ingestion, parsing, and tokenization of PDF-based clinical documents for use as model inputs. +- Create the technical basis for multimodal models that combine imaging, textual, and structured clinical data. + + + + +## Approach and Plan + + + + +- Develop the necessary infrastructure within Kapaana for deployment and inference of LLM/VLM models. +- Implement robust pipelines for extracting and tokenizing text from clinical documents and aligning them with imaging data. +- Integrate textual and tabular clinical information with longitudinal MRI and radiation data in a unified multimodal model. 
+- Evaluate the extended model on retrospective cohorts with respect to performance, robustness, and interpretability. +- Prepare the system for subsequent prospective evaluation and use in interdisciplinary tumor board settings. + + + + +## Progress and Next Steps + + + + +1. Describe specific steps you **have actually done**. + + + + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +_No response_ diff --git a/PW44_2026_GranCanaria/Projects/NousnavBasedSlicerExtension/README.md b/PW44_2026_GranCanaria/Projects/NousnavBasedSlicerExtension/README.md new file mode 100644 index 000000000..a663bb748 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/NousnavBasedSlicerExtension/README.md @@ -0,0 +1,109 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: NousNav based Slicer Extension +category: IGT and Training +presenter_location: + +key_investigators: + +- name: Sam Horvath + affiliation: Kitware + country: USA + +- name: Tina Kapur + affiliation: BWH/HMS + country: USA + +- name: Steve Pieper + affiliation: Isomics + country: USA + +- name: Andras Lasso + affiliation: Queen's + country: Canada + +- name: Kyle Sunderland + affiliation: Queen's + country: Canada + +- name: Csaba Pinter + affiliation: Ebatinca + country: Spain + +- name: Martin Bellehumer + affiliation: Germany + +- name: Rafael Palomar + affiliation: Norwegian University of Science and Technology + country: Norway + +--- + +# Project Description + + + + +The goal of this project is to create a Slicer Extension (tentative name SlicerNav) that starts with the functionality of NousNav, which is a CustomApp, and then grows from there. + + + +## Tasks + + + + +1. PRIORITY: Clear out pending NousNav todos and prepare a new release + 1. Autosave / segmentation lag + 1. Other issues from Sonia +1. Refactor modules to a new extension: SlicerOpenNav +1. Test build with refactored modules +1. Test extension with latest Slicer version +1. Code modernization + 1. 
Update NousNav to latest Slicer + 1. Parameter node usage + + +## Progress and Next Steps + + + + +1. NousNav 1.1 + 1. Finished pending todos from Sonia Pujol (usability of Patients module) + 1. NousNav 1.1.0 tagged + 1. NousNav 1.1.0 installer generated - needs testing before release is created +1. SlicerOpenNav + 1. Generated from NousNav modules, with history preserved + 1. Works from: + 1. Build tree + 1. Installation + 1. Source Tree +1. SlicerOpenNav -> NousNav + 1. NousNav refactored to use SlicerOpenNav (on branch) + 1. Home module will remain as customization point + + + + + +# Illustrations + +SlicerOpenNav + +![](https://github.com/user-attachments/assets/42c47e5a-aabb-4bf5-b106-06f8437ad85c) + + + + + +# Background and References + + + +- [SlicerOpenNav](https://github.com/NousNav/SlicerOpenNav) diff --git a/PW44_2026_GranCanaria/Projects/OhifInternationalizationUpdate/README.md b/PW44_2026_GranCanaria/Projects/OhifInternationalizationUpdate/README.md new file mode 100644 index 000000000..bb70af101 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/OhifInternationalizationUpdate/README.md @@ -0,0 +1,85 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: OHIF internationalization update +category: Infrastructure +presenter_location: + +key_investigators: + +- name: Martin Bellehumeur + affiliation: Radical Imaging + country: Germany + +--- + +# Project Description + + + + +OHIF i18n translation tags were recently updated with many additional tags so that pratically all labels are covered. This means that AI can easily add new languages but we need someone to validate it and fix errors like “DICOM store” translated to “Magasin DICOM” for example. + + + +## Objective + + + + +Provide the opportunity to project week attendees to add or update a language they care about to OHIF. 
+ + + + +## Approach and Plan + + + + + +Attendees who want to add/update a language can add themselves to the project with the language they target. +The AI language translation will be available soon after at: + +[https://na-mic-projectweek44-g0g4a5c5dgc5dcf3.westeurope-01.azurewebsites.net/](https://na-mic-projectweek44-g0g4a5c5dgc5dcf3.westeurope-01.azurewebsites.net/) + +The language can be selected in the "Preferences" section of the top right sprocket icon. +During the conference, project participants can schedule a 1 hour meeting with Martin to correct the translation. + + + + + +## Progress and Next Steps + + + + +Catalan language was added to OHIF. + + + + +# Illustrations + + + +# Acknowledgements + +This translation is supported by Jordi Piera i Jiménez, Director of the Digital Health Strategy for Catalonia + +_No response_ + + + +# Background and References + + + + +_No response_ + diff --git a/PW44_2026_GranCanaria/Projects/OrbitalFractureSurgerySimulationUsingSlicersofa/README.md b/PW44_2026_GranCanaria/Projects/OrbitalFractureSurgerySimulationUsingSlicersofa/README.md new file mode 100644 index 000000000..f05c33019 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/OrbitalFractureSurgerySimulationUsingSlicersofa/README.md @@ -0,0 +1,136 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Orbital fracture surgery simulation using SlicerSOFA +category: VR/AR and Rendering +presenter_location: + +key_investigators: + +- name: Chi Zhang + affiliation: Texas A&M University College of Dentistry + country: USA + +- name: Rafael Palomar + affiliation: NTNU / OUH + country: Norway + +- name: Paul Baksic + affiliation: INRIA + country: France + +- name: Steve Pieper + affiliation: Isomics Inc. + country: USA + +--- + +# Project Description + + + + +Using SlicerSOFA to simulate orbital fracture repair using titanium plate. 
+ +Virtual planning in oral and maxillofacial surgery (OMFS) is primarily based on static models using proprietary software for implant customization design and surgical navigation. Soft tissue behaviors were usually unknown. However, physics simulation has not been widely adopted in OMFS virtual planning. One reason is that simulation is complex. SlicerSOFA can bridge this gap by integrating Slicer and SOFA. + +![](https://github.com/user-attachments/assets/b8f775ea-4360-415b-9279-d44d7ecadbd2) +![](https://github.com/user-attachments/assets/da97f07b-c505-4d63-92de-7eeb9a6d89c6) +![](https://github.com/user-attachments/assets/7fe6a8b3-3b5f-45d2-8959-00356edbb846) + +[Image source](https://surgeryreference.aofoundation.org/cmf/trauma/midface/orbit-floor/reconstruction#general-considerations) + + + +## Objective + + + + +1. Create a demo scene for future development and gathering feedback from surgeons +2. Plan for a grant proposal resubmission. + + + +## Approach and Plan + + + + +Work on two separate scenes: + +**1. Soft tissue retraction:** +- Goal: move the retractor to create a gap sufficient to insert the plate +- Challenges: collision in a confined space; proper mechanical models of different tissue types +- Creating a single multi-material model + +![](https://github.com/user-attachments/assets/8ef946f4-00ff-4e3d-b0ce-126b367299fc) +![](https://github.com/user-attachments/assets/5aa60cab-99d4-48c3-9d09-7eeef7470867) + + +**2. Plate bending and fixation** +- Meshed plate is geometrically complicated; using shell model as a proxy + + +**3. Run a demo scene in SlicerSOFA** +- How to integrate Slicer methods to facilitate mechanical model preparation and interaction, such as controller + +- Demo scene "tap the eyeball" + + + + +**4. Planning for benchmarking and validation: what is considered as "success" at this stage?** + + + + +## Progress and Next Steps + +### Multimaterial model +1. 
Create a union model of orbital soft tissue and convert it into a tetrahedron mesh. +2. Using the SOFA 'MeshROI' method to use the eyeball-muscle polygon model as an ROI to select tetrahedra that fall within it, and assign different materials to tets in and out of the ROI (i.e., orbital fat). + +![](https://github.com/user-attachments/assets/ad88a41a-5459-4bd2-bff5-4f71f3cb65f0) + + + +### Using SlicerSOFA infrastructure and vtkProbeFilter() & grid transform for mesh & image deformation + + + +### Next step: work on the collision model issue between the retractor and the soft tissue + +# Illustrations + + + + +_No response_ + + + +# Background and References + + + + +Orbital fracture repair introduction: [AO Surgery Reference - Orbital Floor Reconstruction](https://surgeryreference.aofoundation.org/cmf/trauma/midface/orbit-floor/reconstruction#general-considerations) + +Related previous PW pages: [Evaluate the fit of preformed plates in orbital surgery +](https://projectweek.na-mic.org/PW43_2025_Montreal/Projects/EvaluateTheFitOfPreformedPlatesInOrbitalSurgery/) and [Simulate orbit surgery using SlicerSOFA](https://projectweek.na-mic.org/PW43_2025_Montreal/Projects/SimulateOrbitSurgeryUsingSlicersofa/) diff --git a/PW44_2026_GranCanaria/Projects/PathologyExtensionForMhubIoModules/README.md b/PW44_2026_GranCanaria/Projects/PathologyExtensionForMhubIoModules/README.md new file mode 100644 index 000000000..48aff20ca --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/PathologyExtensionForMhubIoModules/README.md @@ -0,0 +1,91 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Pathology Extension for MHub IO Modules +category: Infrastructure +presenter_location: + +key_investigators: + +- name: Curtis Lisle + affiliation: KnowledgeVis, LLC + country: USA + +- name: Leonard Nürnberg + affiliation: MGB / Harvard + country: The Netherlands + +--- + +# Project Description + + + + +Pathology (DICOM) images differ greatly from radiology images, e.g., 
contain multiple resolutions. The MHub core provides 18 IO Modules to import, convert, and organize imaging data. We want to extend this with additional IO Modules to extract a target resolution and to provide an alternative toolchain to generate DICOMSEG output files. The IO Modules will be made publicly available as an MHub.ai module extension. + + + +## Objective + + + + +1. A public pathology extension for MHub.ai +2. Adding the RMS model to MHub.ai utilizing the provided IO Modules as a PoC + + + +## Approach and Plan + + + + +1. Create a new pathology extension repository +2. Implement an extractor module +3. Implement a specific dicomseg conversion module (e.g., based on highdicom) +4. Implement the RMS model as PoC + + + +## Progress and Next Steps + +1. A **PathologyExtension** repository was created. Modules defined in the extension are automatically discovered during the MHubIO run setup so these + extensions are available to all MHub models +2. A **PathologyResolutionFilter** module has been developed and tested on DICOM-WSI images from the NCI Imaging Data Commons. The module reads image metadata + and copies only image resolutions that match an input parameter of desired resolutions. Only image resolutions matching the desired target resolution + are copied from the input for MHub pipeline processing. +3. A prototype method using the **HighDicom** Python library was developed and tested to write DICOM DSeg (segmentation) images. Next, this code needs to be + wrapped as an MHub DSegWriter method and added to the PathologyExtension repository. 
Once this is complete, pathology algorithns that don't know how to read and write WSI-DICOM can be used in DICOM to DICOM + workflows + + +# Illustrations + + + + +## Layer Extraction +![](https://github.com/user-attachments/assets/e5705efa-0279-4f4a-b485-75e85fa2a167) + +## Proposed Pipeline +![](https://github.com/user-attachments/assets/f3ccb9db-3eb4-4b62-8697-3cca89f09f11) + +## RMS Model +![](https://github.com/user-attachments/assets/3a356f20-2bef-4400-853c-32dd66ea8989) + + + +# Background and References + + + + +- [MHub Documentation - Using Model Extensions](https://github.com/MHubAI/documentation/blob/main/documentation/mhub_models/the_mhub_dockerfile.md#install-additional-mhub-io-collections) +- [MHub Documentation - Example Collection Repository](https://github.com/LennyN95/mhubio-test-collection) +- [Reproducible Radiology and Pathology Imaging Analysis Applications in MHub ](https://docs.google.com/presentation/d/1bcP9WhqRf1eZtMZ1lsubE6wCgKdAYA4CIPedhw2Rcuo/edit?slide=id.g34b433d272b_1_0#slide=id.g34b433d272b_1_0) Slide 17-22 +- [MHub.ai](https://mhub.ai) +- [Official PathologyExtension Repository](https://github.com/MHubAI/PathologyExtension) diff --git a/PW44_2026_GranCanaria/Projects/PlayingWithSceneViews/README.md b/PW44_2026_GranCanaria/Projects/PlayingWithSceneViews/README.md new file mode 100644 index 000000000..81a369f2e --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/PlayingWithSceneViews/README.md @@ -0,0 +1,89 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Playing with Scene Views +category: Infrastructure +presenter_location: + +key_investigators: + +- name: Ron Kikinis + affiliation: BWH + country: USA + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +- name: Kyle Sunderland + affiliation: Queen's University + country: Canada + +- name: Martin Bellehumeur + affiliation: Radical Imaging + country: 
Germany + +--- + +# Project Description + + + + +_No response_ + + + +## Objective + + + + +1. Test and provide feedback on scene views updates + + + + +## Approach and Plan + + + + +### Meeting: Ron, Kyle, Andras and Martin - Tuesday 27th @ 10AM + - User options are too complicated + - Options should be simpler, Ex: it is not clear what "Capture display nodes" means + - When users load a scene view, we should indicate what scene view has been loaded + - Previous/Next button to move between scene views + - If "source data" (volume, etc.) is deleted, then we should erase scene views where it is visible, or show a message. + + + - Issues + - Capture display/view nodes checkboxes not left checked by default when capturing a scene view + - Recording only view nodes doesn't maintain display node visibility + - Segmentations can become hidden when switching scene views + +## Progress and Next Steps + + + +1. Simplify user options for Scene Views dialog + - Capture display nodes -> Save display settings + - Capture view nodes -> Save view layout + +# Illustrations + + +![](https://github.com/user-attachments/assets/a591c303-29fc-46ab-a439-c32be843be89) + +![](https://github.com/user-attachments/assets/4f78cb31-b6c0-4b22-9a63-0cbe8a0552bf) + +# Background and References + + + + +_No response_ diff --git a/PW44_2026_GranCanaria/Projects/PythonDependenciesInExtensions/01.png b/PW44_2026_GranCanaria/Projects/PythonDependenciesInExtensions/01.png new file mode 100644 index 000000000..c0f1440f2 Binary files /dev/null and b/PW44_2026_GranCanaria/Projects/PythonDependenciesInExtensions/01.png differ diff --git a/PW44_2026_GranCanaria/Projects/PythonDependenciesInExtensions/02.png b/PW44_2026_GranCanaria/Projects/PythonDependenciesInExtensions/02.png new file mode 100644 index 000000000..6d7b3164b Binary files /dev/null and b/PW44_2026_GranCanaria/Projects/PythonDependenciesInExtensions/02.png differ diff --git 
a/PW44_2026_GranCanaria/Projects/PythonDependenciesInExtensions/README.md b/PW44_2026_GranCanaria/Projects/PythonDependenciesInExtensions/README.md new file mode 100644 index 000000000..9f8cbe23d --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/PythonDependenciesInExtensions/README.md @@ -0,0 +1,1020 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Python dependencies in extensions +category: Infrastructure +presenter_location: + +key_investigators: + +- name: Ebrahim Ebrahim + affiliation: Kitware + country: USA + +- name: Steve Pieper + affiliation: Isomics + country: USA + +- name: Sam Horvath + affiliation: Kitware + country: USA + +- name: Andras Lasso + affiliation: Queen's + country: Canada + +- name: Michael Halle + affiliation: BWH + country: USA + +--- + +# Project Description + + + + +Many Slicer extension developers have to deal with the problem of external python dependencies: how to specify them, how and when to install them, and how to validate that the required things are installed. Everyone addresses the problem in a different way, often re-inventing the wheel and also often generating new great ideas. I'd like to collect all the best practices and turn them into a framework that is built into core slicer for extension developers to more easily grab and use. Something like "stick your dependencies in here and the use `slicer.util.check_python_dependences` and `slicer.util.install_python_dependencies`. If that turns out to be a bad idea for whatever reason, at least I can collect all the best practices and put them into the extension development documentation. + + +## Objective + + + + +* Encode into Slicer some way to make it more convenient for extension developers to handle external python dependencies. + + +## Approach and Plan + + + + +* Gather best practices on external python dependency handling in Slicer extensions, and then distill them into an optimized approach. 
+* Encode that approach in Slicer somehow, either as utility functions, updates to extension templates, or simply documentation. + + + +## Progress and Next Steps + + + + +### Table of existing practices + +I've broken down the problem of external Python dependency into four components: + +- **Specification:** How do we represent Python dependency requirements? +- **Checking:** How do we check whether the dependency requirements are already met by the environment? +- **Triggering:** What causes the requirement installation process to start? +- **Installing:** How do we carry out requirement installation? + +The table below includes all 94 extensions that are currently in the [Slicer extension index](https://github.com/Slicer/ExtensionsIndex) and that have some external python dependencies to deal with, and my best quick guess as to how they approach three of the problems above. +Here is a legend to interpret the terms I've put in the table: + +- **Checking:** + - _simple_: Checking is done by trying to import and catching if it fails. It could also be done with importlib. No version checking. + - _version_: Does some kind of version checking as well. +- **Triggering:** + - _user_: Nothing triggers an automatic install. The user will have to install by following some steps. + - _top level_: The install is triggered at some top level like the module enter function, such that it would happen the first time the module is even switched to for example. It could also be at the truly top-level-- in this case it would be triggered during module discovery, which is undesirable. + - _processing_: When the user goes to run some kind of processing in the extension, that's when it actually checks if it has the dependencies it needs and kicks off install if not. + - _button_: User has to press a button to install. +- **Installing:** + - _user_: User has to do it by following some steps. + - _simple_: Just `slicer.util.pip_install`. 
+ - _isolated_: There is a separate environment (such as a virtual env) into which the dependencies are installed (and so it is not going to be slicer's `pip_install` but some script or shell process that carries out installation). + - _blocking-prevention_: Attempts to stop the installation from fully blocking the application. This could be by using threading of some sort or spawning a background process, or by otherwise trying to give slicer a chance to process events while the install is happening. + - _display_: Has some more sophisticated display of what's going on while pip installing + + +--- + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ExtensionCheckingTriggeringInstalling
ShapeVariationAnalyzersimpleuserisolated
SlicerVolBrainsimpleuseruser
PerkTutorsimpletop levelsimple
Q3DCExtensionsimpletop levelsimple
QuantitativeReportingsimpletop levelsimple
SegmentationReviewsimpletop levelsimple
Slicer-ABLTemporalBoneSegmentationsimpletop levelsimple
Slicer-ASLtoolkitsimpletop levelsimple
Slicer-MusculoskeletalAnalysissimpleprocessingsimple
Slicer-PET-MUST-segmentersimpletop levelsimple
Slicer-TITANsimpleprocessingsimple
SlicerANTsPysimpletop levelsimple
SlicerAnatomyCarvesimpletop levelsimple
SlicerArduinoControllersimpletop levelsimple
SlicerAuto3dgmsimpletop levelsimple
SlicerAutomatedDentalToolsversionprocessingsimple
SlicerAutoscoperMsimpleprocessingsimple
SlicerBigImagesimpleprocessingsimple
SlicerBiomechsimpleprocessingsimple
SlicerBreastUltrasoundAnalysissimpletop levelsimple
SlicerBreast_DCEMRI_FTVsimpletop levelsimple
SlicerCADSWholeBodyCTSegsimpleprocessingsimple
SlicerCBCTToothSegmentationsimpleprocessingsimple
SlicerCineTracksimpleprocessingsimple
SlicerColoc-Z-Statssimpleprocessingsimple
SlicerConnectToSuperviselysimpleprocessingsimple
SlicerDBSCoalignmentsimpleprocessingsimple
SlicerDICOMwebBrowsersimpletop levelsimple
SlicerDMRIsimpleprocessingsimple
SlicerDebuggingToolssimpleprocessingsimple
SlicerDensityLungSegmentationsimpleprocessingsimple
SlicerDentalModelSegversionprocessingisolated, blocking-prevention
SlicerFreeSurfersimpletop levelsimple
SlicerHDBrainExtractionsimpleprocessingsimple
SlicerHeadCTDeidsimpleprocessingsimple
SlicerHeartsimpleprocessingsimple
SlicerIDCBrowsersimpletop leveldisplay
SlicerIVIMFitsimpleprocessingsimple
SlicerImageAugmentersimplebuttonsimple
SlicerJupytersimpleprocessingsimple
SlicerKonfAIversiontop levelblocking-prevention
SlicerLungCTAnalyzersimpleprocessingsimple
SlicerMEMOSsimpleprocessingsimple
SlicerMHubRunnersimpletop levelsimple
SlicerMONAIAuto3DSegversionprocessingblocking-prevention
SlicerMONAIVizsimpleprocessingsimple
SlicerMOOSEsimplebuttonsimple
SlicerMassVisionsimpletop levelsimple
SlicerModalityConvertersimplebuttonsimple
SlicerMorphsimpletop levelsimple
SlicerMorphoDepotsimpletop levelsimple
SlicerMultiverSegsimpleprocessingsimple
SlicerMuscleMapsimpleprocessingsimple
SlicerNNInteractivesimpletop leveldisplay, blocking-prevention, isolated
SlicerNNUnetversionprocessingsimple
SlicerNetstimsimpleprocessingsimple
SlicerNeurosimpleprocessingdisplay
SlicerNeuroStripsimpletop levelsimple
SlicerNeuropacsversionprocessingsimple
SlicerOpenLIFUsimpleprocessingdisplay
SlicerOrbitSurgerySimsimpletop leveldisplay
SlicerPhotogrammetrysimpletop levelsimple
SlicerPipelinessimpletop levelsimple
SlicerPolycysticKidneySegsimplebuttonsimple
SlicerPyTorchsimplebuttonsimple
SlicerPythonTestRunnersimpleprocessingsimple
SlicerRVXLiverSegmentationsimpletop levelsimple
SlicerRadiomicssimpletop levelsimple
SlicerSPECTReconversiontop leveldisplay
SlicerSandboxsimpleprocessingsimple
SlicerSegmentHumanBodysimpletop levelsimple
SlicerSegmentWithSAMsimpletop levelsimple
SlicerSkeletalRepresentationsimpletop levelsimple
SlicerSoundControlsimpletop levelsimple
SlicerStereotaxiasimpleprocessingsimple
SlicerSurfaceLearnersimpletop levelsimple
SlicerThemessimplebuttonsimple
SlicerTissueSegmentationsimpletop levelsimple
SlicerTomoSAMsimpleprocessingsimple
SlicerTorchIOsimpleprocessingsimple
SlicerTotalSegmentatorsimpleprocessingsimple
SlicerTractParcellationsimplebuttonsimple
SlicerTramesimpletop levelsimple
SlicerUltrasoundsimpletop levelsimple
SlicerUniGradICONsimpletop levelsimple
Slicerflywheelcaseiteratorsimpletop levelsimple
TCIABrowsersimpletop levelsimple
TOMAAT-Slicersimpletop levelsimple
aigtsimpletop levelsimple
opendose3dsimpletop leveldisplay
slicer_flywheel_connectsimpletop levelsimple
+ +### Next steps + +In the table above, the green cells are the items I think are worth revisiting and learning from for this project. + +Other things I found while looking through these that I'd like to consider: + +- Using `slicer.util.restart` to restart after install +- Showing a status message: `slicer.util.showStatusMessage` +- Showing the python console: `slicer.util.displayPythonShell` +- Using `slicer.util.tryWithErrorDisplay` +- Using a `BusyCursor` context manager + +Further points that I'd like to follow up on: + +- I found some [thoughts from JC](https://github.com/Slicer/Slicer/issues/7171) that I'd like to look over carefully. +- I had forgotten about [this work by David](https://github.com/Slicer/Slicer/issues/7707). This could help with the _triggering_ question in particular. One consideration is to not make things too opaque so as to make debugging difficult (e.g. say a module import fails; will the error be incomprehensible?) +- What about the problem of conflicts between requirements of different extensions? For an example see the mess that was caused by the conflict between the total segmentator and NNUnet extensions. +- When extension unit tests are running, they have the ability to influence each other's slicer python environment. It would be nice if there were some way to revert the slicer python environment before each extension test begins. This is out of the scope of the present project but we can consider how it might be done. + +### uv + +Using `uv` instead of `pip` could provide a huge speedup and unlock many more possibilities. [Mike's AI generated summary](https://gist.github.com/mhalle/c2e752467d960a123f42ea459c09f73e) provides some inspiration: + +- As a first step, we can see if `uv` can be bundled with Slicer. We can add it to the superbuild (but then we are dealing with rust), or we can install it from the wheel. Replacing `pip` by `uv pip` is in instant win in terms of speed. 
+- Exciting possibilities follow: + - `uv` _workspaces_ can be used to look at python dependencies of a _set_ of Slicer extensions and come up with a single common resolution before actually installing anything. This could be used at Slicer build time on all indexed Slicer extensions. Or it can be used whenever a user is installing a new extension on the set of all their installed extensions. + - `uv`'s lock files and ability to roll back to snapshots may solve the problem reverting the Slicer python environment to a clean one while testing each extension. + +### Refined next steps + +A plan of attack: + +- Come up with a dependency specification format. Use [JC's commentary](https://github.com/Slicer/Slicer/issues/7171) and AI help to find the best solution. Consider the strange dependency conflict handling that takes place in [NNUnet](https://github.com/KitwareMedical/SlicerNNUnet/blob/e44b00883e8f373c72bf79c50455bd2c776ed8cf/SlicerNNUnet/SlicerNNUNetLib/InstallLogic.py#L32) and [Total Segmentator](https://github.com/lassoan/SlicerTotalSegmentator/blob/2e5f9c3aa38365cc63eba2f3c8ea1c2e2b79acd8/TotalSegmentator/TotalSegmentator.py#L745), or whether we can handle bringing the [currently isolated docker environment and requirements in NNInteractive](https://github.com/coendevente/SlicerNNInteractive/tree/76545192e1a925f911a31dea72802ad04d089072/server) into slicer's python. +- Come up with a solution to the *checking* problem, referencing the extensions with the *version* tag in the table above and with some AI help. +- Come up with a solution to the *triggering* problem, referencing [David's work](https://github.com/Slicer/Slicer/issues/7707) and some AI help. 
+- Come up with a solution to the *installation* problem by addressing the following: + - Display: Progress reporting, the option to prompt the user about what is happening and whether they want to proceed, showStatusMessage, busy cursor if not running in background. Reference the extensions with the *display* tag above. + - Blocking prevention approach. Reference the extensions with the *blocking-prevention* tag above. +- Consider whether there is room to appraoch the problem of "environment reversion" for the sake of extension testing. +- Implementation +- Experimental bonus objectives: `uv`, conflicts between extensions, reverting the environment for tests. + +### Plan for implementation + +#### Dependency specification + +##### File format + +For Slicer extensions, I believe `requirements.txt` is the right way to specify python dependency requirements. + +_Why not pyproject.toml?_ + +- Semantics: In a `pyproject.toml` one declares a _package's_ dependencies. This suggests you're defining a distributable package with a name, version, and build backend. Slicer extensions aren't python packages. In a Slicer extension we are just specifying "install these things into this environment," which is exactly what a `requirements.txt` is. +- Directness: The `requirements.txt` format *is* pip's input format. So it removes the need for a translation layer. +- Extra baggage: pyproject.toml strongly suggests `[project]` metadata like `name`, `version`, `[build-system]`, etc, which are not relevant. +- Constraints: `pip install -c constraints.txt` is how pip handles dependency conflicts across multiple extensions. Even with `pyproject.toml` you'd still need a separate constraints.txt file. +- This approach still works if `uv` is adopted: + - `uv` supports requirements.txt: `uv pip compile requirements.txt -o requirements.lock` generates resolved lock files + - When doing `uv pip compile` the lock file that we get is essentially in a requirements.txt format. 
The TOML `uv.lock` format is only used with `uv lock` or `uv sync` (which are things you'd probably use mainly for managing actual python projects). + - To resolve multiple files we can do `uv pip compile a.txt b.txt -c constraints.txt` without the need for any of the extra `pyproject.toml` metadata + +##### Python object + +A dependency can be represented by a `packaging.requirements.Requirement`. A `list` of such things is what we should get when we load a `requirements.txt` file (or a `constraints.txt` file). + +- This is the kind of object that `pip` is using internally to represent requirements. +- This handles the requirement syntax (version specifiers, extras, markers, URLs) + +Example of how to load a requirements file: + +```py +from packaging.requirements import Requirement + +def load_requirements(path): + """Load requirements.txt into list of Requirement objects.""" + reqs = [] + with open(path) as f: + for line in f: + line = line.strip() + # Skip comments, empty lines, and pip options (-r, -c, --index-url, etc.) + if line and not line.startswith("#") and not line.startswith("-"): + reqs.append(Requirement(line)) + return reqs +``` + +#### Checking + +One can do a pip `--dry-run` to use pip's way of checking, but then we need to call a subprocess which has some overhead. +Unlike _installation_, dependency _checking_ is an operation that might get called upon frequently. +It would be good to do it in pure python. +It does get a bit complicated mainly because of the possibility of extras in a `Requirement`, +but it's not that bad; here is how `slicer.util.pip_check` might work: + +```python +from importlib.metadata import version, requires, PackageNotFoundError +from packaging.requirements import Requirement +from packaging.markers import default_environment + + +def pip_check(req : Requirement|list[Requirement], _seen=None) -> bool: + """Check if requirement(s) are satisfied. + + For requirements with extras like package[extra1,extra2]>=1.0, this: + 1. 
Checks if the base package is installed at an acceptable version + 2. Finds which dependencies are activated by the requested extras + 3. Recursively verifies those dependencies are satisfied + + Markers (e.g., "; sys_platform == 'win32'") are evaluated - if a marker + doesn't apply to the current environment, the requirement is considered + satisfied (since it doesn't need to be installed). + + Args: + req: Either a Requirement object or a list of Requirement objects + _seen: Internal parameter for tracking circular dependencies + + Returns: + True if all requirements are satisfied, False otherwise + + Example: + from packaging.requirements import Requirement + + # Single requirement + req = Requirement("numpy>=1.20") + if pip_check(req): + print("numpy is satisfied") + + # Multiple requirements + reqs = [ + Requirement("numpy>=1.20"), + Requirement("pandas[excel]>=2.0"), + ] + if pip_check(reqs): + print("All requirements satisfied") + """ + if _seen is None: + _seen = set() + + # Handle list of requirements, sharing _seen across all of them + if isinstance(req, list): + return all(pip_check(r, _seen) for r in req) + + # Check if requirement's marker applies to current environment + # If not, consider it satisfied (doesn't need to be installed here) + if req.marker is not None: + env = default_environment() + if not req.marker.evaluate(env): + return True + + # Avoid rechecking the same requirement + key = (req.name.lower(), frozenset(req.extras)) + if key in _seen: + return True + _seen.add(key) + + # Check if base package is installed at acceptable version + try: + installed = version(req.name) + except PackageNotFoundError: + return False + if installed not in req.specifier: + return False + + # If no extras then we are done + if not req.extras: + return True + + # Find dependencies activated by the requested extras + dep_strings = requires(req.name) or [] + env = default_environment() + activated = [] + + for dep_str in dep_strings: + dep = 
Requirement(dep_str) + if dep.marker is None: + continue + + # Check if any requested extra activates this dependency + for extra in req.extras: + if dep.marker.evaluate({**env, "extra": extra}): + # Strip marker before recursive check - we've already determined it applies + dep_str_no_marker = str(dep).split(';')[0].strip() + activated.append(Requirement(dep_str_no_marker)) + break # Don't check other extras for same dep + + # Recursively verify all activated dependencies + return all(pip_check(dep, _seen) for dep in activated) +``` + +#### Triggering + +For the **triggering** problem I propose an explicit checker function followed by non-top-level imports. Maybe [the `LazyImportGroup` approach](https://github.com/Slicer/Slicer/issues/7707) can be considered for later, but for now I think something clear and simple is needed. The `LazyImportGroup` cleverly intercepts your first use of an imported module to trigger installation behind the scenes. It's elegant, but the magic does reduce transparency for everyday Slicer extension developers, making debugging more difficult. For IDE support and type checking, one can use the `TYPE_CHECKING` pattern to declare imports at the top of the file, which type checkers see but which doesn't run at runtime. + +Here's how `slicer.util.pip_ensure` might work: + +```py +from packaging.requirements import Requirement + + +def pip_ensure( + requirements: list[Requirement], + prompt: bool = True, + requester: str | None = None, + skip_in_testing: bool = True, + show_progress: bool = True, +) -> None: + """Ensure requirements are satisfied, installing if needed. + + Call at the point where dependencies are actually needed (e.g., onApplyButton). 
+ + Args: + requirements: List of Requirement objects to check/install + prompt: If True, show confirmation dialog before installing + requester: Name shown in dialog to identify who is requesting the packages + (e.g., "TotalSegmentator", "MyFilter", "console script") + skip_in_testing: If True (default), skip installation when Slicer is running + in testing mode (slicer.app.testingEnabled()). This prevents tests from + modifying the Python environment. Set to False if your test explicitly + needs to verify installation behavior. + show_progress: If True (default), show progress dialog during installation + with status updates and collapsible log details. If False, show only + a busy cursor. Since pip_ensure already shows a confirmation dialog, + showing progress during installation provides a consistent user experience. + + Raises: + RuntimeError: If user declines installation or installation fails + + Example: + reqs = slicer.util.load_requirements(Path(__file__).parent / "requirements.txt") + slicer.util.pip_ensure(reqs, requester="MyExtension") + import some_package + """ + import logging + + missing = [req for req in requirements if not pip_check(req)] + + if not missing: + return # all satisfied + + # skip installation in testing mode to avoid modifying the environment + if skip_in_testing and slicer.app.testingEnabled(): + missing_str = ", ".join(str(req) for req in missing) + logging.info(f"Testing mode is enabled: skipping pip_ensure for [{missing_str}]") + return + + if prompt: + package_list = "\n".join(f"• {req}" for req in missing) + title = f"{requester} - Install Python Packages" if requester else "Install Python Packages" + count = len(missing) + message = ( + f"{count} Python package{'s' if count != 1 else ''} " + f"need{'s' if count == 1 else ''} to be installed.\n\n" + f"This will modify Slicer's Python environment. Continue?" 
+ ) + if not slicer.util.confirmOkCancelDisplay(message, title, detailedText=package_list): + raise RuntimeError("User declined package installation") + + # Install missing packages with optional progress display + pip_install_with_progress( + [str(req) for req in missing], + show_progress=show_progress, + requester=requester, + ) +``` + +The `pip_install_with_progress` is explained in [the Installing section below](#installing). Since `pip_ensure` already shows a confirmation dialog to the user by default, showing progress during the subsequent installation provides a consistent experience as the default. + +##### Example usage + +Say you have a `Resources/requirements.txt` in your Slicer module and it contains + +```txt +scikit-image>=0.21 +``` + +Here's how you might use `pip_ensure` to trigger install if needed: + +```py +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + import skimage + +... + +class MyFilterWidget(ScriptedLoadableModuleWidget): + + ... + + def onApplyButton(self): + reqs = slicer.util.load_requirements(self.resourcePath("requirements.txt")) # say this contains "scikit-image>=0.21", for example + slicer.util.pip_ensure(reqs, requester="MyFilter") + import skimage + + filtered = skimage.filters.gaussian(array, sigma=2.0) + ... +``` + +#### Installing + +We will build upon `slicer.util.pip_install` to help solve two problems: + +- _Blocking prevention_: Avoid blocking the UI. Optional, and again off by default. +- _Progress display_: Show progress to the user. Optional, off by default so that no one's extensions change their behavior unexpectedly. + +Blocking prevention is technical. +We will build this into `slicer.util.pip_install` by using a QTimer-based polling approach [inspired by SlicerMONAIAuto3DSeg](https://github.com/lassoan/SlicerMONAIAuto3DSeg/blob/b92cb839f0a78fc5fddceda433a9b8facd2a0e35/MONAIAuto3DSeg/MONAIAuto3DSegLib/process.py#L51). 
+ +Progress display will involve creating a modal dialog containing a progress bar and an expandable details section. + +I think stuffing progress display functionality into `pip_install` does not make sense. +`pip_install` could remain a low-level building block (with the ability to be non-blocking), while `pip_install_with_progress` could be a high-level utility that always waits for completion while showing a modal dialog. Mixing these in one function would create confusing interactions. For example, what should `pip_install(blocking=False, show_progress=True)` do when the caller expects an immediate return but the progress dialog expects to block until completion? It also feels a bit messy to stuff so much Qt GUI code into `pip_install` itself. + +Here is what these two changes end up meaning for Slicer extension developers, if they are successfully implemented: + +- First, `pip_install` gets optional `blocking=False` mode with `logCallback` and `completedCallback` parameters, allowing advanced users to build custom installation UIs or run pip in the background while keeping the application fully responsive. +- Second, the new `pip_install_with_progress()` function provides an out-of-the-box installation experience with a modal progress dialog showing an animated progress bar, status updates, and a collapsible details panel containing the full pip log. Additionally there would be error handling that displays the complete log if installation fails. For most extension developers, `pip_install_with_progress()` is the recommended choice for user-facing installations, while the enhanced `pip_install()` remains available for scripting, automation, or custom UI needs. + +### Implementation notes + +The above plan was consolidated into a set of instructions for Claude to execute upon. + +It ran with `--dangerously-skip-permissions` inside an isolated docker environment where it had only a Slicer source tree and build tree to play with and test its implementation. 
When it was done, I requested unit tests following the style and philosophy of existing ones for `slicer.util`, a self-review based on some similar past pull requests, and useful documentation updates. + +Discussions led to a few changes to the design: + +- Combine `pip_install_with_progress` into `pip_install`, because we may actually want to make the modal progress dialog become the default behavior for everyone using `pip_install`. +- When non-blocking and not showing a dialog, still have the option to show updates in the status bar +- Guard against multiple simultaneous pip installs +- Add handling of [constraints](https://pip.pypa.io/en/latest/user_guide/#constraints-files) +- Incorporate a nicer way to pip install with `--no-deps` for some dependencies (e.g. to potentially make [things like this](https://github.com/KitwareMedical/SlicerNNUnet/blob/e44b00883e8f373c72bf79c50455bd2c776ed8cf/SlicerNNUnet/SlicerNNUNetLib/InstallLogic.py#L288) easier to express) + +[This PR is the outcome!](https://github.com/Slicer/Slicer/pull/9010) + +### Future directions: + +The PR will probably take some back-and-forth, and it's only a start to the chain of improvements that we ultimately want: + +- Integration of `uv` for speed of dependency resolution and for the ability to roll back the environment to a previous lock file. Pre-installation of `uv` is already a solved problem over at this old PR [#8181](https://github.com/Slicer/Slicer/pull/8181). +- Support for virtual environments. Sometimes you just want to try out a new segmentation model and it needs a very specific environment; we should consider making it easier to say "pip install this package, but to an isolated dedicated virtual environment." Similar philosophy to `pipx` or `uv tool`, with the convenience of `slicer.util.pip_install`. +- Establishing a canonical location for extensions to specify their dependencies. 
This would open up the option of resolving dependencies for mutliple extensions all at once. It also opens up the option to provide more convenient tooling at the level of `ScriptedLoadableModule`. +- SlicerIDCBrowser goes through some effort to [check whether packages _can_ be updated](https://github.com/ImagingDataCommons/SlicerIDCBrowser/blob/a3fbf8309a602dc1e857991f95303c6ee5336ac2/IDCBrowser/IDCBrowser.py#L455-L463), and to do so in a non-blocking way. This is actually quite useful to include in the family of `slicer.util.pip_*` things. + +# Illustrations + + + + +![](01.png) + +![](02.png) + + + +# Background and References + + + + +- [Pull request that came out of this work](https://github.com/Slicer/Slicer/pull/9010) + +Prior related work and ideas: + +- [https://github.com/Slicer/Slicer/issues/7171](https://github.com/Slicer/Slicer/issues/7171) +- [https://github.com/Slicer/Slicer/issues/7707](https://github.com/Slicer/Slicer/issues/7707) +- [https://github.com/Slicer/Slicer/pull/8181](https://github.com/Slicer/Slicer/pull/8181) \ No newline at end of file diff --git a/PW44_2026_GranCanaria/Projects/RefinementOfTheMethodUsedToDetermineSurgicalClassBasedOnTheShapeOfTheCarotisSyphon/README.md b/PW44_2026_GranCanaria/Projects/RefinementOfTheMethodUsedToDetermineSurgicalClassBasedOnTheShapeOfTheCarotisSyphon/README.md new file mode 100644 index 000000000..498ab29d8 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/RefinementOfTheMethodUsedToDetermineSurgicalClassBasedOnTheShapeOfTheCarotisSyphon/README.md @@ -0,0 +1,91 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Refinement of the method used to determine surgical class based on the shape of the + carotis syphon +category: Segmentation / Classification / Landmarking +presenter_location: + +key_investigators: + +- name: Attila Tanács + affiliation: University of Szeged + 
country: Hungary + +- name: Attila Nagy + affiliation: University of Szeged + country: Hungary + +- name: Ferenc Dezső Bakó + affiliation: University of Szeged + country: Hungary + +--- + +# Project Description + + + + +This is an [ongoing project from last year](https://projectweek.na-mic.org/PW42_2025_GranCanaria/Projects/DeterminationOfSurgicalClassBasedOnTheCurvatureAndShapeOfTheCarotidSyphon/). + +Stroke is a leading cause of death worldwide of which ischaemic stroke is the more common. Mechanical thrombectomy involves inserting a catheter into the cerebral vasculature to remove blood clot. Catheter devices with different parameters are available to perform the procedure of which the correct one must be selected beforehand to avoid blockage. Clinical experience suggests that large lumen aspiration catheters were most commonly stuck at the anterior curvature of the carotid syphon. + +We categorised 53 studies into four groups. Previously, we extracted nine features based on vessel geometry for classification purposes. + + + +## Objective + + + + +1. Objective A. Our main objective is to refine the extracted attribute values in order to enhance the classification results. +2. Objective B. Vessel segmentation is also part of the process that is performed manually currently. We are trying to make it automatic. + + + +## Approach and Plan + + + + +1. Using Weka, figure out what features and classification method provide the best result. +2. We plan to gather CT and ground truth data for MONAI Auto3dSeg segmentation training. + + + +## Progress and Next Steps + + + + +1. Absolute distance values were normalized by computing the ratio of distance relative to the distance along the centreline of the vessel. +2. Starting from the first cross section, difference values were computed from absolute vessel cross section area values. +3. Attributes were inspected using Weka functions. 
+ +Results: Even the new attributes are not really a good descriptor of best choice available as ground truth. + +Next steps: Ground truth should be considered in a different way. Instead of opting for one single choice, a percentage value could be assigned to each instrumentation. From the recorded surgical log data, we know which devices were tested and which were unsuccessful. These unsuccessful instrument applications could also be used to train the classification method. + + + +# Illustrations + + + + +![](https://github.com/user-attachments/assets/02e5d8ea-f072-4fb6-81f0-b8f5f7336ed7) + + + +# Background and References + + + + +_No response_ diff --git a/PW44_2026_GranCanaria/Projects/RegisteringCbctAndFluoroscopyImagesToDetermineElectrodePlacementInTheHumanCochlea/README.md b/PW44_2026_GranCanaria/Projects/RegisteringCbctAndFluoroscopyImagesToDetermineElectrodePlacementInTheHumanCochlea/README.md new file mode 100644 index 000000000..50c111488 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/RegisteringCbctAndFluoroscopyImagesToDetermineElectrodePlacementInTheHumanCochlea/README.md @@ -0,0 +1,117 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Registering CBCT and fluoroscopy images to determine electrode placement in the human + cochlea +category: Segmentation / Classification / Landmarking +presenter_location: + +key_investigators: + +- name: Attila Nagy + affiliation: University of Szeged + country: Hungary + +- name: Paolo Zaffino + affiliation: Magna Graecia University of Catanzaro + country: Italy + +- name: Attila Tanács + affiliation: University of Szeged + country: Hungary + +- name: András Lasso + affiliation: Queen's University + country: Canada, ON + +- name: Roland Nagy (remote) + affiliation: University of Szeged + country: Hungary + +- name: Ádám Perényi (remote) + affiliation: University of Szeged + country: Hungary +--- + +# Project Description + + + + +Cochlear implant electrode placement is extremely 
important during cochlear surgery, as inserting it as close as possible to the modiolus produces the best results (lower energy consumption of the device, better hearing outcomes, speech recognition and so on). +But it is an imaging nightmare: imaging the inner ear isn't that easy, and doing so with metal inserted is even more challenging. One of the best options is to use fluoroscopy images, but then we lose the 3rd dimension. +If we could register the two modalities, we could have the desired accuracy and extend the measurements into 3D. + + + +## Objective + + + + +Brainstorm about ideas, and maybe create a prototype using example data. + +- Collect ideas +- Collect even more ideas +- Hopefully create a prototype + + + +## Approach and Plan + + + + + +- Examine the CBCT and fluoroscopy images and identify the points. One for sure is that the CBCT images have bad distances, as usual... +- Try to overlay/register fluoroscopy images using anatomical landmarks + + + +## Progress and Next Steps +With some sample data we tried the new Virtual CathLab module. It seems that it has most of the functionality we need to bring our work further. +Also, created a small anonymization Slicer extension that will fit our later workflow. + + +1. Describe specific steps you **have actually done**. +- Checked the Virtual CathLab module. 
+- Successfully loaded our own imaging CBCT data into it, and we had similar results as our fluoroscopy images +- Created a batch anonymization extension + + + + +# Illustrations + + + + +![](https://github.com/user-attachments/assets/db4355c7-71b9-418e-ab0d-07d18bd3c004) +![](https://github.com/user-attachments/assets/104baf5d-bcde-4378-b411-b7f450d4f9db) + +Progress: +Anonymization module UI demo video: + + + +Here is a link to the same video on +[YouTube](https://www.youtube.com/watch?v=Tq1r9Yp6PGk) + +Anonymization module UI: + ![](https://github.com/user-attachments/assets/f8629ab0-713c-4ffa-a4b4-0ca0cd36ca65) + + +Example module usage with our own cochlear implant CBCT data: +![](https://github.com/user-attachments/assets/680dd100-0976-4829-94b0-7755068e8605) + + + +# Background and References + + + + +_No response_ diff --git a/PW44_2026_GranCanaria/Projects/RevisitRawimageguessExtension/README.md b/PW44_2026_GranCanaria/Projects/RevisitRawimageguessExtension/README.md new file mode 100644 index 000000000..f8cba723d --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/RevisitRawimageguessExtension/README.md @@ -0,0 +1,102 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Revisit RawImageGuess extension +category: Infrastructure +presenter_location: + +key_investigators: + +- name: Attila Nagy + affiliation: University of Szeged + country: Hungary + +- name: Csaba Pintér + affiliation: EBATINCA SL + country: Spain + +- name: Andras Lasso + affiliation: Queen's University + country: Canada + +- name: Steve Pieper + affiliation: Isomics Inc. + country: USA + +- name: Jean-Christophe Fillion-Robin + affiliation: Kitware Inc. + country: USA + +--- + +# Project Description + + + + +We created this extension not too long ago. +By using this extension you can find out the parameters of raw file types and formats that Slicer doesn't handle out of the box. 
This way you can create an .nhdr with the parameters of the images(series), and then load them into Slicer. + + + +## Objective + + + + +Would like to collect use cases, refinement suggestions and new ideas. + + + + +## Approach and Plan + + + + +Collect use cases, refinement suggestions and new ideas. + + + + +## Progress and Next Steps + + + + +1. Ruined the original functionality :D +2. Began to implement new ideas an experiment with them. +3. Tried one, but didn't really work out as expected. It is a row continuity heuristic (super cheap, super effective) +Natural images (medical, microscopy, industrial) have strong horizontal continuity. +For a guessed X: +Interpret raw data as 1D array +Split into rows of length X +Compute: +mean(|row[i] - row[i+1]|) +Do this for several rows. +Maybe needs some refinement. + +The implementation of further ideas is coming. + +# Illustrations + + + + + +![](https://github.com/user-attachments/assets/fed718c2-cf5e-4888-a136-fd898548d2b9) + +# Background and References + + + + +Project week from 2018: +https://projectweek.na-mic.org/PW28_2018_GranCanaria/Projects/RawImageGuess/ + +And 2019: +https://projectweek.na-mic.org/PW30_2019_GranCanaria/Projects/RawImageGuess/ diff --git a/PW44_2026_GranCanaria/Projects/SceneMirroringOnTheWebWithTrameSlicer/README.md b/PW44_2026_GranCanaria/Projects/SceneMirroringOnTheWebWithTrameSlicer/README.md new file mode 100644 index 000000000..a232f4db6 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/SceneMirroringOnTheWebWithTrameSlicer/README.md @@ -0,0 +1,115 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Scene mirroring on the web with trame-slicer +category: Cloud / Web +presenter_location: + +key_investigators: + + - name: Thibault Pelletier + affiliation: Kitware + country: France + +--- + +# Project Description + + + + +Following our success in bringing core 3D Slicer functionalities on the web with the trame framework, we are continuing +our endeavor 
by adding more connectivity between the Slicer application and the trame ecosystem. + +This project will bring a connection between the Slicer application scene and the trame application providing a stepping +stone for exciting new features such as: + +- Real time review and cooperation in one viewer +- Remote IGT display on tablets or light devices +- Interactive teaching modules +- ... + +## Objective + + + +1. New module / logic dedicated to starting a trame server in the main thread +2. Library components for users to define their own mirroring +3. Example on linking the trame-slicer Segmentation application with the Slicer main application + +## Approach and Plan + + + +1. Proof of concept of starting a trame server in the Slicer application thread +2. Test of putting trame components into the Qt layout using QWebEngine components +3. Implementation / test of scene interactive linking between Slicer and trame Slicer +4. Bi directional interaction on the same Scene and interactivity tests + +## Progress and Next Steps + + + +### Progress + +The trame-server relies on an aiohttp server and uses the main async Event Loop to establish a one to one connection to +a client. + +In the context of the SlicerTrame server, the application starts a dedicated Slicer-app-real process and in this context +the trame-server takes control of the main event loop. It is then effectively blocking the UI. + +To start the SlicerTrame server without blocking the UI, the trame-server can be started in another Thread and use a +new event loop in this thread (blocking the given thread). + +We have been able to test this using `PyQt6` + `qasync` and producing the expected behavior of mixing trame with the +`Qt` layer and have a responsive environment. + +In our tests, the trame-server was launched by the Qt application and be connected callbacks using qtSignal / slots. 
+ +### Technical points to address + +In the Slicer Python environment, the following points need to be addressed for proper integration: + +1. Qt6 is required for modern web view access (Slicer 6+) +2. The default Web API needs to be set to OpenGL for compatibility with the default view layer +3. Direct threading is not available and the actual start / management of the trame-server needs to be sorted out + * At the moment, threading can be *kind of* hacked using PyQt6 QThread objects. + * qasync can be used with PyQt6 to provide a trame-server start / stop + * In this context the event loop seems to be blocked / lag behind when the VTK OpenGL views are not hovered +4. Direct usage / inheritance of QThread in the Slicer context will make Slicer crash +5. The trame-slicer application / views / etc. needs to be created in the main Qt Thread to avoid threading problems + +### Next step(s) + +* Investigate QThread creation with QtPython in the Slicer python environment +* Architecture design for the Slicer / trame-slicer State interaction +* Test(s) of view rendering / streaming between Slicer and trame-slicer + +# Illustrations + + + +PyQt trame-slicer integration POC: + +![PyQt Demo](./pyqt_trame_slicer.gif) + +Slicer trame UI integration POC: + +![Slicer Trame UI](./slicer_trame_ui.gif) + +_No response_ + +# Background and References + + + +- trame-slicer library: [https://github.com/KitwareMedical/trame-slicer](https://github.com/KitwareMedical/trame-slicer) +- Slicer trame extension: [https://github.com/KitwareMedical/SlicerTrame](https://github.com/KitwareMedical/SlicerTrame) +- QAsync : [https://github.com/CabbageDevelopment/qasync](https://github.com/CabbageDevelopment/qasync) +- QThread usage in QtPython: [https://github.com/MeVisLab/pythonqt/issues/44#issuecomment-3352759196](https://github.com/MeVisLab/pythonqt/issues/44#issuecomment-3352759196) + 
+ diff --git a/PW44_2026_GranCanaria/Projects/SceneMirroringOnTheWebWithTrameSlicer/pyqt_trame_slicer.gif b/PW44_2026_GranCanaria/Projects/SceneMirroringOnTheWebWithTrameSlicer/pyqt_trame_slicer.gif new file mode 100644 index 000000000..2912c0916 Binary files /dev/null and b/PW44_2026_GranCanaria/Projects/SceneMirroringOnTheWebWithTrameSlicer/pyqt_trame_slicer.gif differ diff --git a/PW44_2026_GranCanaria/Projects/SceneMirroringOnTheWebWithTrameSlicer/slicer_trame_ui.gif b/PW44_2026_GranCanaria/Projects/SceneMirroringOnTheWebWithTrameSlicer/slicer_trame_ui.gif new file mode 100644 index 000000000..ada10dcc0 Binary files /dev/null and b/PW44_2026_GranCanaria/Projects/SceneMirroringOnTheWebWithTrameSlicer/slicer_trame_ui.gif differ diff --git a/PW44_2026_GranCanaria/Projects/SimpleCustomGpuAcceleratedFilteringAndVolumeRenderingPipeline/README.md b/PW44_2026_GranCanaria/Projects/SimpleCustomGpuAcceleratedFilteringAndVolumeRenderingPipeline/README.md new file mode 100644 index 000000000..5c7b9ecb3 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/SimpleCustomGpuAcceleratedFilteringAndVolumeRenderingPipeline/README.md @@ -0,0 +1,83 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Simple custom GPU accelerated filtering and volume rendering pipeline +category: VR/AR and Rendering +presenter_location: + +key_investigators: + +- name: Simon Drouin + affiliation: ETS Montreal + country: Canada + +- name: Rafael Palomar + affiliation: Oslo University Hospital + country: Norway + +--- + +# Project Description + + + +3D Slicer has the potential to be a powerful dissemination platform for novel 3D rendering methods targeted at medical imaging. However, its complex architecture is challenging for most experienced graphics programmers. It is difficult to create extensions that implement novel graphics pipeline without mastering a large portion of the Slicer architecture. 
+ +Thibault Pelletier at Kitware France has recently contributed a powerful Slicer extension called [Layer Displayable Manager](https://github.com/KitwareMedical/SlicerLayerDisplayableManager). This extension simplifies the creation of new interactive VTK-based graphics rendering pipeline, both from Loadable or Scripted modules. + +Kyle Sunderland has created another valuable tool: [vtkGPUImageFilters](https://github.com/Sunderlandkyl/VTK/commits/vtkGPUImageFilter3). Each filter in this collection can be connected to the regular VTK pipeline, but uses the GPU for processing without having to bring the data back to the CPU memory between filters. The code is not yet integrated in VTK and is still a work in progress. + +The combination of Layer Displayable Manager and vtkGPUImageFilters has the potential to enable a new range of GPU accelerated filtering and volume rendering effects. + + + +## Objective + + + + +The goal of this project is to generate a proof of concept of the potential provided by the integration of LayerDisplayableManager and vtkGPUImageFilters. The project aims at: +* Creating an experimental scripted module that can apply a gaussian blur to a volume in the GPU +* Render the resulting volume without bringing the filtered image back to the CPU memory. + + + +## Approach and Plan + + + + +1. Build a simple volume rendering scripted module that implements a custom pipeline using the Layer Displayable Manager extension and the existing vtkGPUVolumeRaycastMapper +2. Build Kyle Sunderland’s vtkGPUImageFilter as an external VTK module. This step may require major modifications in the code as this collection of filters was implemented for a version of VTK from 2019. +3. Link the vtkGPUImageFilter module to be usable in the scripted module developed in step 1. +4. In the loadable module, implement a class that derives from vtkGPUVolumeRaycastMapper but is able to take a volume already in the GPU as an input. +5. 
Modify the pipeline of the module developed in the first step to filter the input volume and pass it on to the volume rendering mapper. + +## Progress and Next Steps + +The project to create a simple shader as an input to volume rendering using LayerDisplayableManager turned into a reflection on the future of rendering in Slicer. + +* Solutions explored are the following: + * **Simple**: Create a GPU processing pipeline and make mapper inputs more flexible + * Pros: simple. Kyle Sunderland's vtkGPUImageFilter can be used as a basis. Minor changes can be made to each mapper to accept the output of GPU filters as their inputs, and make the number of inputs more flexible. + * Cons: Many GPU mappers are extremely complicated because they package too much functionality in the same class. They are difficult to modify and it is difficult to convince maintainers to accept changes that can affect other projects. + * **Extreme**: Replace VTK altogether + * Pros: possibly a lighter rendering engine, possibility to fully embrace modern graphics software designs. + * Cons: lots of work, risk of breaking existing functionality + * **Middle Ground**: replace main VTK rendering classes (Surface, Volume) + GPU processing pipeline + * Pros: Can start with existing code and simplify, introduces sufficient flexibility and has the potential to yield results shortly. + * Cons: Rendering classes no longer supported by Kitware. + +# Illustrations +Proposed simple architecture for volume rendering in the **Middle ground** solution above +![](https://github.com/user-attachments/assets/ea5a7157-7df3-4a2f-9d86-bbac16c9142e) + +The new architecture opens the possibility of creating rendering graphs, like in many CGI software and game engines. 
+![](https://github.com/user-attachments/assets/52a714d4-5883-458e-a526-b3de402abecb) + +# Background and References + +- [Layer Displayable Manager on GitHub](https://github.com/KitwareMedical/SlicerLayerDisplayableManager) +- [vtkGPUImageFilters](https://github.com/Sunderlandkyl/VTK/commits/vtkGPUImageFilter3) (branch on Kyle Sunderland's VTK fork) diff --git a/PW44_2026_GranCanaria/Projects/SimpleVoxelBasedVolumetrizationOfMedicalSegmentationsForFemSimulations/README.md b/PW44_2026_GranCanaria/Projects/SimpleVoxelBasedVolumetrizationOfMedicalSegmentationsForFemSimulations/README.md new file mode 100644 index 000000000..ebc27f774 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/SimpleVoxelBasedVolumetrizationOfMedicalSegmentationsForFemSimulations/README.md @@ -0,0 +1,114 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Simple Voxel-Based Volumetrization of Medical Segmentations for FEM simulations +category: Segmentation / Classification / Landmarking +presenter_location: + +key_investigators: + +- name: Domenico Riggio + affiliation: Karlsruhe Institute of Technology + country: Germany + +- name: Ciro Benito Raggio + affiliation: Karlsruhe Institute of Technology + country: Germany + +- name: Laura Lichtlein + affiliation: Karlsruhe Institute of Technology + country: Germany + +- name: Maria Francesca Spadea + affiliation: Karlsruhe Institute of Technology + country: Germany + +--- + +# Project Description + + + + +This project provides a simple and robust voxel-based volumetrization tool for medical models obtained from image segmentation. +Its primary goal is to enable fast simulation prototyping by converting segmented anatomical structures into simulation-ready volumetric representations, without requiring mesh cleaning, manual corrections, or complex tetrahedral meshing procedures (e.g., TetGen). 
+ +By avoiding strict mesh quality constraints, the approach allows researchers and students to quickly test segmented models in FEM and physics-based simulations, even when surface meshes are imperfect or non-manifold. +The pipeline is designed as a low-barrier, lightweight solution that prioritizes usability, reproducibility, and rapid iteration over highly optimized meshing accuracy. + +The tool is particularly suited for early-stage experimentation, educational use, and proof-of-concept simulations, where fast feedback and robustness are more critical than advanced mesh optimization. + + + +## Objective + + + +The objective of this project is to develop a simple, robust, and accessible volumetrization tool that enables the rapid conversion of medical segmentations into simulation-ready volumetric models. +The project specifically targets scenarios in which traditional surface-based meshing pipelines are impractical due to strict mesh quality requirements, extensive manual cleaning, or frequent meshing failures. + +The project aims to: + • Enable fast testing of segmented anatomical models without requiring mesh cleaning or complex preprocessing. + • Avoid traditional surface-based meshing pipelines that depend on high-quality clean processed meshes. + • Generate voxel-based volumetric representations that are robust to imperfect or noisy segmentations. + • Support rapid prototyping and early-stage simulations, where ease of use and speed are more important than mesh optimization. + • Allow users to move from segmentation to simulation with minimal effort. + +The overall goal is to lower the barrier to simulation, making it easier to explore segmented models in a FEM environment within a short time. + + + +## Approach and Plan + + + + +The project adopts a simple and user-oriented approach to transform segmented anatomical models into volumetric representations that can be quickly tested in simulations. 
The focus is on minimizing preprocessing effort and avoiding complex meshing steps, allowing users to move directly from segmentation to a usable volumetric model. + +The workflow is designed to be intuitive: users select a model, choose a voxel size, and generate a filled volumetric representation that can be visualized and exported. + + + +## Progress and Next Steps + + + + +### Progress: +- Improved the user interface to enhance usability and workflow clarity. +- Added export functionality for volumetric meshes in `.vtk` and `.msh` formats. +- Performed extensive code refactoring and systematic bug fixing to improve module stability and robustness. +- Implemented quality and fidelity metrics to quantitatively assess the voxelized volumetric model against the original segmentation. + These validation metrics provide objective feedback on how accurately the voxel-based representation preserves anatomical features for a given voxel pitch, supporting informed trade-offs between geometric accuracy and computational cost. +- Discussed and evaluated the integration of the proposed approach within the **Segment Mesher** extension developed by Lasso *et al.* [1]. + +You can try this extension here: https://github.com/DomenicoRiggio/SlicerModelsVoxelization + +### Next Steps: +- Integration within the **Segment Mesher** extension developed by Lasso *et al.* [1]. +- Test compatibility and performance with the **SofaSlicer** extension and other FEM-based simulation frameworks. +- Intersection voxel removal: implement a dedicated function to detect and remove voxels generated in overlapping regions (e.g., intersections between multiple segments or between separate models). This preprocessing step aims to ensure non-overlapping volumetric representations and to reduce numerical artifacts prior to simulation. 
+ +--- + +### Illustrations + + + +![](https://github.com/user-attachments/assets/da3689c9-e0ba-4905-bd9b-243d6d832f53) + +![](https://github.com/user-attachments/assets/3cdf45fe-2d92-493c-9022-fa52971e41e2) + + +--- + +### Background and References + + + +[1] Lasso, A., *et al.* **Segment Mesher** – A 3D Slicer extension for generating volumetric meshes from segmentation data. +GitHub repository: https://github.com/lassoan/SlicerSegmentMesher diff --git a/PW44_2026_GranCanaria/Projects/SlicerAdaptiveBrush/README.md b/PW44_2026_GranCanaria/Projects/SlicerAdaptiveBrush/README.md new file mode 100644 index 000000000..1754b929b --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/SlicerAdaptiveBrush/README.md @@ -0,0 +1,136 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: SlicerAdaptiveBrush - Adaptive Brush Segment Editor Effect +category: Segmentation / Classification / Landmarking +presenter_location: In-person + +key_investigators: + +- name: Ben Zwick + affiliation: The University of Western Australia and Talk2View + country: Australia + +- name: Andy Huynh + affiliation: Talk2View + country: Australia + +--- + +# Project Description + + + +SlicerAdaptiveBrush is a segment editor effect extension for 3D Slicer that provides an adaptive brush tool for semi-automatic segmentation. The brush automatically segments regions based on image intensity similarity within the brush area, adapting to image features (edges, boundaries) rather than using a fixed geometric shape. + +## Objective + + + +1. Submit to Extension Index +2. Improve documentation and tutorials +3. Optimize performance for real-time interaction +4. Add GPU acceleration for Level Set algorithm + +## Approach and Plan + + + +### 1. Submit to Extension Index + +- Complete submission requirements +- Test on all platforms (Linux, macOS, Windows) +- Create extension icon and screenshots + +### 2. 
Improve documentation + +- Write user tutorial with example workflows +- Document algorithm selection guide +- Add parameter tuning recommendations + +### 3. Optimize performance + +- Profile and optimize critical paths +- Implement ROI result caching for nearby brush positions +- Add slice-by-slice preview mode + +### 4. GPU acceleration + +- Implement OpenCL/CUDA backend for Level Set +- Benchmark CPU vs GPU performance + +## Progress and Next Steps + +### Completed + +1. **Extension Index Submission (In Progress)** + - CI/CD pipeline with GitHub Actions for automated builds + - Extension not yet in the Slicer Extension Index + +2. **Documentation** + - Full documentation site live at [benzwick.github.io/SlicerAdaptiveBrush](https://benzwick.github.io/SlicerAdaptiveBrush/) + - [Getting Started Tutorial](https://benzwick.github.io/SlicerAdaptiveBrush/user_guide/getting_started.html) with 10-step workflow + - [Algorithms Guide](https://benzwick.github.io/SlicerAdaptiveBrush/user_guide/algorithms.html) covering all 7 algorithms + - [Parameter Wizard Guide](https://benzwick.github.io/SlicerAdaptiveBrush/user_guide/parameter_wizard.html) for interactive setup + - Developer documentation for optimization, testing, and recipes + - Auto-generated screenshots from test suite + +3. **Performance Optimization** + - Implemented PerformanceCache with gradient and threshold caching + - Undo/redo integration with single save per stroke + - Cache statistics and hit rate logging + +4. 
**GPU Acceleration** + - Backend selector UI prepared (Auto/CPU/GPU) + - GPU implementation deferred to v2.0+ + +### Next Steps + +- Complete Extension Index submission +- Parameter optimization and testing for different image modalities (CT, MRI T1/T2, PET) and tissue types (tumor, bone, vessels, brain tissue) +- Testing with [Imaging Data Commons](https://portal.imaging.datacommons.cancer.gov/) data using Claude skills: + - [ImagingDataCommons/idc-claude-skill](https://github.com/ImagingDataCommons/idc-claude-skill) + - [mhalle/idc-skill](https://github.com/mhalle/idc-skill) + - [benzwick/imaging-data-commons-skill](https://github.com/benzwick/imaging-data-commons-skill) + - See also: [claude-scientific-skill for Imaging Data Commons](../ClaudeScientificSkillForImagingDataCommons/) project +- Mouse shortcuts can be configured using [SlicerMouseMaster](../SlicerMouseMaster/) for workflow optimization + +# Illustrations + +![Selecting Adaptive Brush effect](https://benzwick.github.io/SlicerAdaptiveBrush/_images/getting_started_005_select_adaptive_brush.png) + +*Selecting the Adaptive Brush effect in Segment Editor* + +![Painting with Adaptive Brush](https://benzwick.github.io/SlicerAdaptiveBrush/_images/getting_started_008_paint.png) + +*Painting a brain tumor segmentation - the brush adapts to image boundaries* + +![3D visualization of segmentation](https://benzwick.github.io/SlicerAdaptiveBrush/_images/getting_started_010_view_in_3d.png) + +*3D surface rendering of the segmented tumor* + +# Background and References + +Code repository: + +Documentation: + +- [Getting Started Tutorial](https://benzwick.github.io/SlicerAdaptiveBrush/user_guide/getting_started.html) +- [Algorithms Guide](https://benzwick.github.io/SlicerAdaptiveBrush/user_guide/algorithms.html) +- [Parameter Wizard](https://benzwick.github.io/SlicerAdaptiveBrush/user_guide/parameter_wizard.html) + +## Features + +- **Multiple algorithm choices** - Geodesic 
Distance, Watershed, Random Walker, Level Set, Connected Threshold, Region Growing, Threshold Brush +- **Auto-threshold methods** - Otsu, Huang, Triangle, Maximum Entropy, IsoData, Li +- **Automatic intensity analysis** - GMM-based threshold estimation adapts to image content +- **Edge-aware boundaries** - Respects anatomical boundaries automatically +- **2D and 3D modes** - Works on single slices or volumetrically (sphere mode) + +## References + +- [ITK-SNAP Adaptive Brush](https://www.itksnap.org/) - Original inspiration +- [3D Slicer Segment Editor](https://slicer.readthedocs.io/en/latest/user_guide/modules/segmenteditor.html) +- [SlicerSegmentEditorExtraEffects](https://github.com/lassoan/SlicerSegmentEditorExtraEffects) diff --git a/PW44_2026_GranCanaria/Projects/SlicerCBM/README.md b/PW44_2026_GranCanaria/Projects/SlicerCBM/README.md new file mode 100644 index 000000000..ef62e3a00 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/SlicerCBM/README.md @@ -0,0 +1,175 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: SlicerCBM - Computational Biophysics for Medicine in 3D Slicer +category: Quantification and Computation +presenter_location: In-person + +key_investigators: + +- name: Ben Zwick + affiliation: The University of Western Australia and Talk2View + country: Australia + +- name: Andy Huynh + affiliation: Talk2View + country: Australia + +--- + +# Project Description + + + +SlicerCBM (Computational Biophysics for Medicine in 3D Slicer) is an extension for 3D Slicer that provides tools for creating and solving computational models of biophysical systems and processes with a focus on clinical and biomedical applications. Features include segmentation, mesh generation, assignment of material properties (mechanical and electrical) and boundary conditions, and solvers for biomechanical modeling, electrical field modeling (EEG forward problem), and biomechanics-based non-rigid image registration. + +## Objective + + + +1. 
Submit to Extension Index +2. Update documentation +3. Fix bugs and improve code +4. Add automated tests + +## Approach and Plan + + + +### 1. Submit to Extension Index + +- Complete submission requirements ([#8](https://github.com/SlicerCBM/SlicerCBM/issues/8)) +- Fix installation issues + +### 2. Update documentation + +- Rewrite tutorials on slicercbm.org ([#55](https://github.com/SlicerCBM/SlicerCBM/issues/55)) + +### 3. Fix bugs and improve code + +- Fix critical module bugs +- Improve Slicer integration (MRML nodes, VTK formats) +- Enable stricter linting ([#77](https://github.com/SlicerCBM/SlicerCBM/issues/77)) + +### 4. Add automated tests + +- Write unit tests for core modules + +## Progress and Next Steps + + + +### 1. Submit to Extension Index + +- [#8](https://github.com/SlicerCBM/SlicerCBM/issues/8): TODO list for submitting extension +- [#41](https://github.com/SlicerCBM/SlicerCBM/issues/41): Fix gmsh Python package import +- [#45](https://github.com/SlicerCBM/SlicerCBM/issues/45): Check installation of all Python packages +- [#73](https://github.com/SlicerCBM/SlicerCBM/issues/73): Fix installation issues +- [#75](https://github.com/SlicerCBM/SlicerCBM/issues/75): Investigate Windows support + +### 2. Update documentation + +- [#34](https://github.com/SlicerCBM/SlicerCBM/issues/34): Put modules in categories +- [#39](https://github.com/SlicerCBM/SlicerCBM/issues/39): Use Segmentations module instead of Model Maker +- [#55](https://github.com/SlicerCBM/SlicerCBM/issues/55): Improve documentation and website + +### 3. 
Fix bugs and improve code + +- [#48](https://github.com/SlicerCBM/SlicerCBM/issues/48): Fix MVoxMeshGenerator module's handling of arguments +- [#61](https://github.com/SlicerCBM/SlicerCBM/issues/61): Keep data in MRML nodes +- [#62](https://github.com/SlicerCBM/SlicerCBM/issues/62): Fix Fusion/CreateSkullAndScalpSegments module +- [#64](https://github.com/SlicerCBM/SlicerCBM/issues/64): Fix MTLEDSimulator +- [#65](https://github.com/SlicerCBM/SlicerCBM/issues/65): Fix FuzzyClassification module +- [#67](https://github.com/SlicerCBM/SlicerCBM/issues/67): Fix BrainMaterialProperties module +- [#68](https://github.com/SlicerCBM/SlicerCBM/issues/68): Use consistent units (mm) for length +- [#70](https://github.com/SlicerCBM/SlicerCBM/issues/70): Fix SkullGenerator module +- [#72](https://github.com/SlicerCBM/SlicerCBM/issues/72): Use VTK mesh formats +- [#76](https://github.com/SlicerCBM/SlicerCBM/issues/76): Fix long lines +- [#77](https://github.com/SlicerCBM/SlicerCBM/issues/77): Enable stricter ruff linting + +### 4. Add automated tests + +- Write unit tests for core modules +- Set up CI/CD pipeline for automated testing + +# Illustrations + + + +Flowchart of the patient-specific solution of the iEEG forward problem in deforming brain. Brain shift caused by implantation of electrodes is computed using the biomechanical model. The computed displacement field is used to transform the DTI to the postoperative configuration. This warped DTI is then used as the basis for creating the iEEG forward model. +![fig_flowchart-eeg](https://github.com/NA-MIC/ProjectWeek/assets/33216696/ef320477-0540-460f-8412-122977ef2641) + +Original (actual preoperative) and deformed (predicted postoperative) MR images compared with original CT image and electrode positions. 
Postoperative CT image and electrode positions (white spheres in CT and red points in the slice planes) are overlaid on the (a,b,c) MRI acquired preoperatively and (d,e,f) MRI registered to postoperative configuration of the brain obtained using biomechanics-based image warping. +![fig_mri_ct_elec_unwarped_and_warped](https://github.com/NA-MIC/ProjectWeek/assets/33216696/b860a491-94a2-4c1b-8a8a-c1e3d1bfb42d) + +Tissue label maps based on (a,b,c) original preoperative and (d,e,f) deformed by insertion of electrodes postoperative image data. Tissue classes are colored as follows: scalp (pink); skull (yellow); GM (gray); WM (white); and CSF (blue). The location of the electrode grid array can be identified by the line of black voxels in the vicinity of the right temporal and parietal lobes. +![fig_labelmaps](https://github.com/NA-MIC/ProjectWeek/assets/33216696/f41bc134-12e6-42e2-b776-901060c56915) + +Mean conductivity (1/3 tr(C)) for models constructed using (a,b,c) original preoperative and (d,e,f) deformed by insertion of electrodes postoperative image data. The ECoG electrode grid substrate is denoted by the purple outline. +![fig_cond_MC](https://github.com/NA-MIC/ProjectWeek/assets/33216696/bd2cff16-6ed8-4e5a-af2e-1155f11a8369) + +Streamlines of the electric field generated by a current dipole source located in the temporal lobe of an epilepsy patient. Finite element solution using a regular hexahedral grid implemented in MFEM. +![brain-electric-field](https://github.com/NA-MIC/ProjectWeek/assets/33216696/29a3fe30-4353-49bb-ae91-ec6225eba7f6) + +# Background and References + + + +Code repository and documentation: + +- +- +- [GitHub Issues](https://github.com/SlicerCBM/SlicerCBM/issues) + +Sample data: + +- Zwick BF, Safdar S, Bourantas GC, Joldes GR, Hyde DE, Warfield SK, + Wittek A, Miller K. 
Data for patient-specific solution of the + electrocorticography forward problem in deforming brain [Data + set]. Zenodo; 2022. + +Publications: + +- Zwick BF, Safdar S, Bourantas GC, Joldes GR, Hyde DE, Warfield SK, + Wittek A, Miller K. Image data and computational grids for + computing brain shift and solving the electrocorticography + forward problem. Data in Brief. 2023;48:109122. + + +- Safdar S, Zwick BF, Yu Y, Bourantas GC, Joldes GR, Warfield SK, + Hyde DE, Frisken S, Kapur T, Kikinis R, Golby A, Nabavi A, + Wittek A, Miller K. SlicerCBM: automatic framework for + biomechanical analysis of the brain. Int J CARS. 2023. + + +- Zwick BF, Bourantas GC, Safdar S, Joldes GR, Hyde DE, Warfield SK, + Wittek A, Miller K. Patient-specific solution of the + electrocorticography forward problem in deforming + brain. NeuroImage. 2022;263:119649. + + +- Yu Y, Safdar S, Bourantas GC, Zwick BF, Joldes GR, Kapur T, Frisken + S, Kikinis R, Nabavi A, Golby A, Wittek A, Miller K. Automatic + framework for patient-specific modelling of tumour resection-induced + brain shift. Comput Biol Med. 2022;143:105271. + + +- Safdar S, Zwick BF, Bourantas G, Joldes GR, Warfield SK, Hyde DE, + Wittek A, Miller K. Automatic Framework for Patient-Specific + Biomechanical Computations of Organ Deformation: An Epilepsy (EEG) + Case Study. In: Nielsen PMF, Nash MP, Li X, Miller K, Wittek A, + editors. Computational Biomechanics for Medicine. Cham: Springer + International Publishing; 2022. p. 75–89. 
+ + +Previous Project Weeks: + +- [PW39 SlicerCBM](https://projectweek.na-mic.org/PW39_2023_Montreal/Projects/SlicerCBM/) +- [PW38 SlicerCBM](https://projectweek.na-mic.org/PW38_2023_GranCanaria/Projects/SlicerCBM/) diff --git a/PW44_2026_GranCanaria/Projects/SlicerMouseMaster/README.md b/PW44_2026_GranCanaria/Projects/SlicerMouseMaster/README.md new file mode 100644 index 000000000..7cda535d4 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/SlicerMouseMaster/README.md @@ -0,0 +1,103 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: SlicerMouseMaster - Advanced Mouse Customization for 3D Slicer +category: Infrastructure +presenter_location: In-person + +key_investigators: + +- name: Ben Zwick + affiliation: The University of Western Australia and Talk2View + country: Australia + +- name: Andy Huynh + affiliation: Talk2View + country: Australia + +--- + +# Project Description + + + +SlicerMouseMaster is a 3D Slicer extension for advanced mouse customization, button remapping, and workflow optimization. It allows users to assign custom actions to extra mouse buttons (back, forward, thumb buttons), create workflow-specific presets, and use context-sensitive bindings that change based on the active Slicer module. + +## Objective + + + +1. Submit to Extension Index +2. Add support for additional mouse models +3. Create workflow presets for common tasks +4. Improve cross-platform compatibility + +## Approach and Plan + + + +### 1. Submit to Extension Index + +- Complete submission requirements +- Test on all platforms (Linux, macOS, Windows) +- Create extension icon and screenshots + +### 2. Add mouse model support + +- Test with various mice from different manufacturers +- Improve button detection wizard +- Create community mouse profile database + +### 3. Create workflow presets + +- Segment Editor optimized preset +- Markups workflow preset +- Volume rendering preset + +### 4. 
Cross-platform compatibility + +- Test button codes across operating systems +- Document platform-specific differences +- Implement platform-specific fallbacks + +## Progress and Next Steps + + + +1. Submitted extension to Slicer Extensions Index ([PR #2308](https://github.com/Slicer/ExtensionsIndex/pull/2308)) +2. Created comprehensive documentation site with user guide, developer guide, and API reference +3. Implemented automated screenshot generation for documentation via CI +4. Added support for Logitech MX Master 3S and MX Master 4 mice +5. Created default presets for common workflows (Segment Editor, Markups, Volume Rendering) + +# Illustrations + +![Main UI](https://raw.githubusercontent.com/benzwick/SlicerMouseMaster/main/Screenshots/main-ui.png) + +*MouseMaster module interface showing mouse selection, preset management, and button mappings* + +# Background and References + + + +- Source code: +- Documentation: +- Extensions Index PR: + +## Features + +- **Button Remapping**: Assign custom actions to mouse buttons (back, forward, thumb, etc.) +- **Mouse Profiles**: Built-in support for popular mice with auto-detection +- **Workflow Presets**: Save and share button configurations for different tasks +- **Context-Sensitive Bindings**: Different mappings per Slicer module +- **Cross-Platform**: Works on Linux. Not tested on macOS or Windows. 
+ +## Supported Mice + +- Logitech MX Master 3S/4 (fully supported) +- Generic 3-button and 5-button mice (basic support) +- Custom profiles via button detection wizard diff --git a/PW44_2026_GranCanaria/Projects/SlicermodalityconverterExtensionAdditionOfNewModelsAndUseCaseExamples/README.md b/PW44_2026_GranCanaria/Projects/SlicermodalityconverterExtensionAdditionOfNewModelsAndUseCaseExamples/README.md new file mode 100644 index 000000000..0906c251d --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/SlicermodalityconverterExtensionAdditionOfNewModelsAndUseCaseExamples/README.md @@ -0,0 +1,94 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: SlicerModalityConverter Extension - addition of new models and use case examples +category: Quantification and Computation +presenter_location: + +key_investigators: + +- name: Ciro Benito Raggio + affiliation: Karlsruhe Institute of Technology + country: Germany + +- name: Paolo Zaffino + affiliation: Magna Graecia University of Catanzaro + country: Italy + +- name: Maria Francesca Spadea + affiliation: Karlsruhe Institute of Technology + country: Germany + +--- + +# Project Description + + + + +SlicerModalityConverter is a 3D Slicer extension designed for medical image-to-image (I2I) translation. + +The ModalityConverter module provides a user-friendly interface for integrating multiple AI models trained for I2I translation (currently MRI-to-CT). It also supports GPU acceleration for faster inference, and is designed to allow users to easily integrate custom models. + +More about the module [here](https://github.com/ciroraggio/SlicerModalityConverter). + + + +## Objective + + + + +1. Integration of new translation models for T1w-to-T2w MRI translation. +2. Creating use case examples with video tutorials. + + + +## Approach and Plan + + + + +1. 
Integrate two new pre-trained models for T1w-to-T2w translation (presented in this [study](https://arxiv.org/abs/2507.14575) and released in this [repository](https://github.com/AndreaMoschetto/medical-I2I-benchmark)), following the guidelines reported in the module documentation for integrating custom models. + +2. Create video tutorials demonstrating the common uses of existing models. For example, show how to use MRI-to-synthetic CT translation models to extract the skull's representation from a T1w brain MRI. + + + +## Progress and Next Steps + + + +### Progress +1. The integration of the two pre-trained T1w-to-T2w translation models was completed but not accepted. Although preliminary tests were performed, the achieved performance was not satisfactory for practical use. In addition, the training strategy adopted for these models makes their general usability and scalability difficult in a broader clinical/research context. For these reasons, the models were not officially integrated into the module. + +2. A CBCT-to-CT translation model for the head and neck district was successfully integrated into the module. + +3. A short tutorial was added to demonstrate how to extract the skull directly from a T1-w MRI using the MRHead example, exploiting the models available in the Modality Converter module. + +### Next steps + +1. Evaluate the possibility of integrating alternative, more robust T1w–T2w translation models with better generalization performance. + +2. Extend the tutorial section with additional use cases based on the currently integrated models. 
+ +# Illustrations + + +* New model: + ![](https://github.com/user-attachments/assets/537263e2-9a5a-4ec6-988d-3db61fd9dade) + + +* Tutorial: [Skull Extraction from T1w MRI via Deep Learning-based Image-to-Image Translation in 3D Slicer](https://github.com/ciroraggio/SlicerModalityConverter/blob/develop/ModalityConverter/assets/tutorials/MRItoCT-skull.mp4) + + +# Background and References + + + + +_No response_ diff --git a/PW44_2026_GranCanaria/Projects/SlicerultrasoundExtensionDevelopment/README.md b/PW44_2026_GranCanaria/Projects/SlicerultrasoundExtensionDevelopment/README.md new file mode 100644 index 000000000..71e978888 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/SlicerultrasoundExtensionDevelopment/README.md @@ -0,0 +1,109 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: SlicerUltrasound Extension development +category: Segmentation / Classification / Landmarking +presenter_location: + +key_investigators: + +- name: Deepa Krishnaswamy + affiliation: Brigham and Women's Hospital + country: USA + +- name: Tina Kapur + affiliation: Brigham and Women's Hospital + country: USA + +- name: Tamas Ungi + affiliation: ClaroNav + country: Canada + +- name: David Dinh + affiliation: SlicerUltrasound Team + country: USA + +- name: Martin Bellehumeur + affiliation: Radical Imaging + country: Germany + +--- + +# Project Description + + + + +**SlicerUltrasound** is a 3D Slicer extension that currently includes two modules: 1) **_Anonymize_**, which removes both metadata-embedded PHI and burned-in text from DICOM images, and 2) **_Annotate_**, which supports expert labeling of image findings. + +The **_Anonymize_** module has been used at multiple hospitals (BWH, Lahey, Indiana Methodist) to remove PHI from patient exams. Users import DICOM ultrasound images from their local folder, and apply probe-specific masking templates to remove burned-in identifiers. 
Users specify the transducer type—curvilinear or phased array—which determines the expected fan shape. By marking three or four points on the image, the module interpolates the imaging sector and masks any visual PHI outside this region while preserving diagnostically relevant content. + +The **_Annotate_** module allows experts to annotate lung ultrasound video clips, focusing on features such as pleura lines and B-lines. It provides an intuitive interface for frame-by-frame annotation, supports multiple raters, and saves annotation data for future research and machine learning. + +Our goals during the project week are to: +1. Obtain feedback from developers and clinicians +2. Implement changes to the extension +3. Connect with and talk to others about the MONAI Ultrasound Working Group - Annotation and Anonymization subgroup. + + + +## Objective + + + + +1. Demonstrate the current extension to Slicer core developers and obtain feedback +2. Demonstrate to clinicians and obtain feedback +3. Integrate changes to the extension + +## Approach and Plan + + + +1. Describe specific steps of **what you plan to do** to achieve the above described objectives. + +## Progress and Next Steps + + + + +1. Many people provided useful feedback about possible improvements to the modules. +2. We also looked into the current OHIF viewer, which already allows for users to annotate pleural lines and B-lines. Martin added functionality needed to save additional annotations. + +# Illustrations + + + +Anonymize module +![](https://github.com/user-attachments/assets/ebfdbe29-4540-47d1-9a10-1d05e172fcd9) + +Annotate module +![](https://github.com/user-attachments/assets/7f8b89b7-00f9-42b2-aae1-2558b2e64279) + +Original OHIF app for annotation of pleural lines and B-lines - on our anonymized lung US data: + + +Enhancements to the OHIF app for annotation of pleural lines and B-lines - on sample cardiac US data. 
This helps with the clinical workflow, where step 1) the clinician scans through the ultrasound clip and quickly picks the frame that has the maximum number of B-lines, and step 2) the clinician or a fellow can come back later and annotate the pleural lines and B-lines. + + + +# Background and References + + + + +[Link to code](https://github.com/SlicerUltrasound/SlicerUltrasound) + +[Link to PR for OHIF](https://github.com/OHIF/Viewers/pull/5758) diff --git a/PW44_2026_GranCanaria/Projects/Talk2ViewTheMedicalViewerYouCanTalkTo/README.md b/PW44_2026_GranCanaria/Projects/Talk2ViewTheMedicalViewerYouCanTalkTo/README.md new file mode 100644 index 000000000..9361350c1 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/Talk2ViewTheMedicalViewerYouCanTalkTo/README.md @@ -0,0 +1,130 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Talk2View - The medical viewer you can talk to +category: Infrastructure +presenter_location: + +key_investigators: + +- name: Andy Huynh + affiliation: Talk2View + country: Australia + +- name: Ben Zwick + affiliation: Talk2View + country: Australia + +- name: Kat Dunn + affiliation: Talk2View + country: Australia + +- name: Wilson Wan + affiliation: Talk2View + country: Australia + +--- + +# Project Description + + + + +[Talk2View](https://www.talk2view.com) helps users use complex medical software using natural language commands. Currently, Talk2View is available as a standalone application built on top of 3D Slicer and a 3D Slicer Extension module. + + + +## Objective + + + + +1. Objective A. Package and Distribute Talk2View 3D Slicer Extension +2. Objective B. Evaluate Connector Architecture for Community Development +3. Objective C. Web App Feasibility using Trame +4. Objective D. Ask Community for new Features and Connectors + + + + +## Approach and Plan + + + + +#### Objective A. Package and Distribute Talk2View 3D Slicer Extension + +1. 
Consult the 3D Slicer developer community on the best approach to package and distribute the Talk2View 3D Slicer extension. +2. Package and distribute Talk2View using the recommended approach. + +#### Objective B. Evaluate Connector Architecture for Community Development + +1. Consult the 3D Slicer developer community on best practices for designing a connector architecture that integrates with existing extensions with minimal friction. +2. Implement the connector architecture and publish clear documentation so community members can connect their own extensions for the AI agent to use as tools. + +#### Objective C. Web App Feasibility using Trame + +1. Speak with Thibault to evaluate Trame’s capabilities and confirm whether delivering Talk2View as a web application is feasible. + +#### Objective D. Ask Community for new Features and Connectors + +1. Engage the 3D Slicer community to identify high-value features or connectors that could be added to Talk2View. + + + + +## Progress and Next Steps + + + + +#### Objective A. Package and Distribute Talk2View 3D Slicer Extension + +- [x] Talk2View 3D Slicer extension is currently available on our [website](https://www.talk2view.com). +- [ ] Ask Slicer Developers on best approach to package and distribute on the Extension Index. +- [ ] Package and distribute using recommended approach. + +#### Objective B. Evaluate Connector Architecture for Community Development + +- [x] Community can develop connectors in Talk2View. +- [ ] Seek advice on making this process more frictionless. +- [ ] Implement into Talk2View 3D Slicer Extension. +- [ ] Document the process for community development. + +#### Objective C. Web App Feasibility using Trame + +- [ ] Set-up meeting with Thibault to talk about Trame's capabilities and if it's feasible to add Talk2View as a web app. +- [ ] Create an MVP to showcase and test if it is worth working towards. + +#### Objective D. 
Ask Community for new Features and Connectors + +- [ ] Showcase Talk2View to community. +- [ ] List features/connectors that would help improve Talk2View. +- [ ] Add features/connectors to Talk2View. + + + +# Illustrations + + + + + + + +![](https://github.com/user-attachments/assets/84d43eb2-33c6-4cb7-95b4-b4577f91a838) + +# Background and References + + + + +_No response_ diff --git a/PW44_2026_GranCanaria/Projects/Template/README.md b/PW44_2026_GranCanaria/Projects/Template/README.md new file mode 100644 index 000000000..612d58613 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/Template/README.md @@ -0,0 +1,58 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Write full project title here +category: Uncategorized +presenter_location: Online + +key_investigators: +- name: Person Doe + affiliation: University + +- name: Person2 Doe2 + affiliation: University2 + country: Spain +--- + +# Project Description + + + +## Objective + + + +1. Objective A. Describe **what you plan to achieve** in 1-2 sentences. +1. Objective B. ... +1. Objective C. ... + +## Approach and Plan + + + +1. Describe specific steps of **what you plan to do** to achieve the above described objectives. +1. ... +1. ... + +## Progress and Next Steps + + + +1. Describe specific steps you **have actually done**. +1. ... +1. ... 
+ +# Illustrations + + + +# Background and References + + diff --git a/PW44_2026_GranCanaria/Projects/Template/README.md.j2 b/PW44_2026_GranCanaria/Projects/Template/README.md.j2 new file mode 100644 index 000000000..e4a640fe9 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/Template/README.md.j2 @@ -0,0 +1,56 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: {{ title | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +category: {{ category | regex_replace("\n$|\n\.\.\.\n$", "") }} +presenter_location: {{ presenter_location | regex_replace("\n$|\n\.\.\.\n$", "") }} + +key_investigators: +{% for investigator in investigators %} +- name: {{ investigator.name | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} + affiliation: {{ investigator.affiliation | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +{%- if investigator.country %} + country: {{ investigator.country | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +{%- endif %} +{% endfor %} +--- + +# Project Description + + + +{{ description }} + +## Objective + + + +{{ objective }} + +## Approach and Plan + + + +{{ approach }} + +## Progress and Next Steps + + + +{{ progress }} + +# Illustrations + + + +{{ illustrations }} + +# Background and References + + + +{{ background }} diff --git a/PW44_2026_GranCanaria/Projects/UpdatingSlicersofa/README.md b/PW44_2026_GranCanaria/Projects/UpdatingSlicersofa/README.md new file mode 100644 index 000000000..ffc818a9c --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/UpdatingSlicersofa/README.md @@ -0,0 +1,71 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Updating SlicerSOFA +category: Infrastructure +presenter_location: + +key_investigators: + +- name: Rafael Palomar + affiliation: NTNU / OUH + country: Norway + +- name: Paul Baksic + affiliation: Inria + country: France + +- name: Steve Pieper + affiliation: Isomics Inc. 
+ country: USA + +--- + +# Project Description + +SlicerSOFA is a 3D Slicer extension integrating the simulation framework SOFA in 3D Slicer. The extension packages the SOFA-framework, together with `SofaPython3` and exposes SOFA to 3D Slicer through Python. In addition, SlicerSOFA provides functionality to connect and transfer data between 3D Slicer objects and SOFA objects. In this project, we plan to update SlicerSOFA to have better cross-platform coverage (currently a macOS version is not available) and work with the latest 3D Slicer and SOFA versions, as well as integrating external execution of simulations through RPyC. + +## Objective + +- ⬆️ Update SlicerSOFA to use SOFA v25.12 (latest available) — delivered in 🔀 [PR #60](https://github.com/Slicer/SlicerSOFA/pull/60) +- 🧪 Update SlicerSOFA to run on the latest 3D Slicer stable and development versions — in review (🔀 [PR #60](https://github.com/Slicer/SlicerSOFA/pull/60)) +- 🍏 Fix SlicerSOFA macOS integration — next +- ✅ Enable loading a regular SOFA scene in SlicerSOFA — delivered via SOFASceneLoader (🔀 [PR #60](https://github.com/Slicer/SlicerSOFA/pull/60)) +- 🔌 Integrate RPyC external execution — next +- 📝 Update project documentation — delivered in 🔀 [PR #60](https://github.com/Slicer/SlicerSOFA/pull/60) + +## Approach and Plan + +The core SOFA library will be updated first to its latest version (v25.12) and tested on the latest Slicer (stable+dev). After an updated working version for Windows and GNU/Linux, a fix for macOS will be provided. Finally, a new executor using RPyC will be provided (tests will be performed in external processes (local + remote machine)). The updates and the new additions will be documented. 
+ +## Progress and Next Steps + +### ✅ Results (PW44) +- ✨ New SOFASceneLoader module to open any Python-based SOFA scene exposing `createScene()` +- 🔁 Added SOFA↔️MRML mapping for polydata topologies +- ⬆️ Updated SOFA core and related plugins to v25.12 +- 🔧 Forced CMake minimum version policy on GLEW for reliable builds +- 📝 Documentation updates + +📎 All the above are included in: 🔀 [PR #60](https://github.com/Slicer/SlicerSOFA/pull/60) +(Previous SOFASceneLoader work: [PR #58](https://github.com/Slicer/SlicerSOFA/pull/58)) + +### 🔜 Next steps +- 🍏 macOS packaging and integration fixes +- 🔌 RPyC executor integration and local/remote testing +- 🧪 CI and validation on latest Slicer stable and nightly +- 📚 Expand documentation and examples + +# Illustrations + +🎥 SlicerSOFA `.py` scene loading demo: + +[SlicerSOFA.webm](https://github.com/user-attachments/assets/6ce15869-eb69-4be1-adad-87db3bb91d46) + +# Background and References + +- Source code: https://github.com/Slicer/SlicerSOFA +- 🔀 [PR #60 (PW44 results)](https://github.com/Slicer/SlicerSOFA/pull/60) +- [PR #58 (SOFASceneLoader)](https://github.com/Slicer/SlicerSOFA/pull/58) diff --git a/PW44_2026_GranCanaria/Projects/UsingAutomaticAiSegmentationToolsForImagingDataCommonsDataEnrichment/README.md b/PW44_2026_GranCanaria/Projects/UsingAutomaticAiSegmentationToolsForImagingDataCommonsDataEnrichment/README.md new file mode 100644 index 000000000..c2432f7c8 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/UsingAutomaticAiSegmentationToolsForImagingDataCommonsDataEnrichment/README.md @@ -0,0 +1,165 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Using automatic AI segmentation tools for Imaging Data Commons data enrichment +category: Segmentation / Classification / Landmarking +presenter_location: + +key_investigators: + +- name: Lena Giebeler + affiliation: RWTH Aachen + country: Germany + +- name: Andrey 
Fedorov + affiliation: BWH + country: USA + +- name: Lalith Kumar Shiyam Sundar + affiliation: LMU + country: Germany + +- name: Ron Kikinis + affiliation: BWH + country: USA + +- name: Paolo Zaffino + affiliation: Magna Graecia University of Catanzaro + country: Italy +--- + +# Project Description + + + + +This project builds on work carried out during [PW43](https://projectweek.na-mic.org/PW43_2025_Montreal/Projects/EvaluatingConcordanceOfAiBasedAnatomySegmentationModels/) and on the work ["In Search of Truth: Evaluating Concordance of AI-Based Anatomy Segmentation Models"](https://www.arxiv.org/pdf/2512.15921). + +Our overall goal is to enrich images available in [Imaging Data Commons](https://learn.canceridc.dev/) with segmentations and quantitative features. + +In this work, we developed a practical workflow to compare AI-based anatomy segmentation models in the absence of ground truth annotations. Segmentation outputs from different models were harmonized into a standardized representation, enabling structure-wise comparison and efficient visual review. Using this framework, we evaluated six open-source segmentation models, TotalSegmentator 1.5, TotalSegmentator 2.6, Auto3DSeg, MOOSE, MultiTalent, and CADS, on 18 CT scans from the NLST dataset hosted by the [Imaging Data Commons](https://learn.canceridc.dev). While agreement varied across anatomical structures, MOOSE and CADS showed consistent results across all evaluated structures and did not show visible segmentation errors during visual comparison. In contrast, the other four models produced visible segmentation errors or deficiencies in rib and vertebrae structures. + +The goal of this Project Week is to select a representative subset of the NLST dataset, run the MOOSE segmentation model on it, and use radiomic features to identify and visually inspect potential segmentation outliers to confirm robustness of the model. 
Stretch goal is to process all of the CTs in NLST (or even beyond NLST) with MOOSE to generate segmentations and radiomics features, for the subsequent ingestion into Imaging Data Commons. + +In addition, the 3DSlicer [CrossSegmentationExplorer](https://github.com/ImagingDataCommons/CrossSegmentationExplorer) extension described in the preprint should be finished and published as an extension for 3D Slicer. + + + +## Objective + + + + +1. Evaluate how the MOOSE segmentation model performs on a representative subset of the NLST dataset, as the previously analyzed subset did not capture all relevant dataset characteristics. +2. Publish the CrossSegmentationExplorer extension in 3DSlicer +3. Identify any new segmentation models known in the community that might be suitable for automatic segmentation tasks. + + + + +## Approach and Plan + + + + +1. Define criteria for NLST subset selection +2. Run MOOSE on that subset and extract radiomic features +3. Analyze feature distributions to detect outliers +4. Visually review outlier cases in Slicer +5. Compare muscle segmentation with [MuscleMap](https://github.com/MuscleMap/MuscleMap) (also [SlicerMuscleMap](https://github.com/Eddowesselink/SlicerMuscleMap) for Slicer) +6. Side-project stretch goal: assemble a whole-body single volume image for Visible Human and prepare segmentations from various tools for the resulting image. + + + +## Progress and Next Steps + + + + +### 1. Representative NLST Subset Selection +The representative subset selection was guided by an initial sampling strategy developed together with Claude AI. Based on this plan, a Python notebook was generated and iteratively refined. The notebook is available in the [**`nlst-exploration`**](https://github.com/fedorov/nlst-exploration/tree/main) repository. 
+ + +**Dataset filtering:** We excluded all CT series without existing TotalSegmentator segmentations, as these had already been filtered out previously due to problematic acquisition parameters (e.g., invalid pixel spacing). + +**Selection of relevant DICOM attributes:** Together with Claude AI, we discussed and defined a set of DICOM parameters that should be considered to capture relevant variability in acquisition, reconstruction, and scanner hardware. +The following attribute groups were selected: Spatial: 'SliceThickness', 'PixelSpacing', 'SpacingBetweenSlices'; Exposure: 'KVP', 'Exposure', 'CTDIvol'; Reconstruction: 'ConvolutionKernel'; Hardware: 'Manufacturer', 'ManufacturerModelName'; Geometry: 'PatientPosition', 'GantryDetectorTilt', 'SpiralPitchFactor'; + +The following attributes were found to be constant or empty across the dataset and were therefore excluded from further analysis: SpacingBetweenSlices, CTDIvol, PatientPosition, and GantryDetectorTilt. + +At this stage, the selection focuses exclusively on series-level acquisition and reconstruction parameters. +Patient-related attributes (e.g. age, sex, or other clinical metadata) are not yet included in the sampling strategy and will be incorporated in a future iteration. + +**Data reduction:** To reduce the combinatorial complexity of the parameter space, we rounded continuous attributes as follows: +- *Slice thickness* was rounded to one decimal place. +- *Pixel spacing* was rounded to one decimal place. +- *KVP* was rounded to the nearest integer. +- *Exposure* values were rounded to the nearest 100 units.
+ +**Dataset statistics (after filtering and data reduction):** + +| Numerical Attribute | Count | Unique | Min | Q25 | Median | Mean | Q75 | Max | Std | +|-----------------------|--------|----------|------|------|--------|------|------|-------|------| +| SliceThickness | 133273 | 13 | 0.60 | 2.00 | 2.50 | 2.47 | 2.50 | 6.50 | 0.90 | +| PixelSpacing | 133273 | 7 | 0.40 | 0.60 | 0.70 | 0.66 | 0.70 | 1.00 | 0.07 | +| KVP | 133273 | 7 | 80 | 120 | 120 | 121 | 120 | 140 | 5 | +| Exposure | 133256 | 62 | 0 | 0 | 100 | 504 | 1000 | 9000 | 668 | +| SpiralPitchFactor | 38629 | 9 | 0.75 | 1.38 | 1.50 | 1.47 | 1.50 | 1.75 | 0.15 | + +| Categorical Attribute | Count | Unique | Most Frequent Value | Most Frequent (%) | Top 3 Values | +|---------------------------|--------|----------|----------------------|-------------------|------------------------------------------------| +| Manufacturer | 133273 | 4 | GE MEDICAL SYSTEMS | 45.4 % | GE MEDICAL SYSTEMS, SIEMENS, Philips | +| ManufacturerModelName | 133273 | 23 | Volume Zoom | 20.8 % | Volume Zoom, Sensation 16, LightSpeed QX/i | +| ConvolutionKernel | 133273 | 36 | STANDARD | 29.9 % | STANDARD, B30f, B50f | + + +**Clustering and sampling:** Based on the normalized attributes, Claude AI proposed a clustering strategy to group series with similar acquisition and reconstruction characteristics. The dataset was clustered into 14 distinct clusters. From each cluster, 3 representative CT series were selected, this resulted in a total of 48 CT series. +Post-hoc verification using summary statistics confirmed that the selected subset mostly reflects the global parameter distributions of the filtered NLST dataset, with most acquisition parameters covering the interquartile range Q25–Q75 of the full dataset. 
+ + +| Numerical Attribute | Min | Median | Max | +|-----------------------|------|--------|-------| +| SliceThickness | 1.0 | 2.5 | 5.0 | +| PixelSpacing | 0.64 | 0.665 | 0.72 | +| KVP | 120 | 120 | 140 | +| Exposure | 0 | 100 | 3000 | +| SpiralPitchFactor | 0.75 | 1.5 | 1.5 | + +A CSV file listing all selected representative CT series is available in the [**`nlst-exploration`**](https://github.com/fedorov/nlst-exploration/tree/main) repository. + +**Segmentation Generation:** Segmentation generation was initially planned using only the MOOSE model. However, based on results from a prior comparative analysis on the NLST dataset, CADS segmentations were additionally generated. Both models had previously shown the most consistent performance and did not show visible segmentation errors across the evaluated anatomical structures. Since it was not possible to determine which of the two models performs better on this dataset, both MOOSE and CADS were included in the analysis. + +**Next Steps (Future Work)** +- Extract radiomic features from MOOSE and CADS segmentations using [Radiomics.jl](https://github.com/pzaffino/Radiomics.jl). +- Analyze feature distributions to identify potential segmentation outliers. +- Visually inspect identified outliers using [CrossSegmentationExplorer](https://github.com/ImagingDataCommons/CrossSegmentationExplorer) in 3D Slicer. +- Extend representative sampling by incorporating patient-related attributes. + +### 2. Publish the CrossSegmentationExplorer extension in 3DSlicer +A Pull Request to include CrossSegmentationExplorer as a Tier 1 3D Slicer extension has been created.
+Pull Request: + + + +# Illustrations + + + + + +# Background and References + + + +- Project repository: [nlst-exploration](https://github.com/fedorov/nlst-exploration) +- Imaging Data Commons [documentation](https://learn.canceridc.dev/) [portal](https://portal.imaging.datacommons.cancer.gov/explore/) +- Preprint: ["In Search of Truth: Evaluating Concordance of AI-Based Anatomy Segmentation Models"](https://www.arxiv.org/pdf/2512.15921) +- 3DSlicer [CrossSegmentationExplorer](https://github.com/ImagingDataCommons/CrossSegmentationExplorer) extension +- BigQuery table ID for NLST series-level metadata: idc-external-031.nlst_capstone2025.series_level_characteristics +- This project is continuing earlier PW42 project [“Review of segmentation results quality across various multi-organ segmentation models”](https://projectweek.na-mic.org/PW42_2025_GranCanaria/Projects/ReviewOfSegmentationResultsQualityAcrossVariousMultiOrganSegmentationModels/) and PW43 project ["Evaluating concordance of AI-based anatomy segmentation models"](https://projectweek.na-mic.org/PW43_2025_Montreal/Projects/EvaluatingConcordanceOfAiBasedAnatomySegmentationModels/) +- [Radiomics.jl](https://github.com/pzaffino/Radiomics.jl) + diff --git a/PW44_2026_GranCanaria/Projects/ValidationOfRadiomicsJlLibraryByUsingOvarianCancerImagesAndPossibleIntegrationInSlicer/README.md b/PW44_2026_GranCanaria/Projects/ValidationOfRadiomicsJlLibraryByUsingOvarianCancerImagesAndPossibleIntegrationInSlicer/README.md new file mode 100644 index 000000000..0d4a0fb9d --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/ValidationOfRadiomicsJlLibraryByUsingOvarianCancerImagesAndPossibleIntegrationInSlicer/README.md @@ -0,0 +1,132 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Validation of Radiomics.jl library by using ovarian cancer images and possible integration + in Slicer +category: Quantification and Computation +presenter_location: + 
+key_investigators: + +- name: Paolo Zaffino + affiliation: Magna Graecia University of Catanzaro + country: Italy + +- name: Ciro Benito Raggio + affiliation: Karlsruhe Institute of Technology + country: Germany + +- name: Francesca Spadeda + affiliation: Karlsruhe Institute of Technology + country: Germany + +--- + +# Project Description + + + + +[Radiomics.jl](https://github.com/pzaffino/Radiomics.jl) is a pure Julia library for radiomics feature extraction. +Being a pretty new library, we want to test and validate it by using CT of ovarian cancer patients. + +We would also like to investigate the possibility of calling it from the embedded Python in Slicer. + +Of course any suggestion is more than welcome. + + + + +## Objective + + + + +1. Test Radiomics.jl on ovarian cancer CT +2. Investigate the possibility of calling Radiomics.jl main function in Python + + + + +## Approach and Plan + + + + +1. Compare Radiomics.jl features with those computed by PyRadiomics (considered as the gold standard) +2. Create a shared library and call it from Python +3. Collect comments/suggestions + + + +## Progress and Next Steps + + + + +1. Tested the library on ovarian cancer CT (a few features must be fixed) +2. Tested juliacall to use Radiomics.jl from Python (you don't need to install Julia) +```bash +pip install juliacall +``` + +```python +from juliacall import Main as jl +jl.seval('import Pkg; Pkg.add("Radiomics")') +``` +```python +import SimpleITK as sitk +import numpy as np + +from juliacall import Main as jl +jl.seval("using Radiomics") + +ct_sitk = sitk.ReadImage('DATA_PATH/ct.nii.gz') +mask_sitk = sitk.ReadImage('DATA_PATH/mask.nii.gz') + +ct = sitk.GetArrayFromImage(ct_sitk) +mask = sitk.GetArrayFromImage(mask_sitk) + +spacing = list(ct_sitk.GetSpacing()) + +radiomic_features = dict(jl.Radiomics.extract_radiomic_features(ct, mask, spacing)) +``` +3. 
Created a shared library and used it both in Python and C++ + +```julia +using PackageCompiler + +create_library(".", "radiomicsjl_build"; + lib_name="libradiomicsjl", + force=true, + incremental=true, + filter_stdlibs=true) +``` + +4. Collected very useful comments, suggestions, and potential use-cases (thanks Andrey Fedorov!) + + +# Illustrations + + + + +![Radiomics.jl](https://raw.githubusercontent.com/pzaffino/Radiomics.jl/refs/heads/main/Logo%20Radiomicsjl.png) + +![](https://github.com/user-attachments/assets/1dc97ca0-90a1-4e5f-ae40-67730e3dae16) + + + +# Background and References + + + + +- Radiomics.jl official page: [https://www.radiomicsjl.org](https://www.radiomicsjl.org) +- Radiomics.jl source code: [https://github.com/pzaffino/Radiomics.jl](https://github.com/pzaffino/Radiomics.jl) +- Pyradiomics documentation: [https://pyradiomics.readthedocs.io](https://pyradiomics.readthedocs.io) +- Pyradiomics source code: [https://github.com/AIM-Harvard/pyradiomics/tree/master](https://github.com/AIM-Harvard/pyradiomics/tree/master) diff --git a/PW44_2026_GranCanaria/Projects/ValidationOfTheMhubAiSlicerExtension/README.md b/PW44_2026_GranCanaria/Projects/ValidationOfTheMhubAiSlicerExtension/README.md new file mode 100644 index 000000000..fd164cd18 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/ValidationOfTheMhubAiSlicerExtension/README.md @@ -0,0 +1,89 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Validation of the MHub.ai Slicer Extension +category: Infrastructure +presenter_location: + +key_investigators: + +- name: Leonard Nürnberg + affiliation: BWH + country: USA + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +--- + +# Project Description + + + + +The MHub.ai Slicer Extension gives direct access to ~30 models from within 3D Slicer. 
+ +We want to finalize the extension and provide a stable user experience, focusing on guidance during the setup phase (e.g., how do I install Docker) and improving the integration and functionality. + +A core part of this project is to collect user feedback in a structured way (setup experience, usability, failure modes). We are therefore looking for volunteers who are willing to test the extension before Project Week and during the event. Testers will be asked to follow a short setup and usage checklist and report what works, what breaks, and what is confusing. Feedback will be used to iteratively improve and finalize the extension during Project Week. + + + +## Objective + + + + +1. Finalize a stable version of the extension compatible with current 3D Slicer. +2. Collect actionable user feedback from multiple testers and implement it during Project Week. + + + + +## Approach and Plan + + + + +1. Move the extension into the original repository +2. Fix open bugs, cleanup and restructure implementation +3. Implement setup experience (detect Docker availability) +4. Provide a full step-by-step tutorial using the extension +5. Find voluntary testers (possibly before the start of PW) +6. Collect actionable user feedback from multiple testers before and during PW. + + + +## Progress and Next Steps + + + + +1. Please fill [this form if you want to test the MHub.ai 3D Slicer extension](https://forms.office.com/Pages/ResponsePage.aspx?id=AUGsYwXdcUe81i4qVCHl-qLmGEsaj-pFrsfxy3-hTudUQ0lKR0xZVjlCMFlNNlY3OTRSV1NFN0tQQi4u) + + + + +# Illustrations + + + +

+ ![](https://github.com/user-attachments/assets/18a28dc9-386a-4a04-a3a7-f4bd69ab346e) + ![](https://github.com/user-attachments/assets/413917c6-6f2f-49d3-8412-293b1078747f) +

+ + +# Background and References + + + + +- [MHub.ai](https://mhub.ai/) +- [MHub.ai Model Repository](https://mhub.ai/models) +- [MHub.ai Tutorials](https://github.com/MHubAI/documentation/tree/main/tutorials) +- [MHub Runner (Extension)](https://github.com/MHubAI/SlicerMHubRunner) diff --git a/PW44_2026_GranCanaria/Projects/VisualizationOfVesselsOfTheBrainstemFromSmriData/README.md b/PW44_2026_GranCanaria/Projects/VisualizationOfVesselsOfTheBrainstemFromSmriData/README.md new file mode 100644 index 000000000..25f29905e --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/VisualizationOfVesselsOfTheBrainstemFromSmriData/README.md @@ -0,0 +1,87 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: Long-COVID and the Brain White Matter +category: Segmentation / Classification / Landmarking +presenter_location: + +key_investigators: + +- name: Zora Kikinis + affiliation: Mass General Brigham + country: Boston, USA + +--- + +# Project Description + + + + +About 7% of COVID-19 survivors experience long-lasting symptoms known as long-COVID. There is no proven cure for long-COVID, nor do we know the pathology of the syndrome. Our research project aims to understand how changes in the brain, specifically the white matter, contribute to the symptoms of the neuropsychiatric subtype of long-COVID. + +The brain white matter fiber tract of interest to us is the dorsal vagal complex-corticolimbic fiber system (DVC-CLFS), which connects the brainstem and the frontal brain areas (Kikinis et al. 2024). + + + + + + +## Objective + + + + +Reconstruction of the DVC-CLFS fiber tract in study subjects with and without long-COVID. + + + + + +## Approach and Plan + + + + +I will use diffusion and structural MRI, specifically whole-brain tractography (UKF tractography) and FreeSurfer parcellations, to segment the DVC-CLFS fiber tract in its entirety, extending from the frontal lobe to the brainstem, using 3DSlicer tools. 
+ + + + + + + +## Progress and Next Steps + + + + +1. In the past project weeks, we established a standardized protocol for segmenting the anatomically accurate DVC-CLFS fiber tract from MRI images using 3D Slicer. +2. By the end of this week, I have segmented the fiber tract from 20 study subjects. +3. Next: Segmentation of the fiber tract in 100 more study subjects. +4. Next: To establish the relationship between this specific fiber tract and long-COVID symptoms, diffusion measures—fractional anisotropy (FA), radial diffusivity (RD), mean diffusivity (MD), and free water (FW) — will be extracted from the streamlines of the DVC-CLFS tract. These metrics will be used for statistical analyses and correlated with neuropsychiatric long-COVID symptom scores, including measures of cognition and fatigue. + + + + + +# Illustrations + + + +![](https://github.com/user-attachments/assets/12007fc5-7f9c-4ea1-8e71-21ae046ec721) + +Segmentation of the DVC-CLFS fiber tract (white) from the UKF whole-brain tractography and FreeSurfer-generated parcellations using 3D Slicer tool and its extensions. + + +# Background and References + + + +Investigating the Structural Network Underlying Brain-Immune Interactions Using Combined Histopathology and Neuroimaging: A Critical Review for Its Relevance in Acute and Long COVID-19. +Kikinis et al. 
2024, +https://pubmed.ncbi.nlm.nih.gov/38590789/ diff --git a/PW44_2026_GranCanaria/Projects/WorkingWithDicomDataOnImagesOcrSaveAndAnonymize/README.md b/PW44_2026_GranCanaria/Projects/WorkingWithDicomDataOnImagesOcrSaveAndAnonymize/README.md new file mode 100644 index 000000000..d116db698 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/WorkingWithDicomDataOnImagesOcrSaveAndAnonymize/README.md @@ -0,0 +1,87 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: 'Working with DICOM data on images: OCR, save, and anonymize' +category: DICOM +presenter_location: + +key_investigators: + +- name: Attila Nagy + affiliation: University of Szeged + country: Hungary + +- name: Gábor Fichtinger + affiliation: Queen's University + country: Kingston, ON, Canada + +--- + +# Project Description + + + + +Sometimes CT or MR scans arrive as plain images (JPEG, PNG...), and the data is overlaid on them. +The task is two-fold: +- get the data, because it may store spatial, series and patient information. This is an OCR task. +- get rid of the data, so it can be anonymized + + + + +## Objective + + + + +Our goal was to create an extension prototype that can achieve the above goals, using pure Slicer and python infrastructure. + + + + +## Approach and Plan + + + + +Work on prototype data, test various OCR solutions (Tesseract, EasyOCR), and implement the module with a GUI. + + + + +## Progress and Next Steps + + + + +Module done. +It is able to OCR text in selected ROIs, and then blank out those regions in all of the slices (technically images). +The module supports reviewing the OCR'ed data, saving it as JSON (and also saving it in the scene MRML), and loading it back. +The regions can be blanked both with pure black (0,0,0) and pure white (255,255,255) RGB values. The DICOM functionality is not yet extensively tested, and there is also a placeholder where with the use of some regex DICOM data can automatically be imported into the series.
+ + + + +# Illustrations + + + + +Here is a short video demonstrating the current functionality: + + + + +# Background and References + + + + +_No response_ + diff --git a/PW44_2026_GranCanaria/Projects/WsidicomizerBasedConversionIntoDicomWsiFormatForImagingDataCommons/README.md b/PW44_2026_GranCanaria/Projects/WsidicomizerBasedConversionIntoDicomWsiFormatForImagingDataCommons/README.md new file mode 100644 index 000000000..69bd39b04 --- /dev/null +++ b/PW44_2026_GranCanaria/Projects/WsidicomizerBasedConversionIntoDicomWsiFormatForImagingDataCommons/README.md @@ -0,0 +1,106 @@ +--- +layout: pw44-project + +permalink: /:path/ + +project_title: wsidicomizer-based conversion into DICOM WSI format for Imaging Data Commons +category: DICOM +presenter_location: + +key_investigators: + +- name: Andrey Fedorov + affiliation: BWH + country: USA + +- name: David Clunie + affiliation: PixelMed Publishing + country: USA + +- name: Daniela Schacherer + affiliation: Fraunhofer MEVIS + country: Germany + +--- + +# Project Description + + + + +This project is motivated by the operational needs of the [Imaging Data Commons](https://learn.canceridc.dev/) to have a sustainable mechanism supported by open-source software for harmonizing various slide microscopy images in vendor-specific representations into DICOM Whole Slide Imaging format. Our current procedures are not sustainable, since they rely on the PixelMed tools by David Clunie, which are based on a coding style and build process that has not changed since initiated over 20 years ago, unfortunately have a "bus factor" of 1, and are not open for community contributions. We started using [`wsidicomizer`](https://github.com/imi-bigpicture/wsidicomizer) as a replacement, but there are gaps in the functionality to be remedied, and additional testing that needs to be done before we can consider an operational switch.
+ +This project is the continuation of the [earlier project at PW42 2025](https://projectweek.na-mic.org/PW42_2025_GranCanaria/Projects/EvaluationOfImiBigpictureWsidicomizerAsAToolForConversionIntoDicomWholeSlideImagingFormat/). + + + +## Objective + + + + +1. Implement routing of the DICOM metadata. +2. Evaluate extensibility of the conversion approach to various input types. + + + +## Approach and Plan + + + + +1. Improve the conversion script developed for the HMS SARDANA sample to handle metadata. +2. Evaluate conversion of the .czi samples. +3. Evaluate metadata routing in the Mirax conversion script + + + +## Progress and Next Steps + + + +``` +CSV Files → CCDIMetadataLoader → DomainMetadata → MetadataBuilder → WsiDicomizerMetadata + Dataset + ↓ ↓ + UIDRegistry Code Tables (CSV) + (SQLite) +``` + +1. Set up [exploration repository](https://github.com/fedorov/pw44-wsi-conversion) that included code base from various related conversion tools and conversion scripts I worked on earlier +2. Identified relevant sample earlier converted in IDC from the [CCDI-MCI collection](https://portal.imaging.datacommons.cancer.gov/explore/filters/?collection_id=CCDI&collection_id=ccdi_mci), which was selected as the initial target for development +3. Used Claude Code and Copilot to independently develop a plan and initial implementation for migrating from pixelmed (see `copilot_solution` and `claude_solution`) in the repo +4. Used Claude Code and Copilot to independently scrutinize both solutions and summarize pros and cons (see reports in the top level of the repo) +5. Examined the analysis, implementations, selected Copilot solution as the preferred (supported by the analysis) +6. Iterated to make [the converter](https://github.com/fedorov/pw44-wsi-conversion/blob/main/copilot-solution/convert_ccdi.py) work on the selected CCDI-MCI sample +7. Iterated to add features and refine organization +8.
Confirmed functionality on an independent sample not supplied to the agent during development +9. Confirmed `dciodvfy` DICOM validator does not report issues related to the specimen metadata. +10. Confirmed converted images load with the QuPath BioFormats DICOM loader +11. Next steps: + * validate specimen metadata against the pixelmed-conversion results + * look into ICC profile handling, pixel data total size difference, potentially missing label/overview images + * extend/test for collections other than CCDI + +# Illustrations + + + + +_No response_ +## Curious examples of failures of Claude + +* remind to check documentation in the prompt? +![](https://github.com/user-attachments/assets/a8e4d117-a619-4ac2-9cbb-460b2120835c) + +* need to understand the code ... +![](https://github.com/user-attachments/assets/72c38fe5-8144-43a1-81a9-b7996e6a66a3) + + +# Background and References + + + + +_No response_ diff --git a/PW44_2026_GranCanaria/README.md b/PW44_2026_GranCanaria/README.md new file mode 100644 index 000000000..b0b8cae22 --- /dev/null +++ b/PW44_2026_GranCanaria/README.md @@ -0,0 +1,156 @@ +--- +permalink: /:path/ +redirect_from: +- /PW44_2026_GranCanaria/README.html +- /PW44_2026_GranCanaria/Readme.html + +project_categories: +- IGT and Training +- AI +- DICOM +- VR/AR and Rendering +- Segmentation / Classification / Landmarking +- Quantification and Computation +- Cloud / Web +- Infrastructure +--- + +# Welcome to the web page for the 44th Project Week! + +[This event](https://projectweek.na-mic.org/PW44_2026_GranCanaria/README.html) took place January 26th - 30th, 2026 in Las Palmas de Gran Canaria, Spain, in person. If you have any questions, you can contact the [organizers](#organizers). 
+ +## Location + +### NH Imperial Playa, Salon Tamadaba, ground floor past the stairs/elevators + + + +| | | + +| | | | + + +- **Recommended hotels (special rates) and maps** + - NH Imperial Playa [Map](https://cutt.ly/twjO0PO) + - Booking: [Discounted hotel rooms booking link](https://www.nh-hotels.com/es/event/jornadas-namic) + +- **Transportation** from the airport to the city (Las Palmas de Gran Canaria): + - Taxi (line at the airport) + - [Bus line 60](https://guaguasglobal.com/lineas-horarios/linea/?id=60) + - [Map: Airport - San Telmo bus station](https://www.google.com/maps/dir/Gran+Canaria+Airport,+GC-1,+s%2Fn,+35230+Las+Palmas+de+Gran+Canaria,+Las+Palmas/Estacion+De+Guaguas+SAN+TELMO,+35002+Las+Palmas+de+Gran+Canaria,+Las+Palmas/@28.0191886,-15.4859935,12z/data=!3m1!4b1!4m14!4m13!1m5!1m1!1s0xc40a266c3662d1d:0x824bcf7e159f85d4!2m2!1d-15.3874042!2d27.9289223!1m5!1m1!1s0xc40958500f0b3f5:0x3693fb0e3c418af2!2m2!1d-15.4158957!2d28.109201!3e3?entry=ttu) +- The city has good bus/taxi service and is also walkable. + +[Google Photos Album](https://photos.app.goo.gl/xsBojsMjNqvp9FD19) + +## Preparation meetings + +Preparations meetings will be at 10 AM EDT and start Tuesday, November 18 2025. [Zoom Link](https://etsmtl.zoom.us/j/98196156238?pwd=q6SKGglkISvueUC2Xy2z38oBz6QKtn.1) + +## Registration + +- **Registration is now closed.** +- Remote participation will not be possible at this project week +- Registration costs 480€ (increases to 525€ after December 21), and includes coffee breaks, snacks, and lunch at the hotel for the entire week + + + +## Discord +The **Discord** application is used to communicate between team members and organize activities before and during Project Week. Please join the Project Week [Discord server](https://discord.gg/AkxzKvqMBp) as soon as possible and explore its functionality before the workshop. For more information on the use of Discord before and during Project Week, please visit [this page](../common/Discord.md). 
+ +## Agenda + +{% include calendar.md from="2026-01-26" to="2026-01-30"%} + +## Breakout sessions + +[3D Slicer Breakout Slides](https://docs.google.com/presentation/d/1RqMDb4tENGnL9HUIuCkr3X2xK7B61-DBWyECkOqIHJk/edit?usp=sharing) + + +## Projects + +To learn how to create or update project pages, please refer to the [contributing project pages](ContributingProjectPages.md) section. + +{% include projects.md %} + +## Registrants + +Do not add your name to this list below. It is maintained by the organizers based on your registration. + +List of registered participants so far (names will be added here after processing registrations): + + + + +1. Steve Pieper, Isomics, Inc., USA +1. Ron Kikinis, Brigham and Women's Hospital, Harvard Medical School, USA +1. Zora Kikinis, Brigham and Women's Hospital, Harvard Medical School, USA +1. Elise Donszelmann-Lund, McGill University, Canada +1. Martin Bellehumeur, Radical Imaging, Germany +1. Simon Drouin, École de Technologie Supérieure, Canada +1. Attila Nagy, University of Szeged, Hungary +1. Juan Ruiz-Alzola, University of Las Palmas de Gran Canaria, Spain +1. Lina Bucher, Karlsruher Institut für Technologie (KIT), Germany +1. Thibault Pelletier, Kitware Europe, France +1. Csaba Pintér, Ebatinca SL, Spain +1. Attila Tanács, University of Szeged, Hungary +1. Hamid Alavi, University of Twente, The Netherlands +1. Ciro Benito Raggio, Karlsruhe Institute of Technology, Germany +1. Francesca Spadea, Karlsruhe Institute of Technology, Germany +1. Andras Lasso, Queen's University, Canada +1. Isabel Frolick, McGill University, Canada +1. Deepa Krishnaswamy, Brigham and Women's Hospital, Harvard Medical School, USA +1. Domenico Riggio, Karlsruhe Institute of Technology, Germany +1. Álvaro Falcón Santana, Instituto de Microelectrónica Aplicada (IUMA) - Universidad de Las Palmas de Gran Canaria (ULPGC), Spain +1. Gabor Fichtinger, Queen's University, Canada +1. Gabriella d'Albenzio, School of Computing, Queen's University, Canada +1. 
Alejandro Rodríguez Moreno, Ebatinca SL, Spain +1. Paolo Zaffino, Magna Graecia University of Catanzaro, Italy +1. Lorena Romeo, Magna Graecia University of Catanzaro, Italy +1. Ebrahim Ebrahim, Kitware, Inc. USA +1. Elena Scalbi, Politecnico di Milano, Italy +1. Lena Giebeler, RWTH Aachen University, Germany +1. Carlos Allende Prieto, Instituto de Astrofisica de Canarias, Spain +1. Rafael Palomar, NTNU / Oslo University Hospital, Norway +1. Kyle Sunderland, Queen's University, Canada +1. Chi Zhang, Texas A&M University College of Dentistry, USA +1. Andrey Fedorov, Brigham and Women's Hospital, USA +1. Andy Huynh, Talk2View, Australia +1. Alexandra Ertl, German Cancer Research Center (DKFZ), Germany +1. Maximilian Fischer, German Cancer Research Center (DKFZ), Germany +1. Carl-Fredrik Westin, Harvard Medical School, Brigham and Women's Hospital, USA +1. Robin Peretzke, German Cancer Research Center (DKFZ), Germany +1. Paul Baksic, Inria, France +1. Leonard Nürnberg, Maastricht University, The Netherlands +1. Sam Horvath, Kitware, USA +1. Arthur Chakwizira, Brigham and Women's Hospital, Harvard Medical School, USA +1. Ben Zwick, The University of Western Australia / Talk2View, Australia +1. David Haberl, Zenta, Austria +1. Javier Pascau, Universidad Carlos III de Madrid, Spain +1. Alicia Pose Díez de la Lastra, Universidad Carlos III de Madrid, Spain +1. Michael Halle, Brigham and Women's Hospital, USA +1. Hans Knutsson, Linköping University, Sweden + + + +## Statistics + + + +## Organizers + +### Local organizing committee +* Juan Ruiz-Alzola, PhD, Professor of Imaging Technologies, director of the Grupo de Tecnología Médica y Audiovisual (GTMA), [Instituto Universitario de Investigaciones Biomédicas y Sanitarias (IUIBS)](https://www.iuibs.ulpgc.es/), [Universidad de Las Palmas de Gran Canaria (ULPGC)](https://www.ulpgc.es/) +* Csaba Pintér, PhD, [EBATINCA](https://ebatinca.com) +* [The EBATINCA team](https://ebatinca.com/en/empresa/team). 
For inquiries please send email to + +### Global Project Week organizing committee + +* [@tkapur](https://github.com/tkapur) ([Tina Kapur, PhD](http://www.spl.harvard.edu/pages/People/tkapur)), +* [@drouin-simon](https://github.com/drouin-simon) ([Simon Drouin, PhD](https://drouin-simon.github.io/ETS-web//)) +* [@rafaelpalomar](https://github.com/rafaelpalomar) ([Rafael Palomar, PhD](https://www.ntnu.edu/employees/rafaelp)) +* [@piiq](https://github.com/piiq) ([Theodore Aptekarev](https://discourse.slicer.org/u/pll_llq)) +* [@sjh26](https://github.com/sjh26) ([Sam Horvath, PhD](https://www.kitware.com/samantha-horvath/)) + +## History +Please read about our experience in running these events since 2005: [Increasing the Impact of Medical Image Computing Using +Community-Based Open-Access Hackathons: the NA-MIC and 3D Slicer Experience](https://labs.cs.queensu.ca/perklab/wp-content/uploads/sites/3/2024/02/Kapur2016.pdf). diff --git a/PW45_2026_Boston/ContributingProjectPages.md b/PW45_2026_Boston/ContributingProjectPages.md new file mode 100644 index 000000000..609e5bef0 --- /dev/null +++ b/PW45_2026_Boston/ContributingProjectPages.md @@ -0,0 +1,84 @@ +--- +--- +{% comment %}Extract event name (e.g "PW39_2023_Montreal"). {% endcomment %} +{%- assign event_name = page.path | split: '/' | first -%} + +# Contributing Project Pages + +## Creating new project pages + +With the [Project Week GitHub Issue page](https://github.com/NA-MIC/ProjectWeek/issues/new/choose), you have two options to create your Project Page: + + +1. [Create a Project](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=sjh26&labels=project%2Cevent%3A{{ event_name }}&projects=&template=project.yml&title=Project%3A+) issue: If you are ready to create your page, you can simply create a “Project” issue. This issue will allow you to fill out a convenient form to provide the necessary details. 
The Project Week website team will then review the issue and trigger the page creation pull request. + +2. [Create the project page yourself using the template](Projects/Template/README.md): If you prefer to create the Project Page yourself, you can still do so by using the provided template and submitting a pull request. + +## Project Creation Tips + +- Get your project pages created early! The day before is best to make sure everything you need for your presentation is available. The ProjectWeek site will be closed to edits for the ***10 minutes before*** both the opening and closing presentation session to ensure site generation. After this 10 minute period edits will be re-enabled. + +- If you are [creating the project page yourself using the template](Projects/README.md), **don't reuse a project page template from a previous year.** We have made significant updates to the template to support auto-generation of project pages, so previous years' templates will not function properly. + + - When naming the file, **please ensure there are no spaces/special characters in the folder or file name** + - Make sure to fill out / update all of the information at the top of the README file (title, category, location, etc) + +- Remember to fill out the title for your project when using the [project creation issue](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=drouin-simon%2Cpiiq%2Crafaelpalomar%2Csjh26%2Ctkapur&labels=project%2Cevent%3A{{ event_name }}&projects=&template=project.yml&title=Project%3A+) + +- Check the formatting on the Key Investigators list when creating a [project issue](https://github.com/NA-MIC/ProjectWeek/issues/new?assignees=drouin-simon%2Cpiiq%2Crafaelpalomar%2Csjh26%2Ctkapur&labels=project%2Cevent%3A{{ event_name }}&projects=&template=project.yml&title=Project%3A+) (this is critical for page generation): + + `- Firstname Lastname (Affiliation, Country)` + +## Updating existing project pages + +Here are the steps 
using the GitHub web interface: + +1. Navigate to your project's `README.md` on the GitHub website. For instance, if you want to update a project called **YourProjectName**, visit the URL like the following: + + ``` + https://github.com/NA-MIC/ProjectWeek/blob/master/{{ event_name }}/Projects/YourProjectName/README.md + ``` + +2. Click the edit button, as shown in this screenshot: ![Screenshot 2023-06-12 10 43 35](https://github.com/NA-MIC/ProjectWeek/assets/25040869/ab01a7bf-c1e4-4c23-9aca-e2c6421ca530) + +3. You can now edit the page, add images by dragging and dropping, and more. + +4. Once done, click "Commit Changes", and follow the instructions to create a fork and a pull request to add your changes to the webpage. See this screenshot for reference: ![Screenshot 2023-06-12 10 50 50](https://github.com/NA-MIC/ProjectWeek/assets/25040869/180e81bb-d4f9-4f65-8569-a93192b2828e) + +## Videos in project pages + +Here are some steps to make sure all of your awesome videos render correctly: + +1. Videos added by drag and drop will render correctly when viewed through GitHub, but need some extra tweaks to work in the final generated website. + + + In your `README.md`, if you have a video link that looks like this: + + ``` + https://github.com/NA-MIC/ProjectWeek/assets/66890913/8f257f29-fa9c-4319-8c49-4138003eba27 + ``` + + Update it to: + + ```html + + ``` + +2. Links to externally hosted videos (such as YouTube) will need an iframe. 
+ + Replace: + + ``` + https://youtu.be/ZWxE5QcGvE8 + ``` + + with + + ````html + + ```` diff --git a/PW45_2026_Boston/Projects/Template/README.md b/PW45_2026_Boston/Projects/Template/README.md new file mode 100644 index 000000000..7e9662bfd --- /dev/null +++ b/PW45_2026_Boston/Projects/Template/README.md @@ -0,0 +1,58 @@ +--- +layout: pw45-project + +permalink: /:path/ + +project_title: Write full project title here +category: Uncategorized +presenter_location: Online + +key_investigators: +- name: Person Doe + affiliation: University + +- name: Person2 Doe2 + affiliation: University2 + country: Spain +--- + +# Project Description + + + +## Objective + + + +1. Objective A. Describe **what you plan to achieve** in 1-2 sentences. +1. Objective B. ... +1. Objective C. ... + +## Approach and Plan + + + +1. Describe specific steps of **what you plan to do** to achieve the above described objectives. +1. ... +1. ... + +## Progress and Next Steps + + + +1. Describe specific steps you **have actually done**. +1. ... +1. ... 
+ +# Illustrations + + + +# Background and References + + diff --git a/PW45_2026_Boston/Projects/Template/README.md.j2 b/PW45_2026_Boston/Projects/Template/README.md.j2 new file mode 100644 index 000000000..f12140cd7 --- /dev/null +++ b/PW45_2026_Boston/Projects/Template/README.md.j2 @@ -0,0 +1,56 @@ +--- +layout: pw45-project + +permalink: /:path/ + +project_title: {{ title | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +category: {{ category | regex_replace("\n$|\n\.\.\.\n$", "") }} +presenter_location: {{ presenter_location | regex_replace("\n$|\n\.\.\.\n$", "") }} + +key_investigators: +{% for investigator in investigators %} +- name: {{ investigator.name | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} + affiliation: {{ investigator.affiliation | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +{%- if investigator.country %} + country: {{ investigator.country | to_yaml | regex_replace("\n$|\n\.\.\.\n$", "") }} +{%- endif %} +{% endfor %} +--- + +# Project Description + + + +{{ description }} + +## Objective + + + +{{ objective }} + +## Approach and Plan + + + +{{ approach }} + +## Progress and Next Steps + + + +{{ progress }} + +# Illustrations + + + +{{ illustrations }} + +# Background and References + + + +{{ background }} diff --git a/PW45_2026_Boston/README.md b/PW45_2026_Boston/README.md new file mode 100644 index 000000000..23e5a8b53 --- /dev/null +++ b/PW45_2026_Boston/README.md @@ -0,0 +1,89 @@ +--- +permalink: /:path/ +redirect_from: +- /PW_45_2026_Boston/README.html +- /PW_45_2026_Boston/Readme.html + +project_categories: +- IGT and Training +- AI +- DICOM +- VR/AR and Rendering +- Segmentation / Classification / Landmarking +- Quantification and Computation +- Cloud / Web +- Infrastructure +--- + +# Welcome to the web page for the 45th Project Week! + +[This event](https://projectweek.na-mic.org/PW_45_2026_Boston/README.html) will take place June 22nd - 26th, 2026 in MIT, Boston, USA, in person. 
If you have any questions, you can contact the [organizers](#organizers). + +## Location + +Rooms TBA + +## Preparation meetings + +Preparation meetings will be at 10 AM EDT and start Tuesday, May 5th 2026. [Zoom Link](https://etsmtl.zoom.us/j/96828430830?pwd=9EqBWHKc4fytzNThgha4ByM1hoQwPr.1) + + +## Communication + +- Sign up for the new mailing list [here](https://gaggle.email/join/3d-slicer-project-week@gaggle.email) + +## Registration + +TBA + + + + + + + +## Agenda + +{% include calendar.md from="2026-06-22" to="2026-06-26"%} + +## Breakout sessions + + + +## Projects + +To learn how to create or update project pages, please refer to the [contributing project pages](ContributingProjectPages.md) section. + +{% include projects.md %} + +## Registrants + +Do not add your name to this list below. It is maintained by the organizers based on your registration. + +List of registered participants so far (names will be added here after processing registrations): + + + + + + + + +## Statistics + + + +## Organizers + + +* [@tkapur](https://github.com/tkapur) ([Tina Kapur, PhD](https://spl.harvard.edu/people/tina-kapur-phd)), +* [@drouin-simon](https://github.com/drouin-simon) ([Simon Drouin, PhD](https://www.etsmtl.ca/etudier-a-lets/corps-enseignant/sidrouin) +* [@rafaelpalomar](https://github.com/rafaelpalomar) ([Rafael Palomar, PhD](https://www.ntnu.edu/employees/rafaelp)) +* Theodore Aptekarev +* [@sjh26](https://github.com/sjh26) ([Sam Horvath, PhD](https://www.kitware.com/samantha-horvath/)) +* [@deepakri201](https://github.com/deepakri201) ([Deepa Krishnaswamy, PhD](https://scholar.google.com/citations?user=X8jB1n0AAAAJ&hl=en)) + +## History +Please read about our experience in running these events since 2005: [Increasing the Impact of Medical Image Computing Using +Community-Based Open-Access Hackathons: the NA-MIC and 3D Slicer 
Experience](https://labs.cs.queensu.ca/perklab/wp-content/uploads/sites/3/2024/02/Kapur2016.pdf). diff --git a/README.md b/README.md index e47d89f7b..c593dbeff 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ - ## Welcome to the main page for the Project Week events! -### The [34th Project Week](PW34_2020_Virtual/README.md) will be held virtually December 14-18, 2020. + +### The [45th Project Week](PW45_2026_Boston/README.md) will take place on June 22-26, 2026, in person at MIT, Boston, USA. ## Frequently Asked Questions @@ -13,19 +13,19 @@ The Project Week is a week-long hackathon of hands on activity in which medical #### When, where, how much? -Twice a year - January in Europe, and June at MIT. Ad-hoc meetings are added occasionally. The registration fee is approximately 350 (USD or Euro), and is used to cover coffee and food. +The Project Week events are held twice a year, January in Europe, and June in North America, with ad-hoc meetings added occasionally. The registration fee for in-person events is approximately 500 (USD or Euro), and is used to cover breakfast, lunch, and coffee. #### How does it work? -Weekly videoconferences for preparation begin 8-12 weeks before an event. Potential participants propose projects during these meetings, and collaboratively create a list of projects that are of mutual interest. The projects include platform work, algorithm development, and biomedical applications. Through the course of the meetings, each participant selects one or more project teams and develops goals for the week. The first day of the Project Week itself begins with a 2-hour in-person introduction to all projects and participants. The rest of the week consists of a mix of working sessions and breakout sessions on special topics, as decided by the participants during the preparatory meetings. +Weekly videoconferences for preparation begin 8 weeks before an event. 
Potential participants propose projects during these meetings, and collaboratively create a list of projects that are of mutual interest. The projects include platform work, algorithm development, and biomedical applications. Through the course of the meetings, each participant selects one or more project teams and develops goals for the week. The first day of the Project Week itself begins with a 2-hour introduction to all projects and participants. The rest of the week consists of a mix of working sessions and breakout sessions on special topics, as decided by the participants during the preparatory meetings. The event concludes on the last (5th day) with a 2-hour progress report for all projects. #### Who can attend? -Project Weeks are open to all and publicly advertised. One-day participation is permitted for first-time attendees, but rest stay for the entire event. Email announcements are sent to the [Project Week mailing list](https://public.kitware.com/mailman/listinfo/na-mic-project-week) (for continuity) and posted on the [Project Week forum](https://discourse.slicer.org/c/community/project-week). +Project Weeks are open to all and publicly advertised. Email announcements are sent to the [Project Week mailing list](https://public.kitware.com/mailman/listinfo/na-mic-project-week) (for continuity) and posted on the [Project Week forum](https://discourse.slicer.org/c/community/project-week). One-day participation is permitted for first-time attendees, but rest stay for the entire event. #### Who should attend? -This is harder to determine. Your best bet is to contact the organizers (see below) or attend a preparatory videoconference (see each Project Week page for details). +Please attend a preparatory videoconference to determine if your project has resonance with the other participants who are planning to attend that particular event (see each Project Week page for details). #### What else? 
@@ -33,24 +33,36 @@ The Project Week series was founded in 2005, along with the National Alliance fo #### Who to contact? -Project Weeks are led by [@tkapur](https://github.com/tkapur), ([Tina Kapur, PhD](http://www.spl.harvard.edu/pages/People/tkapur)) who is happy to tell you more about them. +Project Weeks are led by [@tkapur](https://github.com/tkapur) ([Tina Kapur, PhD](https://spl.harvard.edu/people/tina-kapur-phd)), [@drouin-simon](https://github.com/drouin-simon) ([Simon Drouin, PhD](https://www.etsmtl.ca/etudier-a-lets/corps-enseignant/sidrouin)), [@rafaelpalomar](https://github.com/rafaelpalomar) ([Rafael Palomar, PhD](https://www.ntnu.edu/employees/rafaelp)), Theodore Aptekarev, [@sjh26](https://github.com/sjh26) ([Sam Horvath, PhD](https://www.kitware.com/samantha-horvath/)) and [@deepakri201](https://github.com/deepakri201) ([Deepa Krishnaswamy, PhD](https://scholar.google.com/citations?user=X8jB1n0AAAAJ&hl=en)) who are happy to tell you more about them. ## Upcoming Project Weeks -- The 2020 Project Week will be held virtually and will take place December 14-18, 2020. + Project Week 45 will take place on June 22-26, 2026, in person at MIT, Boston, USA. -* Please contact the organizers if you have interest in hosting a Project Week event. + Project Week 46 will take place on January 25-29, 2027, in person in Las Palmas de Gran Canaria, Spain. ## Past Project Weeks | Events | Registrants | |----|----| -| [2020 January 20-24: Project Week 33](PW33_2020_GranCanaria/README.md) - Gran Canaria, Spain.
It recorded 66 Registrants (from 16 countries) who worked on 43 projects. | 66 | +| [2026 June 22-26 Project Week 45](PW45_2026_Boston/README.md) - MIT, Cambridge, MA, USA. | | +| [2026 January 26-30 Project Week 44](PW44_2026_GranCanaria/README.md) - Gran Canaria, Spain. | 48 | +| [2025 June 23-27: Project Week 43](PW43_2025_Montreal/README.md) - Montreal, Canada. | 56 | +| [2025 January 27-31: Project Week 42](PW42_2025_GranCanaria/README.md) - Gran Canaria, Spain. | 63 | +| [2024 June 24-28: Project Week 41](PW41_2024_MIT/README.md) - MIT, Cambridge, MA, USA. | 116 | +| [2024 January 29-February 2: Project Week 40](PW40_2024_GranCanaria/README.md) - Gran Canaria, Spain
It recorded 204 registered attendees (from 29 countries, 51% first-timers). |204 | +| [2023 June 12-16: Project Week 39](PW39_2023_Montreal/README.md) - Online and Montreal, Canada
It recorded 154 registered attendees (from 23 countries, 46% first-timers). | 154 | +| [2023 January 30-February 3rd: Project Week 38](PW38_2023_GranCanaria/README.md) - Online and Gran Canaria, Spain
It recorded 251 registered attendees (from 30 countries, 52% first-timers). | 251 | +| [2022 June 27-July 1: Project Week 37](PW37_2022_Virtual/README.md) - Online
It recorded 117 registered attendees (from 20 countries, 41% first-timers.) | 117 | +| [2022 Jan 17-21: Project Week 36](PW36_2022_Virtual/README.md) - Online and Gran Canaria, Spain.
It recorded 131 registered attendees (from 26 countries, 44% first-timers.) | 131 | +| [2021 June 28-July 2: Project Week 35](PW35_2021_Virtual/README.md) - Internet.
It recorded 140 registered attendees (from 23 countries, 44% first-timers.)|140| +| [2020 December 14-18: Project Week 34](PW34_2020_Virtual/README.md) - Internet.
It recorded 204 registered attendees (from 26 countries and 101 institutions.)|204| +| [2020 January 20-24: Project Week 33](PW33_2020_GranCanaria/README.md) - Gran Canaria, Spain.
It recorded 66 registered attendees (from 16 countries) who worked on 43 projects. | 66 | | [2019 July 15-29: Project Week 32](PW32_2019_London_Canada/README.md) - Robarts Research, London, Canada. | 38 | -| [2019 June 24-28 :Project Week 31](PW31_2019_Boston/README.md) - MIT, Cambridge, MA, USA.
It recorded 78 attendees, who worked on 47 projects and attended 5 breakout sessions. | 78 | -| [2019 January 28-February 1: Project Week 30](PW30_2019_GranCanaria/README.md) - Gran Canaria, Spain.
It recorded 60 Registrants (from 13 countries) who worked on 33 projects. | 60 | -| [2018 July 16-20: Project Week 29](PW29_2018_London_Canada/README.md) - Robarts Research Institute, London, Ontario, Canada.
It recorded 37 Registrants. This was a satellite event with a slight focus on new users and community expansion. | 37 | -| [2018 June 25-29: Project Week 28](PW28_2018_GranCanaria/README.md) - Gran Canaria, Spain.
It recorded 58 Registrants, who worked on 31 projects.| 58 | +| [2019 June 24-28 :Project Week 31](PW31_2019_Boston/README.md) - MIT, Cambridge, MA, USA.
It recorded 78 registered attendees, who worked on 47 projects and attended 5 breakout sessions. | 78 | +| [2019 January 28-February 1: Project Week 30](PW30_2019_GranCanaria/README.md) - Gran Canaria, Spain.
It recorded 60 registered attendees (from 13 countries) who worked on 33 projects. | 60 | +| [2018 July 16-20: Project Week 29](PW29_2018_London_Canada/README.md) - Robarts Research Institute, London, Ontario, Canada.
It recorded 37 registered attendees. This was a satellite event with a slight focus on new users and community expansion. | 37 | +| [2018 June 25-29: Project Week 28](PW28_2018_GranCanaria/README.md) - Gran Canaria, Spain.
It recorded 58 registered attendees, who worked on 31 projects.| 58 | | [2018 January 8-12: Project Week 27](PW27_2018_Boston/README.md) - MIT, Cambridge, MA, USA.
It recorded 72 registered attendees, who worked on 52 projects and attended 5 breakout sessions. | 72 | | [2017 July 17-21: Project Week 26](PW26_2017_London_Canada/README.md) - Robarts Research Institute, London, ON, Canada. | 58 | | [2017 June 26-30: Project Week 25](https://www.na-mic.org/wiki/2017_Summer_Project_Week) - Catanzaro Lido, Calabria, Italy.
It recorded 51 registered attendees, who worked on 29 projects. | 51 | @@ -83,4 +95,3 @@ Project Weeks are led by [@tkapur](https://github.com/tkapur), ([Tina Kapur, PhD --- This page is hosted [from the NA-MIC organization's ProjectWeek repository on github.com](https://github.com/NA-MIC/ProjectWeek) and is published at [https://projectweek.na-mic.org](https://projectweek.na-mic.org) - diff --git a/_config.yml b/_config.yml index 5c63dcb52..92ccf7313 100644 --- a/_config.yml +++ b/_config.yml @@ -1,4 +1,34 @@ +title: NA-MIC Project Weeks theme: jekyll-theme-primer +repository: NA-MIC/ProjectWeek plugins: - jekyll-mentions + - jekyll-redirect-from + - jemoji + +redirect_from: + json: false + +# Exclude from processing. +# The following items will not be processed, by default. +# Any item listed under the `exclude:` key here will be automatically added to +# the internal "default list". +# +# Excluded items can be processed by explicitly listing the directories or +# their entries' file path in the `include:` list. +# +exclude: + # Default + - .sass-cache/ + - .jekyll-cache/ + - gemfiles/ + - Gemfile + - Gemfile.lock + - node_modules/ + - vendor/bundle/ + - vendor/cache/ + - vendor/gems/ + - vendor/ruby/ + # This project + - "*.j2" diff --git a/_includes/calendar.md b/_includes/calendar.md new file mode 100644 index 000000000..1fb9536a6 --- /dev/null +++ b/_includes/calendar.md @@ -0,0 +1,36 @@ + + + + +
+
+ + + + + + +[How to add this calendar to your own?](../common/Calendar.md) + + diff --git a/_includes/project_generate_category.md b/_includes/project_generate_category.md new file mode 100644 index 000000000..6adadc764 --- /dev/null +++ b/_includes/project_generate_category.md @@ -0,0 +1,78 @@ + + +{% for pw_page in site.pages %} + + {% comment %}Extract page event name (e.g "PW39_2023_Montreal"){% endcomment %} + {% assign pw_page_event_name = pw_page.path | split: '/' | first %} + + {% comment %}Only process page associated with the current event.{% endcomment %} + {% if pw_page_event_name != event_name %} + {% continue %} + {% endif %} + + {% comment %}Extract page event type (e.g "Projects").{% endcomment %} + {% assign pw_page_type = pw_page.path | split: '/' | slice: 1 | first %} + + {% comment %}Only process pages located in the "Projects" directory.{% endcomment %} + {% if pw_page_type != "Projects" %} + {% continue %} + {% endif %} + + {% comment %}Extract page project name (e.g "SlicerVRInteraction").{% endcomment %} + {% assign project_name = pw_page.path | split: '/' | slice: 2 | first | downcase %} + + {% comment %}Ignore "Template" and "README.md" file.{% endcomment %} + {% if project_name == "template" or project_name == "readme.md" %} + {% continue %} + {% endif %} + + {% assign pw_page_category = pw_page.category | default:"Uncategorized" %} + + {% comment %}Force all non-matching projects to Uncategorized.{% endcomment %} + {% unless page.project_categories contains pw_page_category %} + {% assign pw_page_category = "Uncategorized" %} + {% endunless %} + + {% if pw_page_category != requested_category %} + {% continue %} + {% endif %} + + {% assign project_count = project_count | plus: 1 %} + + {% if page.project_categories contains pw_page_category %} + + {% comment %}If if applies, add catergory header.{% endcomment %} + {% unless categories contains pw_page_category %} +{% capture categories %} +{{ categories }} +### {{ pw_page_category }} +{% 
endcapture %} + {% endunless %} + + {% comment %}Append category entry.{% endcomment %} +{% capture categories %} +{{ categories }} +1. [{{ pw_page.project_title }}]({{ pw_page.url }}) ( +{%- for investigator in pw_page.key_investigators -%} + {{ investigator.name }}{% unless forloop.last %}, {% endunless -%} +{%- endfor -%} +) +{% endcapture %} + + {% else %} + +{% capture uncategorized %} +{{ uncategorized }} +1. [{{ pw_page.project_title }}]({{ pw_page.url }}) ( +{%- for investigator in pw_page.key_investigators -%} + {{ investigator.name }}{% unless forloop.last %}, {% endunless -%} +{%- endfor -%} +) (Category: {{pw_page.category}}) +{% endcapture %} + + {% endif %} +{% endfor %} diff --git a/_includes/project_generate_category_noloc.md b/_includes/project_generate_category_noloc.md new file mode 100644 index 000000000..236767148 --- /dev/null +++ b/_includes/project_generate_category_noloc.md @@ -0,0 +1,78 @@ + + +{% for pw_page in site.pages %} + + {% comment %}Extract page event name (e.g "PW39_2023_Montreal"){% endcomment %} + {% assign pw_page_event_name = pw_page.path | split: '/' | first %} + + {% comment %}Only process page associated with the current event.{% endcomment %} + {% if pw_page_event_name != event_name %} + {% continue %} + {% endif %} + + {% comment %}Extract page event type (e.g "Projects").{% endcomment %} + {% assign pw_page_type = pw_page.path | split: '/' | slice: 1 | first %} + + {% comment %}Only process pages located in the "Projects" directory.{% endcomment %} + {% if pw_page_type != "Projects" %} + {% continue %} + {% endif %} + + {% comment %}Extract page project name (e.g "SlicerVRInteraction").{% endcomment %} + {% assign project_name = pw_page.path | split: '/' | slice: 2 | first | downcase %} + + {% comment %}Ignore "Template" and "README.md" file.{% endcomment %} + {% if project_name == "template" or project_name == "readme.md" %} + {% continue %} + {% endif %} + + {% assign pw_page_category = pw_page.category | 
default:"Uncategorized" %} + + {% comment %}Force all non-matching projects to Uncategorized.{% endcomment %} + {% unless page.project_categories contains pw_page_category %} + {% assign pw_page_category = "Uncategorized" %} + {% endunless %} + + {% if pw_page_category != requested_category %} + {% continue %} + {% endif %} + + {% assign project_count = project_count | plus: 1 %} + + {% if page.project_categories contains pw_page_category %} + + {% comment %}If if applies, add catergory header.{% endcomment %} + {% unless categories contains pw_page_category %} +{% capture categories %} +{{ categories }} +### {{ pw_page_category }} +{% endcapture %} + {% endunless %} + + {% comment %}Append category entry.{% endcomment %} +{% capture categories %} +{{ categories }} +1. [{{ pw_page.project_title }}]({{ pw_page.url }}) ( +{%- for investigator in pw_page.key_investigators -%} + {{ investigator.name }}{% unless forloop.last %}, {% endunless -%} +{%- endfor -%} +) +{% endcapture %} + + {% else %} + +{% capture uncategorized %} +{{ uncategorized }} +1. [{{ pw_page.project_title }}]({{ pw_page.url }}) ( +{%- for investigator in pw_page.key_investigators -%} + {{ investigator.name }}{% unless forloop.last %}, {% endunless -%} +{%- endfor -%} +) (Category: {{pw_page.category}}) +{% endcapture %} + + {% endif %} +{% endfor %} diff --git a/_includes/projects.md b/_includes/projects.md new file mode 100644 index 000000000..61adcf8ba --- /dev/null +++ b/_includes/projects.md @@ -0,0 +1,27 @@ + + +{% comment %}Extract event name (e.g "PW39_2023_Montreal"). 
{% endcomment %} +{% assign event_name = page.path | split: '/' | first %} + +{% assign project_count = 0 %} +{% assign categories = "" %} +{% assign uncategorized = "" %} + +{% for requested_category in page.project_categories %} +{% include project_generate_category.md %} +{% endfor %} + +{% assign requested_category = "Uncategorized" %} +{% include project_generate_category.md %} + +_The {{ event_name }} event has a total of {{ project_count }} projects._ + +{{ categories }} + +### Uncategorized + +_This section lists projects that either have no category assigned, are assigned to the "Uncategorized" category, or are assigned to a different category than the ones listed above. If you are unable to find a category that is suitable for your project, or believe that a specific category is missing, please discuss it with the [organizers](#organizers)._ + +{{ uncategorized }} + + diff --git a/_includes/projects_noloc.md b/_includes/projects_noloc.md new file mode 100644 index 000000000..1af13bf61 --- /dev/null +++ b/_includes/projects_noloc.md @@ -0,0 +1,27 @@ + + +{% comment %}Extract event name (e.g "PW39_2023_Montreal"). {% endcomment %} +{% assign event_name = page.path | split: '/' | first %} + +{% assign project_count = 0 %} +{% assign categories = "" %} +{% assign uncategorized = "" %} + +{% for requested_category in page.project_categories %} +{% include project_generate_category_noloc.md %} +{% endfor %} + +{% assign requested_category = "Uncategorized" %} +{% include project_generate_category.md %} + +_The {{ event_name }} event has a total of {{ project_count }} projects._ + +{{ categories }} + +### Uncategorized + +_This section lists projects that either have no category assigned, are assigned to the "Uncategorized" category, or are assigned to a different category than the ones listed above. 
If you are unable to find a category that is suitable for your project, or believe that a specific category is missing, please discuss it with the [organizers](#organizers)._ + +{{ uncategorized }} + + diff --git a/_layouts/default.html b/_layouts/default.html index c06d4ce94..05e401ba1 100644 --- a/_layouts/default.html +++ b/_layouts/default.html @@ -21,7 +21,7 @@
{% if site.title and site.title != page.title %} -

{{ site.title }}

+

{{ site.title }}

{% endif %} {{ content }} diff --git a/_layouts/pw39-project.html b/_layouts/pw39-project.html new file mode 100644 index 000000000..868ee88d7 --- /dev/null +++ b/_layouts/pw39-project.html @@ -0,0 +1,23 @@ +--- +layout: default +--- + +Back to Projects List + +

{{ page.project_title }}

+ +

Key Investigators

+ +
    +{% if page.key_investigators %} + {% for investigator in page.key_investigators %} +
  • {{ investigator.name }} ({{ investigator.affiliation }}{% if investigator.country %}, {{ investigator.country }}{% endif %})
  • + {% endfor %} +{% endif %} +
+ +Presenter location: {{page.presenter_location}} + +{{ content }} + +
diff --git a/_layouts/pw40-project.html b/_layouts/pw40-project.html new file mode 100644 index 000000000..868ee88d7 --- /dev/null +++ b/_layouts/pw40-project.html @@ -0,0 +1,23 @@ +--- +layout: default +--- + +Back to Projects List + +

{{ page.project_title }}

+ +

Key Investigators

+ +
    +{% if page.key_investigators %} + {% for investigator in page.key_investigators %} +
  • {{ investigator.name }} ({{ investigator.affiliation }}{% if investigator.country %}, {{ investigator.country }}{% endif %})
  • + {% endfor %} +{% endif %} +
+ +Presenter location: {{page.presenter_location}} + +{{ content }} + + diff --git a/_layouts/pw41-project.html b/_layouts/pw41-project.html new file mode 100644 index 000000000..c74cd39f0 --- /dev/null +++ b/_layouts/pw41-project.html @@ -0,0 +1,23 @@ +--- +layout: default +--- + +Back to Projects List + +

{{ page.project_title }}

+ +

Key Investigators

+ +
    +{% if page.key_investigators %} + {% for investigator in page.key_investigators %} +
  • {{ investigator.name }} ({{ investigator.affiliation }}{% if investigator.country %}, {{ investigator.country }}{% endif %})
  • + {% endfor %} +{% endif %} +
+ +Presenter location: {{page.presenter_location}} + +{{ content }} + + diff --git a/_layouts/pw42-project.html b/_layouts/pw42-project.html new file mode 100644 index 000000000..c47332040 --- /dev/null +++ b/_layouts/pw42-project.html @@ -0,0 +1,21 @@ +--- +layout: default +--- + +Back to Projects List + +

{{ page.project_title }}

+ +

Key Investigators

+ +
    +{% if page.key_investigators %} + {% for investigator in page.key_investigators %} +
  • {{ investigator.name }} ({{ investigator.affiliation }}{% if investigator.country %}, {{ investigator.country }}{% endif %})
  • + {% endfor %} +{% endif %} +
+ +{{ content }} + + diff --git a/_layouts/pw43-project.html b/_layouts/pw43-project.html new file mode 100644 index 000000000..c47332040 --- /dev/null +++ b/_layouts/pw43-project.html @@ -0,0 +1,21 @@ +--- +layout: default +--- + +Back to Projects List + +

{{ page.project_title }}

+ +

Key Investigators

+ +
    +{% if page.key_investigators %} + {% for investigator in page.key_investigators %} +
  • {{ investigator.name }} ({{ investigator.affiliation }}{% if investigator.country %}, {{ investigator.country }}{% endif %})
  • + {% endfor %} +{% endif %} +
+ +{{ content }} + + diff --git a/_layouts/pw44-project.html b/_layouts/pw44-project.html new file mode 100644 index 000000000..5617dd45f --- /dev/null +++ b/_layouts/pw44-project.html @@ -0,0 +1,21 @@ +--- +layout: default +--- + +Back to Projects List + +

{{ page.project_title }}

+ +

Key Investigators

+ +
    +{% if page.key_investigators %} + {% for investigator in page.key_investigators %} +
  • {{ investigator.name }} ({{ investigator.affiliation }}{% if investigator.country %}, {{ investigator.country }}{% endif %})
  • + {% endfor %} +{% endif %} +
+ +{{ content }} + + diff --git a/_layouts/pw45-project.html b/_layouts/pw45-project.html new file mode 100644 index 000000000..5617dd45f --- /dev/null +++ b/_layouts/pw45-project.html @@ -0,0 +1,21 @@ +--- +layout: default +--- + +Back to Projects List + +

{{ page.project_title }}

+ +

Key Investigators

+ +
    +{% if page.key_investigators %} + {% for investigator in page.key_investigators %} +
  • {{ investigator.name }} ({{ investigator.affiliation }}{% if investigator.country %}, {{ investigator.country }}{% endif %})
  • + {% endfor %} +{% endif %} +
+ +{{ content }} + + diff --git a/assets/na-mic-logo.png b/assets/na-mic-logo.png new file mode 100644 index 000000000..4673c73bb Binary files /dev/null and b/assets/na-mic-logo.png differ diff --git a/common/Discord.md b/common/Discord.md new file mode 100644 index 000000000..93d55d51e --- /dev/null +++ b/common/Discord.md @@ -0,0 +1,90 @@ +## Discord in Project Week  + +The Discord application is used before and during Project Week for team communication (as opposed to Zoom, which is used for community wide meetings). Before the workshop, Discord can be used by teams to coordinate, prepare work and create project pages. During Project Week, Discord is used mostly by online only or hybrid teams to meet. It can also be used by in-person participants to schedule small meetings and advertize them to the rest of the community. + +## What is Discord? + +Discord is a chat app, similar to programs such as Skype, TeamSpeak, or Slack. Primarily built for video gamers, it supports video calls, voice chat, and text. It offers an efficient and semipublic, forum-style community platform and allows the creation of channels devoted to specific topics. Discord allows for the creation of servers and Project Week has its [dedicated server](https://discord.gg/d5Q6b5ug8u).      + +Discord can be downloaded for free from https://discord.com/ and is available for all major platforms.  + +## Discord Server + +After Discord installation, please open the [Discord server link](https://discord.gg/d5Q6b5ug8u). After clicking the link in a browser of your choice it will open Discord and ask you to "Join" the Project Week server.  After login, you should see Discord opening like this: + +![](https://user-images.githubusercontent.com/18140094/149007887-704b51b2-2eac-4569-a7f0-739a8e1e72d5.png) + +The left panel lists text and voice/video channels. Each project gets its own text channel to chat and exchange links about the project progress. 
Additional text channels can be used by anyone for general announcements (#announcements), general discussion (#Lobby-1), gathering people for a social event (#social), asking admins for the creation of a text channel for your project (#request-a-project-channel) or posting a job ad (#job-board). Voice channels are virtual meeting rooms that can be booked by creating an event or used spontaneously when available. Additional voice channels (Lobbies) can be used to gather randomly and chat. + +## Project history + +The same Discord server is used for each edition of Project Week. It is thus possible to continue projects in the next Project Weeks and check out the chat history of that project. However, important information about the project should be copied to the project page on GitHub. + +## How to use text channels +In text channels, enter your chat message in the bottom message input field. If the channel or the meeting room has a   + +![](https://user-images.githubusercontent.com/18140094/149000903-2a920350-2dac-4618-a02e-1ee914b19c88.png) + +symbol you may enter the channel by left-clicking onto it and joining the audio chat. If you cannot find your project as a project channel please drop a line on the Discord lobby (top) - one of the server admins will see it and create the channel.  See who is online in the right Discord tab. You can mention the person by using the @ symbol in the message input field. You can send private messages in Discord, but please refrain from doing so. Use project channels or the lobby wherever possible so everyone can follow.  + +## How to change your nickname and avatar + +Double click on your nickname in the right "users online" and select "Edit Profile". Please set the Discord nickname to your real name and affiliation, e.g. "Simon Drouin (ETS Montreal)". A real name usage policy may be defined for each Discord server.   
+ +![](https://user-images.githubusercontent.com/18140094/149008038-20629691-db7c-424f-b849-d7c99736c9fc.png) + +## Typo?  + +Fortunately, you can edit your messages in Discord chat like this: + +![](https://user-images.githubusercontent.com/18140094/149097744-c31e61f8-f75f-4ef5-92d7-5de0e2a5bddb.png) + +## Script formatting + +Please use three backticks (\`\`\`) to include source code in the text message.  + +Example: + +![](https://user-images.githubusercontent.com/18140094/149398597-b274d411-5b46-4a4f-9ca9-7c06396c5b33.png) + +Result: + +![](https://user-images.githubusercontent.com/18140094/149398688-449525e3-c1f1-43d9-b6d1-932bb2a55a60.png) + +## Screen sharing + +Join a voice channel, e.g. one of the meeting rooms. Press "Screen" (lower left) to share one of your screens.  + +![](https://user-images.githubusercontent.com/18140094/149112941-ff784625-b6bb-4c2d-9cb6-3d6a5de97515.png) + +To be able to look at the screen someone else shares, you need to click "LIVE"  and join the shared window stream (right to the name to that streaming in the voice channel) + +![](https://user-images.githubusercontent.com/18140094/149796988-39561009-f1f6-4a16-b6a0-49d966261e5d.png) + +## Get rid of that annoying channel sounds? + +Go "User settings" (see below) and switch all those off: + +![](https://user-images.githubusercontent.com/18140094/149826117-3440c17e-70af-4e69-bb4c-ff2888c2fee5.png) + +## How to leave a voice channel + +Simply click "Disconnect" next to your nickname (lower left).  + +![](https://user-images.githubusercontent.com/18140094/149116824-c57b963f-9a3e-4b93-b967-62ee6315223d.png) + +## Blur your background? + +This is easy. Go "User settings" (see below) and  + +![](https://user-images.githubusercontent.com/18140094/149825618-3d7ac8d7-2823-433e-9630-425624d7b947.png) + +## No sound?  
+ +Go "User Settings" + +![](https://user-images.githubusercontent.com/18140094/149098173-770fe614-47fd-4352-b4cd-f01f4033901a.png) + +Go "Voice & Video" and select the correct input and output devices. When you are done, click ESC (right top symbol on screenshot)  + +![](https://user-images.githubusercontent.com/18140094/149098459-b0207149-5fe9-4f24-a1d9-708592ee2dd2.png) diff --git a/netlify.toml b/netlify.toml new file mode 100644 index 000000000..3ad3520c5 --- /dev/null +++ b/netlify.toml @@ -0,0 +1,7 @@ +# Documentation at https://www.netlify.com/docs/netlify-toml-reference/ +[build] + command = "jekyll build" + publish = "_site/" + +[build.environment] + RUBY_VERSION = "3.3.2" diff --git a/welcome_module.png b/welcome_module.png new file mode 100644 index 000000000..60ed3156b Binary files /dev/null and b/welcome_module.png differ