This view is limited to 50 files because it contains too many changes. See the raw diff for the complete set of changes.
Files changed (50)
  1. .clang-format +0 -40
  2. .flake8 +4 -0
  3. .github/workflows/ci.yml +1 -6
  4. .github/workflows/format.yml +18 -17
  5. .github/workflows/pip.yml +0 -62
  6. .github/workflows/release-drafter.yml +16 -0
  7. .github/workflows/release.yml +0 -95
  8. .gitignore +1 -6
  9. .pre-commit-config.yaml +0 -88
  10. CODE_OF_CONDUCT.md +0 -128
  11. Dockerfile +1 -1
  12. MANIFEST.in +0 -12
  13. README.md +64 -124
  14. {imcui → api}/__init__.py +0 -0
  15. {imcui/api → api}/client.py +7 -14
  16. imcui/api/core.py → api/server.py +218 -27
  17. {imcui/api → api}/test/CMakeLists.txt +2 -3
  18. {imcui/api → api}/test/build_and_run.sh +0 -0
  19. {imcui/api → api}/test/client.cpp +21 -18
  20. {imcui/api → api}/test/helper.h +44 -39
  21. imcui/api/__init__.py → api/types.py +0 -31
  22. app.py +3 -6
  23. build_docker.sh +1 -1
  24. {imcui/datasets → datasets}/.gitignore +0 -0
  25. {imcui/datasets → datasets}/sacre_coeur/README.md +0 -0
  26. {imcui/datasets → datasets}/sacre_coeur/mapping/02928139_3448003521.jpg +0 -0
  27. {imcui/datasets → datasets}/sacre_coeur/mapping/03903474_1471484089.jpg +0 -0
  28. {imcui/datasets → datasets}/sacre_coeur/mapping/10265353_3838484249.jpg +0 -0
  29. {imcui/datasets → datasets}/sacre_coeur/mapping/17295357_9106075285.jpg +0 -0
  30. {imcui/datasets → datasets}/sacre_coeur/mapping/32809961_8274055477.jpg +0 -0
  31. {imcui/datasets → datasets}/sacre_coeur/mapping/44120379_8371960244.jpg +0 -0
  32. {imcui/datasets → datasets}/sacre_coeur/mapping/51091044_3486849416.jpg +0 -0
  33. {imcui/datasets → datasets}/sacre_coeur/mapping/60584745_2207571072.jpg +0 -0
  34. {imcui/datasets → datasets}/sacre_coeur/mapping/71295362_4051449754.jpg +0 -0
  35. {imcui/datasets → datasets}/sacre_coeur/mapping/93341989_396310999.jpg +0 -0
  36. {imcui/datasets → datasets}/sacre_coeur/mapping_rot/02928139_3448003521_rot135.jpg +0 -0
  37. {imcui/datasets → datasets}/sacre_coeur/mapping_rot/02928139_3448003521_rot180.jpg +0 -0
  38. {imcui/datasets → datasets}/sacre_coeur/mapping_rot/02928139_3448003521_rot225.jpg +0 -0
  39. {imcui/datasets → datasets}/sacre_coeur/mapping_rot/02928139_3448003521_rot270.jpg +0 -0
  40. {imcui/datasets → datasets}/sacre_coeur/mapping_rot/02928139_3448003521_rot315.jpg +0 -0
  41. {imcui/datasets → datasets}/sacre_coeur/mapping_rot/02928139_3448003521_rot45.jpg +0 -0
  42. {imcui/datasets → datasets}/sacre_coeur/mapping_rot/02928139_3448003521_rot90.jpg +0 -0
  43. {imcui/datasets → datasets}/sacre_coeur/mapping_rot/03903474_1471484089_rot135.jpg +0 -0
  44. {imcui/datasets → datasets}/sacre_coeur/mapping_rot/03903474_1471484089_rot180.jpg +0 -0
  45. {imcui/datasets → datasets}/sacre_coeur/mapping_rot/03903474_1471484089_rot225.jpg +0 -0
  46. {imcui/datasets → datasets}/sacre_coeur/mapping_rot/03903474_1471484089_rot270.jpg +0 -0
  47. {imcui/datasets → datasets}/sacre_coeur/mapping_rot/03903474_1471484089_rot315.jpg +0 -0
  48. {imcui/datasets → datasets}/sacre_coeur/mapping_rot/03903474_1471484089_rot45.jpg +0 -0
  49. {imcui/datasets → datasets}/sacre_coeur/mapping_rot/03903474_1471484089_rot90.jpg +0 -0
  50. {imcui/datasets → datasets}/sacre_coeur/mapping_rot/10265353_3838484249_rot135.jpg +0 -0
.clang-format DELETED
@@ -1,40 +0,0 @@
-BasedOnStyle: Chromium
-IncludeBlocks: Preserve
-IncludeCategories:
-  - Regex: '^<.*>'
-    Priority: 1
-  - Regex: '^".*"'
-    Priority: 2
-SortIncludes: true
-Language: Cpp
-AccessModifierOffset: 2
-AlignAfterOpenBracket: true
-AlignConsecutiveAssignments: false
-AlignConsecutiveDeclarations: false
-AlignEscapedNewlines: Right
-AlignOperands: true
-AlignTrailingComments: false
-AllowAllParametersOfDeclarationOnNextLine: true
-AllowShortBlocksOnASingleLine: false
-AllowShortCaseLabelsOnASingleLine: true
-AllowShortFunctionsOnASingleLine: None
-AllowShortIfStatementsOnASingleLine: true
-AllowShortLoopsOnASingleLine: true
-AlwaysBreakAfterReturnType: None
-AlwaysBreakBeforeMultilineStrings: true
-AlwaysBreakTemplateDeclarations: false
-BinPackArguments: false
-BinPackParameters: false
-BreakBeforeBraces: Attach
-BreakBeforeInheritanceComma: false
-BreakBeforeTernaryOperators: true
-BreakStringLiterals: false
-ColumnLimit: 88
-CompactNamespaces: false
-ConstructorInitializerAllOnOneLineOrOnePerLine: true
-ConstructorInitializerIndentWidth: 4
-ContinuationIndentWidth: 4
-IndentCaseLabels: true
-IndentWidth: 4
-TabWidth: 4
-UseTab: Never
.flake8 ADDED
@@ -0,0 +1,4 @@
+[flake8]
+max-line-length = 80
+extend-ignore = E203,E501,E402
+exclude = .git,__pycache__,build,.venv/,third_party
.github/workflows/ci.yml CHANGED
@@ -11,7 +11,6 @@ on:
 jobs:
   build:
     runs-on: ubuntu-latest
-    # runs-on: self-hosted
 
     steps:
       - name: Checkout code
@@ -29,9 +28,5 @@ jobs:
          pip install -r requirements.txt
          sudo apt-get update && sudo apt-get install ffmpeg libsm6 libxext6 -y
 
-      - name: Build and install
-        run: pip install .
-
       - name: Run tests
-        # run: python -m pytest
-        run: python tests/test_basic.py
+        run: python test_app_cli.py
.github/workflows/format.yml CHANGED
@@ -1,23 +1,24 @@
-# This is a format job. Pre-commit has a first-party GitHub action, so we use
-# that: https://github.com/pre-commit/action
-
-name: Format
-
+name: Format and Lint Checks
 on:
-  workflow_dispatch:
-  pull_request:
   push:
     branches:
-      - main
-
+      - main
+    paths:
+      - '*.py'
+  pull_request:
+    types: [ assigned, opened, synchronize, reopened ]
 jobs:
-  pre-commit:
-    name: Format
+  check:
+    name: Format and Lint Checks
     runs-on: ubuntu-latest
-    # runs-on: self-hosted
     steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-python@v5
-        with:
-          python-version: "3.x"
-      - uses: pre-commit/action@v3.0.1
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v4
+        with:
+          python-version: '3.10'
+          cache: 'pip'
+      - run: python -m pip install --upgrade pip
+      - run: python -m pip install .[dev]
+      - run: python -m flake8 ui/*.py hloc/*.py hloc/matchers/*.py hloc/extractors/*.py
+      - run: python -m isort ui/*.py hloc/*.py hloc/matchers/*.py hloc/extractors/*.py --check-only --diff
+      - run: python -m black ui/*.py hloc/*.py hloc/matchers/*.py hloc/extractors/*.py --check --diff
.github/workflows/pip.yml DELETED
@@ -1,62 +0,0 @@
-name: Pip
-on:
-  workflow_dispatch:
-  pull_request:
-  push:
-    branches:
-      - main
-
-jobs:
-  build:
-    strategy:
-      fail-fast: false
-      matrix:
-        platform: [ubuntu-latest]
-        python-version: ["3.9", "3.10"]
-
-    runs-on: ${{ matrix.platform }}
-    # runs-on: self-hosted
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          submodules: recursive
-
-      - uses: actions/setup-python@v5
-        with:
-          python-version: ${{ matrix.python-version }}
-
-      - name: Upgrade setuptools and wheel
-        run: |
-          pip install --upgrade setuptools wheel
-
-      - name: Install dependencies on Ubuntu
-        if: runner.os == 'Linux'
-        run: |
-          sudo apt-get update
-          sudo apt-get install libopencv-dev -y
-
-      - name: Install dependencies on macOS
-        if: runner.os == 'macOS'
-        run: |
-          brew update
-          brew install opencv
-
-      - name: Install dependencies on Windows
-        if: runner.os == 'Windows'
-        run: |
-          choco install opencv -y
-
-      - name: Add requirements
-        run: python -m pip install --upgrade wheel setuptools
-
-      - name: Install Python dependencies
-        run: |
-          pip install pytest
-          pip install -r requirements.txt
-          sudo apt-get update && sudo apt-get install ffmpeg libsm6 libxext6 -y
-
-      - name: Build and install
-        run: pip install .
-
-      - name: Test
-        run: python -m pytest
.github/workflows/release-drafter.yml ADDED
@@ -0,0 +1,16 @@
+name: Release Drafter
+
+on:
+  push:
+    # branches to consider in the event; optional, defaults to all
+    branches:
+      - master
+
+jobs:
+  update_release_draft:
+    runs-on: ubuntu-latest
+    steps:
+      # Drafts your next Release notes as Pull Requests are merged into "master"
+      - uses: release-drafter/release-drafter@v5.23.0
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/release.yml DELETED
@@ -1,95 +0,0 @@
-name: PyPI Release
-on:
-  release:
-    types: [published]
-
-jobs:
-  build:
-    strategy:
-      fail-fast: false
-      matrix:
-        platform: [ubuntu-latest]
-        python-version: ["3.9", "3.10", "3.11"]
-
-    runs-on: ${{ matrix.platform }}
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          submodules: recursive
-
-      - uses: actions/setup-python@v5
-        with:
-          python-version: ${{ matrix.python-version }}
-
-      - name: Upgrade setuptools and wheel
-        run: |
-          pip install --upgrade setuptools wheel
-
-      - name: Install dependencies on Ubuntu
-        if: runner.os == 'Linux'
-        run: |
-          sudo apt-get update
-          sudo apt-get install libopencv-dev -y
-
-      - name: Install dependencies on macOS
-        if: runner.os == 'macOS'
-        run: |
-          brew update
-          brew install opencv
-
-      - name: Install dependencies on Windows
-        if: runner.os == 'Windows'
-        run: |
-          choco install opencv -y
-
-      - name: Add requirements
-        run: python -m pip install --upgrade setuptools wheel build
-
-      - name: Install Python dependencies
-        run: |
-          pip install pytest
-          pip install -r requirements.txt
-          sudo apt-get update && sudo apt-get install ffmpeg libsm6 libxext6 -y
-
-      - name: Build source distribution
-        run: |
-          python -m build --outdir dist/
-          ls -lh dist/
-
-      - name: Upload to GitHub Release
-        if: matrix.python-version == '3.10' && github.event_name == 'release'
-        uses: softprops/action-gh-release@v2
-        with:
-          files: dist/*.whl
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Archive wheels
-        if: matrix.python-version == '3.10' && github.event_name == 'release'
-        uses: actions/upload-artifact@v4
-        with:
-          name: dist
-          path: dist/*.whl
-
-
-  pypi-publish:
-    name: upload release to PyPI
-    needs: build
-    runs-on: ubuntu-latest
-    environment: pypi
-    permissions:
-      # IMPORTANT: this permission is mandatory for Trusted Publishing
-      id-token: write
-    steps:
-      # retrieve your distributions here
-      - name: Download artifacts
-        uses: actions/download-artifact@v4
-        with:
-          name: dist
-          path: dist
-
-      - name: List dist directory
-        run: ls -lh dist/
-
-      - name: Publish package distributions to PyPI
-        uses: pypa/gh-action-pypi-publish@release/v1
.gitignore CHANGED
@@ -1,4 +1,5 @@
 build/
+# lib
 bin/
 cmake_modules/
 cmake-build-debug/
@@ -25,9 +26,3 @@ gen_example.py
 datasets/lines/terrace0.JPG
 datasets/lines/terrace1.JPG
 datasets/South-Building*
-*.pkl
-oryx-build-commands.txt
-.ruff_cache*
-dist
-tmp
-backup*
.pre-commit-config.yaml DELETED
@@ -1,88 +0,0 @@
-# To use:
-#
-# pre-commit run -a
-#
-# Or:
-#
-# pre-commit run --all-files
-#
-# Or:
-#
-# pre-commit install # (runs every time you commit in git)
-#
-# To update this file:
-#
-# pre-commit autoupdate
-#
-# See https://github.com/pre-commit/pre-commit
-
-ci:
-  autoupdate_commit_msg: "chore: update pre-commit hooks"
-  autofix_commit_msg: "style: pre-commit fixes"
-
-repos:
-  # Standard hooks
-  - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v5.0.0
-    hooks:
-      - id: check-added-large-files
-        exclude: ^imcui/third_party/
-      - id: check-case-conflict
-        exclude: ^imcui/third_party/
-      - id: check-merge-conflict
-        exclude: ^imcui/third_party/
-      - id: check-symlinks
-        exclude: ^imcui/third_party/
-      - id: check-yaml
-        exclude: ^imcui/third_party/
-      - id: debug-statements
-        exclude: ^imcui/third_party/
-      - id: end-of-file-fixer
-        exclude: ^imcui/third_party/
-      - id: mixed-line-ending
-        exclude: ^imcui/third_party/
-      - id: requirements-txt-fixer
-        exclude: ^imcui/third_party/
-      - id: trailing-whitespace
-        exclude: ^imcui/third_party/
-
-  - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: "v0.8.4"
-    hooks:
-      - id: ruff
-        args: ["--fix", "--show-fixes", "--extend-ignore=E402"]
-      - id: ruff-format
-        exclude: ^(docs|imcui/third_party/)
-
-  # Checking static types
-  - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: "v1.14.0"
-    hooks:
-      - id: mypy
-        files: "setup.py"
-        args: []
-        additional_dependencies: [types-setuptools]
-        exclude: ^imcui/third_party/
-  # Changes tabs to spaces
-  - repo: https://github.com/Lucas-C/pre-commit-hooks
-    rev: v1.5.5
-    hooks:
-      - id: remove-tabs
-        exclude: ^(docs|imcui/third_party/)
-
-  # CMake formatting
-  - repo: https://github.com/cheshirekow/cmake-format-precommit
-    rev: v0.6.13
-    hooks:
-      - id: cmake-format
-        additional_dependencies: [pyyaml]
-        types: [file]
-        files: (\.cmake|CMakeLists.txt)(.in)?$
-        exclude: ^imcui/third_party/
-
-  # Suggested hook if you add a .clang-format file
-  - repo: https://github.com/pre-commit/mirrors-clang-format
-    rev: v13.0.0
-    hooks:
-      - id: clang-format
-        exclude: ^imcui/third_party/
CODE_OF_CONDUCT.md DELETED
@@ -1,128 +0,0 @@
-# Contributor Covenant Code of Conduct
-
-## Our Pledge
-
-We as members, contributors, and leaders pledge to make participation in our
-community a harassment-free experience for everyone, regardless of age, body
-size, visible or invisible disability, ethnicity, sex characteristics, gender
-identity and expression, level of experience, education, socio-economic status,
-nationality, personal appearance, race, religion, or sexual identity
-and orientation.
-
-We pledge to act and interact in ways that contribute to an open, welcoming,
-diverse, inclusive, and healthy community.
-
-## Our Standards
-
-Examples of behavior that contributes to a positive environment for our
-community include:
-
-* Demonstrating empathy and kindness toward other people
-* Being respectful of differing opinions, viewpoints, and experiences
-* Giving and gracefully accepting constructive feedback
-* Accepting responsibility and apologizing to those affected by our mistakes,
-  and learning from the experience
-* Focusing on what is best not just for us as individuals, but for the
-  overall community
-
-Examples of unacceptable behavior include:
-
-* The use of sexualized language or imagery, and sexual attention or
-  advances of any kind
-* Trolling, insulting or derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or email
-  address, without their explicit permission
-* Other conduct which could reasonably be considered inappropriate in a
-  professional setting
-
-## Enforcement Responsibilities
-
-Community leaders are responsible for clarifying and enforcing our standards of
-acceptable behavior and will take appropriate and fair corrective action in
-response to any behavior that they deem inappropriate, threatening, offensive,
-or harmful.
-
-Community leaders have the right and responsibility to remove, edit, or reject
-comments, commits, code, wiki edits, issues, and other contributions that are
-not aligned to this Code of Conduct, and will communicate reasons for moderation
-decisions when appropriate.
-
-## Scope
-
-This Code of Conduct applies within all community spaces, and also applies when
-an individual is officially representing the community in public spaces.
-Examples of representing our community include using an official e-mail address,
-posting via an official social media account, or acting as an appointed
-representative at an online or offline event.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported to the community leaders responsible for enforcement at
-alpharealcat@gmail.com.
-All complaints will be reviewed and investigated promptly and fairly.
-
-All community leaders are obligated to respect the privacy and security of the
-reporter of any incident.
-
-## Enforcement Guidelines
-
-Community leaders will follow these Community Impact Guidelines in determining
-the consequences for any action they deem in violation of this Code of Conduct:
-
-### 1. Correction
-
-**Community Impact**: Use of inappropriate language or other behavior deemed
-unprofessional or unwelcome in the community.
-
-**Consequence**: A private, written warning from community leaders, providing
-clarity around the nature of the violation and an explanation of why the
-behavior was inappropriate. A public apology may be requested.
-
-### 2. Warning
-
-**Community Impact**: A violation through a single incident or series
-of actions.
-
-**Consequence**: A warning with consequences for continued behavior. No
-interaction with the people involved, including unsolicited interaction with
-those enforcing the Code of Conduct, for a specified period of time. This
-includes avoiding interactions in community spaces as well as external channels
-like social media. Violating these terms may lead to a temporary or
-permanent ban.
-
-### 3. Temporary Ban
-
-**Community Impact**: A serious violation of community standards, including
-sustained inappropriate behavior.
-
-**Consequence**: A temporary ban from any sort of interaction or public
-communication with the community for a specified period of time. No public or
-private interaction with the people involved, including unsolicited interaction
-with those enforcing the Code of Conduct, is allowed during this period.
-Violating these terms may lead to a permanent ban.
-
-### 4. Permanent Ban
-
-**Community Impact**: Demonstrating a pattern of violation of community
-standards, including sustained inappropriate behavior, harassment of an
-individual, or aggression toward or disparagement of classes of individuals.
-
-**Consequence**: A permanent ban from any sort of public interaction within
-the community.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage],
-version 2.0, available at
-https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
-
-Community Impact Guidelines were inspired by [Mozilla's code of conduct
-enforcement ladder](https://github.com/mozilla/diversity).
-
-[homepage]: https://www.contributor-covenant.org
-
-For answers to common questions about this code of conduct, see the FAQ at
-https://www.contributor-covenant.org/faq. Translations are available at
-https://www.contributor-covenant.org/translations.
Dockerfile CHANGED
@@ -11,7 +11,7 @@ RUN apt-get update && apt-get install -y git-lfs
 RUN git lfs install
 
 # Clone the Git repository
-RUN git clone --recursive https://github.com/Vincentqyw/image-matching-webui.git /code
+RUN git clone https://huggingface.co/spaces/Realcat/image-matching-webui /code
 
 RUN conda create -n imw python=${PYTHON_VERSION}
 RUN echo "source activate imw" > ~/.bashrc
MANIFEST.in DELETED
@@ -1,12 +0,0 @@
-# logo
-include imcui/assets/logo.webp
-
-recursive-include imcui/ui *.yaml
-recursive-include imcui/api *.yaml
-recursive-include imcui/third_party *.yaml *.cfg *.yml
-
-# ui examples
-# recursive-include imcui/datasets *.JPG *.jpg *.png
-
-# model
-recursive-include imcui/third_party/SuperGluePretrainedNetwork *.pth
README.md CHANGED
@@ -9,93 +9,81 @@ app_file: app.py
 pinned: true
 license: apache-2.0
 ---
+
 [![Contributors][contributors-shield]][contributors-url]
 [![Forks][forks-shield]][forks-url]
 [![Stargazers][stars-shield]][stars-url]
 [![Issues][issues-shield]][issues-url]
 
 <p align="center">
-  <h1 align="center"><br><ins>Image Matching WebUI</ins>
-  <br>Matching Keypoints between two images</h1>
+  <h1 align="center"><br><ins>Image Matching WebUI</ins><br>Identify matching points between two images</h1>
 </p>
-<div align="center">
-  <a target="_blank" href="https://github.com/Vincentqyw/image-matching-webui/actions/workflows/release.yml"><img src="https://github.com/Vincentqyw/image-matching-webui/actions/workflows/release.yml/badge.svg" alt="PyPI Release"></a>
-  <a target="_blank" href='https://huggingface.co/spaces/Realcat/image-matching-webui'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue'></a>
-  <a target="_blank" href="https://pypi.org/project/imcui"><img alt="PyPI - Version" src="https://img.shields.io/pypi/v/imcui?style=flat&logo=pypi&label=imcui&link=https%3A%2F%2Fpypi.org%2Fproject%2Fimcui"></a>
-  <a target="_blank" href="https://hub.docker.com/r/vincentqin/image-matching-webui"><img alt="Docker Image Version" src="https://img.shields.io/docker/v/vincentqin/image-matching-webui?sort=date&arch=amd64&logo=docker&label=imcui&link=https%3A%2F%2Fhub.docker.com%2Fr%2Fvincentqin%2Fimage-matching-webui"></a>
-  <a target="_blank" href="https://pepy.tech/projects/imcui"><img src="https://static.pepy.tech/badge/imcui" alt="PyPI Downloads"></a>
-
-</div>
 
 ## Description
 
-`Image Matching WebUI (IMCUI)` efficiently matches image pairs using multiple famous image matching algorithms. The tool features a Graphical User Interface (GUI) designed using [gradio](https://gradio.app/). You can effortlessly select two images and a matching algorithm and obtain a precise matching result.
+This simple tool efficiently matches image pairs using multiple famous image matching algorithms. The tool features a Graphical User Interface (GUI) designed using [gradio](https://gradio.app/). You can effortlessly select two images and a matching algorithm and obtain a precise matching result.
 **Note**: the images source can be either local images or webcam images.
 
-Try it on
-<a href='https://huggingface.co/spaces/Realcat/image-matching-webui'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue'></a>
-<a target="_blank" href="https://lightning.ai/realcat/studios/image-matching-webui"><img src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/app-2/studio-badge.svg" alt="Open In Studio"/></a>
+Try it on <a href='https://huggingface.co/spaces/Realcat/image-matching-webui'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue'></a>
+<a target="_blank" href="https://lightning.ai/realcat/studios/image-matching-webui">
+  <img src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/app-2/studio-badge.svg" alt="Open In Studio"/>
+</a>
 
 Here is a demo of the tool:
 
-https://github.com/Vincentqyw/image-matching-webui/assets/18531182/263534692-c3484d1b-cc00-4fdc-9b31-e5b7af07ecd9
+![demo](assets/demo.gif)
 
 The tool currently supports various popular image matching algorithms, namely:
-
-| Algorithm | Supported | Conference/Journal | Year | GitHub Link |
-|------------------|-----------|--------------------|------|-------------|
-| MINIMA | ✅ | ARXIV | 2024 | [Link](https://github.com/LSXI7/MINIMA) |
-| XoFTR | ✅ | CVPR | 2024 | [Link](https://github.com/OnderT/XoFTR) |
-| EfficientLoFTR | ✅ | CVPR | 2024 | [Link](https://github.com/zju3dv/EfficientLoFTR) |
-| MASt3R | ✅ | CVPR | 2024 | [Link](https://github.com/naver/mast3r) |
-| DUSt3R | ✅ | CVPR | 2024 | [Link](https://github.com/naver/dust3r) |
-| OmniGlue | ✅ | CVPR | 2024 | [Link](https://github.com/Vincentqyw/omniglue-onnx) |
-| XFeat | ✅ | CVPR | 2024 | [Link](https://github.com/verlab/accelerated_features) |
-| RoMa | ✅ | CVPR | 2024 | [Link](https://github.com/Vincentqyw/RoMa) |
-| DeDoDe | ✅ | 3DV | 2024 | [Link](https://github.com/Parskatt/DeDoDe) |
-| Mickey | ❌ | CVPR | 2024 | [Link](https://github.com/nianticlabs/mickey) |
-| GIM | ✅ | ICLR | 2024 | [Link](https://github.com/xuelunshen/gim) |
-| ALIKED | ✅ | ICCV | 2023 | [Link](https://github.com/Shiaoming/ALIKED) |
-| LightGlue | ✅ | ICCV | 2023 | [Link](https://github.com/cvg/LightGlue) |
-| DarkFeat | ✅ | AAAI | 2023 | [Link](https://github.com/THU-LYJ-Lab/DarkFeat) |
-| SFD2 | ✅ | CVPR | 2023 | [Link](https://github.com/feixue94/sfd2) |
-| IMP | ✅ | CVPR | 2023 | [Link](https://github.com/feixue94/imp-release) |
-| ASTR | ❌ | CVPR | 2023 | [Link](https://github.com/ASTR2023/ASTR) |
-| SEM | ❌ | CVPR | 2023 | [Link](https://github.com/SEM2023/SEM) |
-| DeepLSD | ❌ | CVPR | 2023 | [Link](https://github.com/cvg/DeepLSD) |
-| GlueStick | ✅ | ICCV | 2023 | [Link](https://github.com/cvg/GlueStick) |
-| ConvMatch | ❌ | AAAI | 2023 | [Link](https://github.com/SuhZhang/ConvMatch) |
-| LoFTR | ✅ | CVPR | 2021 | [Link](https://github.com/zju3dv/LoFTR) |
-| SOLD2 | ✅ | CVPR | 2021 | [Link](https://github.com/cvg/SOLD2) |
-| LineTR | ❌ | RA-L | 2021 | [Link](https://github.com/yosungho/LineTR) |
-| DKM | ✅ | CVPR | 2023 | [Link](https://github.com/Parskatt/DKM) |
-| NCMNet | ❌ | CVPR | 2023 | [Link](https://github.com/xinliu29/NCMNet) |
-| TopicFM | ✅ | AAAI | 2023 | [Link](https://github.com/Vincentqyw/TopicFM) |
-| AspanFormer | ✅ | ECCV | 2022 | [Link](https://github.com/Vincentqyw/ml-aspanformer) |
-| LANet | ✅ | ACCV | 2022 | [Link](https://github.com/wangch-g/lanet) |
-| LISRD | ❌ | ECCV | 2022 | [Link](https://github.com/rpautrat/LISRD) |
-| REKD | ❌ | CVPR | 2022 | [Link](https://github.com/bluedream1121/REKD) |
-| CoTR | ✅ | ICCV | 2021 | [Link](https://github.com/ubc-vision/COTR) |
-| ALIKE | ✅ | TMM | 2022 | [Link](https://github.com/Shiaoming/ALIKE) |
-| RoRD | ✅ | IROS | 2021 | [Link](https://github.com/UditSinghParihar/RoRD) |
-| SGMNet | ✅ | ICCV | 2021 | [Link](https://github.com/vdvchen/SGMNet) |
-| SuperPoint | ✅ | CVPRW | 2018 | [Link](https://github.com/magicleap/SuperPointPretrainedNetwork) |
-| SuperGlue | ✅ | CVPR | 2020 | [Link](https://github.com/magicleap/SuperGluePretrainedNetwork) |
-| D2Net | ✅ | CVPR | 2019 | [Link](https://github.com/Vincentqyw/d2-net) |
-| R2D2 | ✅ | NeurIPS | 2019 | [Link](https://github.com/naver/r2d2) |
-| DISK | ✅ | NeurIPS | 2020 | [Link](https://github.com/cvlab-epfl/disk) |
-| Key.Net | ❌ | ICCV | 2019 | [Link](https://github.com/axelBarroso/Key.Net) |
-| OANet | ❌ | ICCV | 2019 | [Link](https://github.com/zjhthu/OANet) |
-| SOSNet | ✅ | CVPR | 2019 | [Link](https://github.com/scape-research/SOSNet) |
-| HardNet | ✅ | NeurIPS | 2017 | [Link](https://github.com/DagnyT/hardnet) |
-| SIFT | ✅ | IJCV | 2004 | [Link](https://docs.opencv.org/4.x/da/df5/tutorial_py_sift_intro.html) |
-
+- [x] [XoFTR](https://github.com/OnderT/XoFTR), CVPR 2024
+- [x] [EfficientLoFTR](https://github.com/zju3dv/EfficientLoFTR), CVPR 2024
+- [x] [MASt3R](https://github.com/naver/mast3r), CVPR 2024
+- [x] [DUSt3R](https://github.com/naver/dust3r), CVPR 2024
+- [x] [OmniGlue](https://github.com/Vincentqyw/omniglue-onnx), CVPR 2024
+- [x] [XFeat](https://github.com/verlab/accelerated_features), CVPR 2024
+- [x] [RoMa](https://github.com/Vincentqyw/RoMa), CVPR 2024
+- [x] [DeDoDe](https://github.com/Parskatt/DeDoDe), 3DV 2024
+- [ ] [Mickey](https://github.com/nianticlabs/mickey), CVPR 2024
+- [x] [GIM](https://github.com/xuelunshen/gim), ICLR 2024
+- [ ] [DUSt3R](https://github.com/naver/dust3r), arXiv 2023
+- [x] [LightGlue](https://github.com/cvg/LightGlue), ICCV 2023
+- [x] [DarkFeat](https://github.com/THU-LYJ-Lab/DarkFeat), AAAI 2023
+- [x] [SFD2](https://github.com/feixue94/sfd2), CVPR 2023
+- [x] [IMP](https://github.com/feixue94/imp-release), CVPR 2023
+- [ ] [ASTR](https://github.com/ASTR2023/ASTR), CVPR 2023
+- [ ] [SEM](https://github.com/SEM2023/SEM), CVPR 2023
+- [ ] [DeepLSD](https://github.com/cvg/DeepLSD), CVPR 2023
+- [x] [GlueStick](https://github.com/cvg/GlueStick), ICCV 2023
+- [ ] [ConvMatch](https://github.com/SuhZhang/ConvMatch), AAAI 2023
+- [x] [LoFTR](https://github.com/zju3dv/LoFTR), CVPR 2021
+- [x] [SOLD2](https://github.com/cvg/SOLD2), CVPR 2021
+- [ ] [LineTR](https://github.com/yosungho/LineTR), RA-L 2021
+- [x] [DKM](https://github.com/Parskatt/DKM), CVPR 2023
+- [ ] [NCMNet](https://github.com/xinliu29/NCMNet), CVPR 2023
+- [x] [TopicFM](https://github.com/Vincentqyw/TopicFM), AAAI 2023
+- [x] [AspanFormer](https://github.com/Vincentqyw/ml-aspanformer), ECCV 2022
+- [x] [LANet](https://github.com/wangch-g/lanet), ACCV 2022
+- [ ] [LISRD](https://github.com/rpautrat/LISRD), ECCV 2022
+- [ ] [REKD](https://github.com/bluedream1121/REKD), CVPR 2022
+- [x] [CoTR](https://github.com/ubc-vision/COTR), ICCV 2021
+- [x] [ALIKE](https://github.com/Shiaoming/ALIKE), TMM 2022
+- [x] [RoRD](https://github.com/UditSinghParihar/RoRD), IROS 2021
+- [x] [SGMNet](https://github.com/vdvchen/SGMNet), ICCV 2021
+- [x] [SuperPoint](https://github.com/magicleap/SuperPointPretrainedNetwork), CVPRW 2018
+- [x] [SuperGlue](https://github.com/magicleap/SuperGluePretrainedNetwork), CVPR 2020
+- [x] [D2Net](https://github.com/Vincentqyw/d2-net), CVPR 2019
+- [x] [R2D2](https://github.com/naver/r2d2), NeurIPS 2019
+- [x] [DISK](https://github.com/cvlab-epfl/disk), NeurIPS 2020
+- [ ] [Key.Net](https://github.com/axelBarroso/Key.Net), ICCV 2019
+- [ ] [OANet](https://github.com/zjhthu/OANet), ICCV 2019
+- [x] [SOSNet](https://github.com/scape-research/SOSNet), CVPR 2019
+- [x] [HardNet](https://github.com/DagnyT/hardnet), NeurIPS 2017
+- [x] [SIFT](https://docs.opencv.org/4.x/da/df5/tutorial_py_sift_intro.html), IJCV 2004
 
 ## How to use
 
 ### HuggingFace / Lightning AI
 
-Just try it on <a href='https://huggingface.co/spaces/Realcat/image-matching-webui'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue'></a>
+Just try it on <a href='https://huggingface.co/spaces/Realcat/image-matching-webui'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue'></a>
 <a target="_blank" href="https://lightning.ai/realcat/studios/image-matching-webui">
 <img src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/app-2/studio-badge.svg" alt="Open In Studio"/>
 </a>
@@ -103,25 +91,11 @@ Just try it on <a href='https://huggingface.co/spaces/Realcat/image-matching-web
 or deploy it locally following the instructions below.
 
 ### Requirements
-
-- [Python 3.9+](https://www.python.org/downloads/)
-
-#### Install from pip [NEW]
-
-Update: now support install from [pip](https://pypi.org/project/imcui), just run:
-
-```bash
-pip install imcui
-```
-
-#### Install from source
-
 ``` bash
 git clone --recursive https://github.com/Vincentqyw/image-matching-webui.git
 cd image-matching-webui
 conda env create -f environment.yaml
 conda activate imw
-pip install -e .
 ```
 
 or using [docker](https://hub.docker.com/r/vincentqin/image-matching-webui):
@@ -130,18 +104,10 @@ or using [docker](https://hub.docker.com/r/vincentqin/image-matching-webui):
 docker pull vincentqin/image-matching-webui:latest
 docker run -it -p 7860:7860 vincentqin/image-matching-webui:latest python app.py --server_name "0.0.0.0" --server_port=7860
 ```
-
-### Deploy to Railway
-
-Deploy to [Railway](https://railway.app/), setting up a `Custom Start Command` in `Deploy` section:
-
-``` bash
-python -m imcui.api.server
-```
-
+
 ### Run demo
 ``` bash
-python app.py --config ./config/config.yaml
+python3 ./app.py
 ```
 then open http://localhost:7860 in your browser.
 
@@ -149,54 +115,28 @@ then open http://localhost:7860 in your browser.
 
 ### Add your own feature / matcher
 
-I provide an example to add local feature in [imcui/hloc/extractors/example.py](imcui/hloc/extractors/example.py). Then add feature settings in `confs` in file [imcui/hloc/extract_features.py](imcui/hloc/extract_features.py). Last step is adding some settings to `matcher_zoo` in file [imcui/ui/config.yaml](imcui/ui/config.yaml).
-
-### Upload models
-
-IMCUI hosts all models on [Huggingface](https://huggingface.co/Realcat/imcui_checkpoints). You can upload your model to Huggingface and add it to the [Realcat/imcui_checkpoints](https://huggingface.co/Realcat/imcui_checkpoints) repository.
-
+I provide an example to add local feature in [hloc/extractors/example.py](hloc/extractors/example.py). Then add feature settings in `confs` in file [hloc/extract_features.py](hloc/extract_features.py). Last step is adding some settings to `model_zoo` in file [ui/config.yaml](ui/config.yaml).
 
 ## Contributions welcome!
 
-External contributions are very much welcome. Please follow the [PEP8 style guidelines](https://www.python.org/dev/peps/pep-0008/) using a linter like flake8. This is a non-exhaustive list of features that might be valuable additions:
+External contributions are very much welcome. Please follow the [PEP8 style guidelines](https://www.python.org/dev/peps/pep-0008/) using a linter like flake8 (reformat using command `python -m black .`). This is a non-exhaustive list of features that might be valuable additions:
 
-- [x] support pip install command
-- [x] add [CPU CI](.github/workflows/ci.yml)
 - [x] add webcam support
 - [x] add [line feature matching](https://github.com/Vincentqyw/LineSegmentsDetection) algorithms
 - [x] example to add a new feature extractor / matcher
 - [x] ransac to filter outliers
-- [ ] add [rotation images](https://github.com/pidahbus/deep-image-orientation-angle-detection) options before matching
+- [ ] add [rotation images](https://github.com/pidahbus/deep-image-orientation-angle-detection) options before matching
 - [ ] support export matches to colmap ([#issue 6](https://github.com/Vincentqyw/image-matching-webui/issues/6))
-- [x] add config file to set default parameters
-- [x] dynamically load models and reduce GPU overload
-
-Adding local features / matchers as submodules is very easy. For example, to add the [GlueStick](https://github.com/cvg/GlueStick):
-
-``` bash
-git submodule add https://github.com/cvg/GlueStick.git imcui/third_party/GlueStick
-```
+- [ ] add config file to set default parameters
+- [ ] dynamically load models and reduce GPU overload
 
-If remote submodule repositories are updated, don't forget to pull submodules with:
+Adding local features / matchers as submodules is very easy. For example, to add the [GlueStick](https://github.com/cvg/GlueStick):
 
 ``` bash
-git submodule update --init --recursive # init and download
-git submodule update --remote # update
-```
-
-if you only want to update one submodule, use `git submodule update --remote imcui/third_party/GlueStick`.
-
-To format code before committing, run:
-
-```bash
-pre-commit run -a # Auto-checks and fixes
+git submodule add https://github.com/cvg/GlueStick.git third_party/GlueStick
 ```
 
-## Contributors
-
-<a href="https://github.com/Vincentqyw/image-matching-webui/graphs/contributors">
-  <img src="https://contrib.rocks/image?repo=Vincentqyw/image-matching-webui" />
-</a>
+If remote submodule repositories are updated, don't forget to pull submodules with `git submodule update --remote`, if you only want to update one submodule, use `git submodule update --remote third_party/GlueStick`.
 
 ## Resources
 - [Image Matching: Local Features & Beyond](https://image-matching-workshop.github.io)
@@ -213,4 +153,4 @@ This code is built based on [Hierarchical-Localization](https://github.com/cvg/H
 [stars-shield]: https://img.shields.io/github/stars/Vincentqyw/image-matching-webui.svg?style=for-the-badge
 [stars-url]: https://github.com/Vincentqyw/image-matching-webui/stargazers
 [issues-shield]: https://img.shields.io/github/issues/Vincentqyw/image-matching-webui.svg?style=for-the-badge
-[issues-url]: https://github.com/Vincentqyw/image-matching-webui/issues
+[issues-url]: https://github.com/Vincentqyw/image-matching-webui/issues
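
The README change above keeps the "Add your own feature / matcher" instructions pointing at hloc/extractors/example.py. As a rough orientation only, the sketch below shows what such an extractor module typically looks like; it is not part of this diff, it assumes hloc's `BaseModel` interface (`default_conf`, `required_inputs`, `_init`, `_forward`), and the class name, configuration keys, and dummy outputs are hypothetical placeholders — consult hloc/extractors/example.py for the real template.

```python
# Illustrative sketch only -- see hloc/extractors/example.py for the actual template.
# Assumes hloc's BaseModel interface; all names below are hypothetical.
import torch

from ..utils.base_model import BaseModel  # hloc extractor base class (assumption)


class MyFeature(BaseModel):
    default_conf = {
        "max_keypoints": 1024,
        "keypoint_threshold": 0.005,
    }
    required_inputs = ["image"]

    def _init(self, conf):
        # Load weights / build the real network here.
        self.net = torch.nn.Identity()

    def _forward(self, data):
        # data["image"] is a (B, C, H, W) tensor; a real extractor returns
        # detected keypoints, their scores, and descriptors.
        b, _, h, w = data["image"].shape
        n = self.conf["max_keypoints"]
        keypoints = torch.rand(b, n, 2) * torch.tensor([w - 1.0, h - 1.0])
        return {
            "keypoints": keypoints,
            "scores": torch.ones(b, n),
            "descriptors": torch.rand(b, 128, n),
        }
```

A new extractor is then registered under `confs` in hloc/extract_features.py and exposed in ui/config.yaml, as the README text above describes.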
{imcui → api}/__init__.py RENAMED
File without changes
{imcui/api → api}/client.py RENAMED
@@ -120,7 +120,9 @@ def send_request_match(path0: str, path1: str) -> Dict[str, np.ndarray]:
         for key in list(pred.keys()):
             pred[key] = np.array(pred[key])
     else:
-        print(f"Error: Response code {response.status_code} - {response.text}")
+        print(
+            f"Error: Response code {response.status_code} - {response.text}"
+        )
     finally:
         files["image0"].close()
         files["image1"].close()
@@ -150,8 +152,7 @@ def send_request_extract(
         url=API_URL_EXTRACT,
         **inputs,
     )
-    # breakpoint()
-    # print("Keypoints detected: {}".format(len(response[0]["keypoints"])))
+    print("Keypoints detected: {}".format(len(response[0]["keypoints"])))
 
     # draw matching, debug only
     if viz:
@@ -181,8 +182,6 @@
 
 
 if __name__ == "__main__":
-    from pathlib import Path
-
     parser = argparse.ArgumentParser(
         description="Send text to stable audio server and receive generated audio."
    )
@@ -190,19 +189,13 @@ if __name__ == "__main__":
         "--image0",
         required=False,
         help="Path for the file's melody",
-        default=str(
-            Path(__file__).parents[1]
-            / "datasets/sacre_coeur/mapping_rot/02928139_3448003521_rot45.jpg"
-        ),
+        default="datasets/sacre_coeur/mapping_rot/02928139_3448003521_rot45.jpg",
     )
     parser.add_argument(
         "--image1",
         required=False,
         help="Path for the file's melody",
-        default=str(
-            Path(__file__).parents[1]
-            / "datasets/sacre_coeur/mapping_rot/02928139_3448003521_rot90.jpg"
-        ),
+        default="datasets/sacre_coeur/mapping_rot/02928139_3448003521_rot90.jpg",
     )
     args = parser.parse_args()
 
@@ -221,7 +214,7 @@ if __name__ == "__main__":
     # )
 
     # request extract
-    for i in range(1000):
+    for i in range(10):
         t1 = time.time()
         preds = send_request_extract(args.image0)
         t2 = time.time()
imcui/api/core.py β†’ api/server.py RENAMED
@@ -1,21 +1,50 @@
1
- # api.py
 
 
 
2
  import warnings
3
  from pathlib import Path
4
- from typing import Any, Dict, Optional
5
 
6
  import cv2
7
  import matplotlib.pyplot as plt
8
  import numpy as np
9
  import torch
 
 
 
 
 
10
 
11
- from ..hloc import extract_features, logger, match_dense, match_features
12
- from ..hloc.utils.viz import add_text, plot_keypoints
13
- from ..ui.utils import filter_matches, get_feature_model, get_model
14
- from ..ui.viz import display_matches, fig2im, plot_images
 
 
 
 
15
 
16
  warnings.simplefilter("ignore")
17
 
18
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  class ImageMatchingAPI(torch.nn.Module):
20
  default_conf = {
21
  "ransac": {
@@ -58,22 +87,32 @@ class ImageMatchingAPI(torch.nn.Module):
58
  if device == "cuda":
59
  memory_allocated = torch.cuda.memory_allocated(device)
60
  memory_reserved = torch.cuda.memory_reserved(device)
61
- logger.info(f"GPU memory allocated: {memory_allocated / 1024**2:.3f} MB")
62
- logger.info(f"GPU memory reserved: {memory_reserved / 1024**2:.3f} MB")
 
 
 
 
63
  self.pred = None
64
 
65
  def parse_match_config(self, conf):
66
  if conf["dense"]:
67
  return {
68
  **conf,
69
- "matcher": match_dense.confs.get(conf["matcher"]["model"]["name"]),
 
 
70
  "dense": True,
71
  }
72
  else:
73
  return {
74
  **conf,
75
- "feature": extract_features.confs.get(conf["feature"]["model"]["name"]),
76
- "matcher": match_features.confs.get(conf["matcher"]["model"]["name"]),
 
 
 
 
77
  "dense": False,
78
  }
79
 
@@ -86,12 +125,16 @@ class ImageMatchingAPI(torch.nn.Module):
86
  self.dense = self.conf["dense"]
87
  if self.conf["dense"]:
88
  try:
89
- self.conf["matcher"]["model"]["match_threshold"] = match_threshold
 
 
90
  except TypeError as e:
91
  logger.error(e)
92
  else:
93
  self.conf["feature"]["model"]["max_keypoints"] = max_keypoints
94
- self.conf["feature"]["model"]["keypoint_threshold"] = detect_threshold
 
 
95
  self.extract_conf = self.conf["feature"]
96
 
97
  self.match_conf = self.conf["matcher"]
@@ -127,17 +170,6 @@ class ImageMatchingAPI(torch.nn.Module):
127
  pred = match_features.match_images(self.matcher, pred0, pred1)
128
  return pred
129
 
130
- def _convert_pred(self, pred):
131
- ret = {
132
- k: v.cpu().detach()[0].numpy() if isinstance(v, torch.Tensor) else v
133
- for k, v in pred.items()
134
- }
135
- ret = {
136
- k: v[0].cpu().detach().numpy() if isinstance(v, list) else v
137
- for k, v in ret.items()
138
- }
139
- return ret
140
-
141
  @torch.inference_mode()
142
  def extract(self, img0: np.ndarray, **kwargs) -> Dict[str, np.ndarray]:
143
  """Extract features from a single image.
@@ -158,13 +190,17 @@ class ImageMatchingAPI(torch.nn.Module):
158
  pred = extract_features.extract(
159
  self.extractor, img0, self.extract_conf["preprocessing"]
160
  )
161
- pred = self._convert_pred(pred)
 
 
 
162
  # back to origin scale
163
  s0 = pred["original_size"] / pred["size"]
164
  pred["keypoints_orig"] = (
165
  match_features.scale_keypoints(pred["keypoints"] + 0.5, s0) - 0.5
166
  )
167
  # TODO: rotate back
 
168
  binarize = kwargs.get("binarize", False)
169
  if binarize:
170
  assert "descriptors" in pred
@@ -263,7 +299,10 @@ class ImageMatchingAPI(torch.nn.Module):
263
  output_keypoints: np.ndarray = plot_images(
264
  [image0, image1], titles=titles, dpi=300
265
  )
266
- if "keypoints0_orig" in pred.keys() and "keypoints1_orig" in pred.keys():
 
 
 
267
  plot_keypoints([pred["keypoints0_orig"], pred["keypoints1_orig"]])
268
  text: str = (
269
  f"# keypoints0: {len(pred['keypoints0_orig'])} \n"
@@ -289,7 +328,9 @@ class ImageMatchingAPI(torch.nn.Module):
289
  )
290
  if log_path is not None:
291
  img_keypoints_path: Path = log_path / f"img_keypoints_{postfix}.png"
292
- img_matches_raw_path: Path = log_path / f"img_matches_raw_{postfix}.png"
 
 
293
  img_matches_ransac_path: Path = (
294
  log_path / f"img_matches_ransac_{postfix}.png"
295
  )
@@ -306,3 +347,153 @@ class ImageMatchingAPI(torch.nn.Module):
306
  output_matches_ransac[:, :, ::-1].copy(), # RGB -> BGR
307
  )
308
  plt.close("all")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # server.py
2
+ import base64
3
+ import io
4
+ import sys
5
  import warnings
6
  from pathlib import Path
7
+ from typing import Any, Dict, Optional, Union
8
 
9
  import cv2
10
  import matplotlib.pyplot as plt
11
  import numpy as np
12
  import torch
13
+ import uvicorn
14
+ from fastapi import FastAPI, File, UploadFile
15
+ from fastapi.exceptions import HTTPException
16
+ from fastapi.responses import JSONResponse
17
+ from PIL import Image
18
 
19
+ sys.path.append(str(Path(__file__).parents[1]))
20
+
21
+ from api.types import ImagesInput
22
+ from hloc import DEVICE, extract_features, logger, match_dense, match_features
23
+ from hloc.utils.viz import add_text, plot_keypoints
24
+ from ui import get_version
25
+ from ui.utils import filter_matches, get_feature_model, get_model
26
+ from ui.viz import display_matches, fig2im, plot_images
27
 
28
  warnings.simplefilter("ignore")
29
 
30
 
31
+ def decode_base64_to_image(encoding):
32
+ if encoding.startswith("data:image/"):
33
+ encoding = encoding.split(";")[1].split(",")[1]
34
+ try:
35
+ image = Image.open(io.BytesIO(base64.b64decode(encoding)))
36
+ return image
37
+ except Exception as e:
38
+ logger.warning(f"API cannot decode image: {e}")
39
+ raise HTTPException(
40
+ status_code=500, detail="Invalid encoded image"
41
+ ) from e
42
+
43
+
44
+ def to_base64_nparray(encoding: str) -> np.ndarray:
45
+ return np.array(decode_base64_to_image(encoding)).astype("uint8")
46
+
47
+
48
  class ImageMatchingAPI(torch.nn.Module):
49
  default_conf = {
50
  "ransac": {
 
87
  if device == "cuda":
88
  memory_allocated = torch.cuda.memory_allocated(device)
89
  memory_reserved = torch.cuda.memory_reserved(device)
90
+ logger.info(
91
+ f"GPU memory allocated: {memory_allocated / 1024**2:.3f} MB"
92
+ )
93
+ logger.info(
94
+ f"GPU memory reserved: {memory_reserved / 1024**2:.3f} MB"
95
+ )
96
  self.pred = None
97
 
98
  def parse_match_config(self, conf):
99
  if conf["dense"]:
100
  return {
101
  **conf,
102
+ "matcher": match_dense.confs.get(
103
+ conf["matcher"]["model"]["name"]
104
+ ),
105
  "dense": True,
106
  }
107
  else:
108
  return {
109
  **conf,
110
+ "feature": extract_features.confs.get(
111
+ conf["feature"]["model"]["name"]
112
+ ),
113
+ "matcher": match_features.confs.get(
114
+ conf["matcher"]["model"]["name"]
115
+ ),
116
  "dense": False,
117
  }
118
 
 
125
  self.dense = self.conf["dense"]
126
  if self.conf["dense"]:
127
  try:
128
+ self.conf["matcher"]["model"][
129
+ "match_threshold"
130
+ ] = match_threshold
131
  except TypeError as e:
132
  logger.error(e)
133
  else:
134
  self.conf["feature"]["model"]["max_keypoints"] = max_keypoints
135
+ self.conf["feature"]["model"][
136
+ "keypoint_threshold"
137
+ ] = detect_threshold
138
  self.extract_conf = self.conf["feature"]
139
 
140
  self.match_conf = self.conf["matcher"]
 
170
  pred = match_features.match_images(self.matcher, pred0, pred1)
171
  return pred
172
 
 
 
 
 
 
 
 
 
 
 
 
173
  @torch.inference_mode()
174
  def extract(self, img0: np.ndarray, **kwargs) -> Dict[str, np.ndarray]:
175
  """Extract features from a single image.
 
190
  pred = extract_features.extract(
191
  self.extractor, img0, self.extract_conf["preprocessing"]
192
  )
193
+ pred = {
194
+ k: v.cpu().detach()[0].numpy() if isinstance(v, torch.Tensor) else v
195
+ for k, v in pred.items()
196
+ }
197
  # back to origin scale
198
  s0 = pred["original_size"] / pred["size"]
199
  pred["keypoints_orig"] = (
200
  match_features.scale_keypoints(pred["keypoints"] + 0.5, s0) - 0.5
201
  )
202
  # TODO: rotate back
203
+
204
  binarize = kwargs.get("binarize", False)
205
  if binarize:
206
  assert "descriptors" in pred
 
299
  output_keypoints: np.ndarray = plot_images(
300
  [image0, image1], titles=titles, dpi=300
301
  )
302
+ if (
303
+ "keypoints0_orig" in pred.keys()
304
+ and "keypoints1_orig" in pred.keys()
305
+ ):
306
  plot_keypoints([pred["keypoints0_orig"], pred["keypoints1_orig"]])
307
  text: str = (
308
  f"# keypoints0: {len(pred['keypoints0_orig'])} \n"
 
328
  )
329
  if log_path is not None:
330
  img_keypoints_path: Path = log_path / f"img_keypoints_{postfix}.png"
+ img_matches_raw_path: Path = (
+     log_path / f"img_matches_raw_{postfix}.png"
+ )
  img_matches_ransac_path: Path = (
      log_path / f"img_matches_ransac_{postfix}.png"
  )

  output_matches_ransac[:, :, ::-1].copy(),  # RGB -> BGR
  )
  plt.close("all")
+
+
+ class ImageMatchingService:
+     def __init__(self, conf: dict, device: str):
+         self.conf = conf
+         self.api = ImageMatchingAPI(conf=conf, device=device)
+         self.app = FastAPI()
+         self.register_routes()
+
+     def register_routes(self):
+
+         @self.app.get("/version")
+         async def version():
+             return {"version": get_version()}
+
+         @self.app.post("/v1/match")
+         async def match(
+             image0: UploadFile = File(...), image1: UploadFile = File(...)
+         ):
+             """
+             Handle the image matching request and return the processed result.
+
+             Args:
+                 image0 (UploadFile): The first image file for matching.
+                 image1 (UploadFile): The second image file for matching.
+
+             Returns:
+                 JSONResponse: A JSON response containing the filtered match results
+                     or an error message in case of failure.
+             """
+             try:
+                 # Load the images from the uploaded files
+                 image0_array = self.load_image(image0)
+                 image1_array = self.load_image(image1)
+
+                 # Perform image matching using the API
+                 output = self.api(image0_array, image1_array)
+
+                 # Keys to skip in the output
+                 skip_keys = ["image0_orig", "image1_orig"]
+
+                 # Postprocess the output to filter unwanted data
+                 pred = self.postprocess(output, skip_keys)
+
+                 # Return the filtered prediction as a JSON response
+                 return JSONResponse(content=pred)
+             except Exception as e:
+                 # Return an error message with status code 500 in case of exception
+                 return JSONResponse(content={"error": str(e)}, status_code=500)
+
+         @self.app.post("/v1/extract")
+         async def extract(input_info: ImagesInput):
+             """
+             Extract keypoints and descriptors from images.
+
+             Args:
+                 input_info: An object containing the image data and options.
+
+             Returns:
+                 A list of dictionaries containing the keypoints and descriptors.
+             """
+             try:
+                 preds = []
+                 for i, input_image in enumerate(input_info.data):
+                     # Load the image from the input data
+                     image_array = to_base64_nparray(input_image)
+                     # Extract keypoints and descriptors
+                     output = self.api.extract(
+                         image_array,
+                         max_keypoints=input_info.max_keypoints[i],
+                         binarize=input_info.binarize,
+                     )
+                     # Do not return the original image and image_orig
+                     # skip_keys = ["image", "image_orig"]
+                     skip_keys = []
+
+                     # Postprocess the output
+                     pred = self.postprocess(output, skip_keys)
+                     preds.append(pred)
+                 # Return the list of extracted features
+                 return JSONResponse(content=preds)
+             except Exception as e:
+                 # Return an error message if an exception occurs
+                 return JSONResponse(content={"error": str(e)}, status_code=500)
+
+     def load_image(self, file_path: Union[str, UploadFile]) -> np.ndarray:
+         """
+         Reads an image from a file path or an UploadFile object.
+
+         Args:
+             file_path: A file path or an UploadFile object.
+
+         Returns:
+             A numpy array representing the image.
+         """
+         if isinstance(file_path, str):
+             file_path = Path(file_path).resolve(strict=False)
+         else:
+             file_path = file_path.file
+         with Image.open(file_path) as img:
+             image_array = np.array(img)
+         return image_array
+
+     def postprocess(
+         self, output: dict, skip_keys: list, binarize: bool = True
+     ) -> dict:
+         pred = {}
+         for key, value in output.items():
+             if key in skip_keys:
+                 continue
+             if isinstance(value, np.ndarray):
+                 pred[key] = value.tolist()
+         return pred
+
+     def run(self, host: str = "0.0.0.0", port: int = 8001):
+         uvicorn.run(self.app, host=host, port=port)
+
+
+ if __name__ == "__main__":
+     conf = {
+         "feature": {
+             "output": "feats-superpoint-n4096-rmax1600",
+             "model": {
+                 "name": "superpoint",
+                 "nms_radius": 3,
+                 "max_keypoints": 4096,
+                 "keypoint_threshold": 0.005,
+             },
+             "preprocessing": {
+                 "grayscale": True,
+                 "force_resize": True,
+                 "resize_max": 1600,
+                 "width": 640,
+                 "height": 480,
+                 "dfactor": 8,
+             },
+         },
+         "matcher": {
+             "output": "matches-NN-mutual",
+             "model": {
+                 "name": "nearest_neighbor",
+                 "do_mutual_check": True,
+                 "match_threshold": 0.2,
+             },
+         },
+         "dense": False,
+     }
+
+     service = ImageMatchingService(conf=conf, device=DEVICE)
+     service.run()
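A minimal, hypothetical Python client for the /v1/extract route added above may help readers try the service; it is not part of this diff. It assumes the server was started as shown (default port 8001), that the `requests` package is installed, and that the request body follows the ImagesInput model from api/types.py later in this diff.

# Hypothetical client sketch for /v1/extract (assumptions: local server on port 8001,
# `requests` installed; field names follow ImagesInput in api/types.py).
import base64

import requests


def encode_image(path: str) -> str:
    # The server decodes this base64 string back into a numpy array.
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")


payload = {
    "data": [encode_image("datasets/sacre_coeur/mapping/02928139_3448003521.jpg")],
    "max_keypoints": [512],
    "timestamps": ["0"],
    "binarize": True,
}
resp = requests.post("http://127.0.0.1:8001/v1/extract", json=payload)
resp.raise_for_status()
print(list(resp.json()[0].keys()))  # keypoint/descriptor arrays serialized as lists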
{imcui/api β†’ api}/test/CMakeLists.txt RENAMED
@@ -6,12 +6,11 @@ find_package(OpenCV REQUIRED)
 
 find_package(Boost REQUIRED COMPONENTS system)
 if(Boost_FOUND)
-  include_directories(${Boost_INCLUDE_DIRS})
+  include_directories(${Boost_INCLUDE_DIRS})
 endif()
 
 add_executable(client client.cpp)
 
-target_include_directories(client PRIVATE ${Boost_LIBRARIES}
-                           ${OpenCV_INCLUDE_DIRS})
+target_include_directories(client PRIVATE ${Boost_LIBRARIES} ${OpenCV_INCLUDE_DIRS})
 
 target_link_libraries(client PRIVATE curl jsoncpp b64 ${OpenCV_LIBS})
{imcui/api β†’ api}/test/build_and_run.sh RENAMED
File without changes
{imcui/api β†’ api}/test/client.cpp RENAMED
@@ -3,8 +3,7 @@
 #include "helper.h"
 
 int main() {
-  std::string img_path =
-      "../../../datasets/sacre_coeur/mapping_rot/02928139_3448003521_rot45.jpg";
+  std::string img_path = "../../../datasets/sacre_coeur/mapping_rot/02928139_3448003521_rot45.jpg";
   cv::Mat original_img = cv::imread(img_path, cv::IMREAD_GRAYSCALE);
 
   if (original_img.empty()) {
@@ -28,19 +27,23 @@ int main() {
   }
 
   // construct params
-  APIParams params{.data = {base64_img},
-                   .max_keypoints = {100, 100},
-                   .timestamps = {"0", "1"},
-                   .grayscale = {0},
-                   .image_hw = {{480, 640}, {240, 320}},
-                   .feature_type = 0,
-                   .rotates = {0.0f, 0.0f},
-                   .scales = {1.0f, 1.0f},
-                   .reference_points = {{1.23e+2f, 1.2e+1f},
-                                        {5.0e-1f, 3.0e-1f},
-                                        {2.3e+2f, 2.2e+1f},
-                                        {6.0e-1f, 4.0e-1f}},
-                   .binarize = {1}};
+  APIParams params{
+      .data = {base64_img},
+      .max_keypoints = {100, 100},
+      .timestamps = {"0", "1"},
+      .grayscale = {0},
+      .image_hw = {{480, 640}, {240, 320}},
+      .feature_type = 0,
+      .rotates = {0.0f, 0.0f},
+      .scales = {1.0f, 1.0f},
+      .reference_points = {
+          {1.23e+2f, 1.2e+1f},
+          {5.0e-1f, 3.0e-1f},
+          {2.3e+2f, 2.2e+1f},
+          {6.0e-1f, 4.0e-1f}
+      },
+      .binarize = {1}
+  };
 
   KeyPointResults kpts_results;
 
@@ -67,11 +70,11 @@ int main() {
   res = curl_easy_perform(curl);
 
   if (res != CURLE_OK)
-    fprintf(
-        stderr, "curl_easy_perform() failed: %s\n", curl_easy_strerror(res));
+    fprintf(stderr, "curl_easy_perform() failed: %s\n",
+            curl_easy_strerror(res));
   else {
     // std::cout << "Response from server: " << readBuffer << std::endl;
-    kpts_results = decode_response(readBuffer);
+    kpts_results = decode_response(readBuffer);
   }
   curl_easy_cleanup(curl);
 }
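For comparison, this is roughly the JSON body the C++ test client above posts, rendered as a Python sketch; the field names come from APIParams and the values mirror client.cpp, while the JSON types of grayscale and binarize (booleans, per the paramsToJson documentation in helper.h) are assumptions.

# Hypothetical Python rendering of the request body built by client.cpp.
# Values mirror the APIParams initializer; grayscale/binarize as booleans is an
# assumption based on the paramsToJson documentation in helper.h.
import json

params = {
    "data": ["<base64-encoded image>"],
    "max_keypoints": [100, 100],
    "timestamps": ["0", "1"],
    "grayscale": False,
    "image_hw": [[480, 640], [240, 320]],
    "feature_type": 0,
    "rotates": [0.0, 0.0],
    "scales": [1.0, 1.0],
    "reference_points": [
        [123.0, 12.0],
        [0.5, 0.3],
        [230.0, 22.0],
        [0.6, 0.4],
    ],
    "binarize": True,
}
print(json.dumps(params, indent=2))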
{imcui/api β†’ api}/test/helper.h RENAMED
@@ -1,15 +1,15 @@
1
 
2
- #include <b64/encode.h>
3
  #include <fstream>
 
 
4
  #include <jsoncpp/json/json.h>
5
  #include <opencv2/opencv.hpp>
6
- #include <sstream>
7
- #include <vector>
8
 
9
  // base64 to image
10
- #include <boost/archive/iterators/base64_from_binary.hpp>
11
  #include <boost/archive/iterators/binary_from_base64.hpp>
12
  #include <boost/archive/iterators/transform_width.hpp>
 
13
 
14
  /// Parameters used in the API
15
  struct APIParams {
@@ -50,19 +50,17 @@ struct APIParams {
50
  * @details Stores the keypoints and descriptors for each image.
51
  */
52
  class KeyPointResults {
53
- public:
54
- KeyPointResults() {
55
- }
56
 
57
  /**
58
  * @brief Constructor.
59
  *
60
  * @param kp The keypoints for each image.
61
  */
62
- KeyPointResults(const std::vector<std::vector<cv::KeyPoint>>& kp,
63
  const std::vector<cv::Mat>& desc)
64
- : keypoints(kp), descriptors(desc) {
65
- }
66
 
67
  /**
68
  * @brief Append keypoints to the result.
@@ -100,12 +98,13 @@ class KeyPointResults {
100
  return descriptors;
101
  }
102
 
103
- private:
104
  std::vector<std::vector<cv::KeyPoint>> keypoints;
105
  std::vector<cv::Mat> descriptors;
106
  std::vector<std::vector<float>> scores;
107
  };
108
 
 
109
  /**
110
  * @brief Decodes a base64 encoded string.
111
  *
@@ -127,6 +126,8 @@ std::string base64_decode(const std::string& base64) {
127
  return std::string(It(base64.begin()), It(base64.begin() + end));
128
  }
129
 
 
 
130
  /**
131
  * @brief Decodes a base64 string into an OpenCV image
132
  *
@@ -149,6 +150,7 @@ cv::Mat base64_to_image(const std::string& base64) {
149
  return img;
150
  }
151
 
 
152
  /**
153
  * @brief Encodes an OpenCV image into a base64 string
154
  *
@@ -161,7 +163,7 @@ cv::Mat base64_to_image(const std::string& base64) {
161
  *
162
  * @throws std::runtime_error if the image is empty or encoding fails
163
  */
164
- std::string image_to_base64(cv::Mat& img) {
165
  if (img.empty()) {
166
  throw std::runtime_error("Failed to read image");
167
  }
@@ -174,8 +176,7 @@ std::string image_to_base64(cv::Mat& img) {
174
 
175
  // Encode the bytes as a base64 string
176
  using namespace boost::archive::iterators;
177
- using It =
178
- base64_from_binary<transform_width<std::vector<uchar>::const_iterator, 6, 8>>;
179
  std::string base64(It(buf.begin()), It(buf.end()));
180
 
181
  // Pad the string with '=' characters to a multiple of 4 bytes
@@ -184,6 +185,7 @@ std::string image_to_base64(cv::Mat& img) {
184
  return base64;
185
  }
186
 
 
187
  /**
188
  * @brief Callback function for libcurl to write data to a string
189
  *
@@ -225,7 +227,8 @@ size_t WriteCallback(void* contents, size_t size, size_t nmemb, std::string* s)
225
  * @param val The value to convert
226
  * @return The converted Json::Value
227
  */
228
- template <typename T> Json::Value toJson(const T& val) {
 
229
  return Json::Value(val);
230
  }
231
 
@@ -238,7 +241,8 @@ template <typename T> Json::Value toJson(const T& val) {
238
  * @param vec The vector to convert to Json::Value
239
  * @return The Json::Value representing the vector
240
  */
241
- template <typename T> Json::Value vectorToJson(const std::vector<T>& vec) {
 
242
  Json::Value json(Json::arrayValue);
243
  for (const auto& item : vec) {
244
  json.append(item);
@@ -249,9 +253,8 @@ template <typename T> Json::Value vectorToJson(const std::vector<T>& vec) {
249
  /**
250
  * @brief Converts a nested vector to a Json::Value
251
  *
252
- * This function takes a nested vector of type T and converts it to a
253
- * Json::Value. Each sub-vector is converted to a Json::Value array and appended
254
- * to the main Json::Value array.
255
  *
256
  * @param vec The nested vector to convert to Json::Value
257
  * @return The Json::Value representing the nested vector
@@ -265,23 +268,22 @@ Json::Value nestedVectorToJson(const std::vector<std::vector<T>>& vec) {
265
  return json;
266
  }
267
 
 
 
268
  /**
269
  * @brief Converts the APIParams struct to a Json::Value
270
  *
271
  * This function takes an APIParams struct and converts it to a Json::Value.
272
  * The Json::Value is a JSON object with the following fields:
273
  * - data: a JSON array of base64 encoded images
274
- * - max_keypoints: a JSON array of integers, max number of keypoints for each
275
- * image
276
  * - timestamps: a JSON array of timestamps, one for each image
277
  * - grayscale: a JSON boolean, whether to convert images to grayscale
278
- * - image_hw: a nested JSON array, each sub-array contains the height and width
279
- * of an image
280
  * - feature_type: a JSON integer, the type of feature detector to use
281
  * - rotates: a JSON array of doubles, the rotation of each image
282
  * - scales: a JSON array of doubles, the scale of each image
283
- * - reference_points: a nested JSON array, each sub-array contains the
284
- * reference points of an image
285
  * - binarize: a JSON boolean, whether to binarize the descriptors
286
  *
287
  * @param params The APIParams struct to convert
@@ -302,7 +304,8 @@ Json::Value paramsToJson(const APIParams& params) {
302
  return json;
303
  }
304
 
305
- template <typename T> cv::Mat jsonToMat(Json::Value json) {
 
306
  int rows = json.size();
307
  int cols = json[0].size();
308
 
@@ -317,14 +320,14 @@ template <typename T> cv::Mat jsonToMat(Json::Value json) {
317
  }
318
 
319
  // Create a cv::Mat object that points to the data.
320
- cv::Mat mat(rows, cols, CV_8UC1,
321
- data.data()); // Change the type if necessary.
322
- // cv::Mat mat(cols, rows,CV_8UC1, data.data()); // Change the type if
323
- // necessary.
324
 
325
  return mat;
326
  }
327
 
 
 
328
  /**
329
  * @brief Decodes the response of the server and prints the keypoints
330
  *
@@ -334,7 +337,7 @@ template <typename T> cv::Mat jsonToMat(Json::Value json) {
334
  * @param response The response of the server
335
  * @return The keypoints and descriptors
336
  */
337
- KeyPointResults decode_response(const std::string& response, bool viz = true) {
338
  Json::CharReaderBuilder builder;
339
  Json::CharReader* reader = builder.newCharReader();
340
 
@@ -342,8 +345,8 @@ KeyPointResults decode_response(const std::string& response, bool viz = true) {
342
  std::string errors;
343
 
344
  // Parse the JSON response
345
- bool parsingSuccessful = reader->parse(
346
- response.c_str(), response.c_str() + response.size(), &jsonData, &errors);
347
  delete reader;
348
 
349
  if (!parsingSuccessful) {
@@ -373,23 +376,25 @@ KeyPointResults decode_response(const std::string& response, bool viz = true) {
373
  for (const auto& keypoint : jkeypoints_orig) {
374
  if (counter < 10) {
375
  // Print the first 10 keypoints
376
- std::cout << keypoint[0].asFloat() << ", " << keypoint[1].asFloat()
377
- << std::endl;
378
  }
379
  counter++;
380
  // Convert the Json::Value to a cv::KeyPoint
381
- vkeypoints.emplace_back(
382
- cv::KeyPoint(keypoint[0].asFloat(), keypoint[1].asFloat(), 0.0));
383
  }
384
 
385
  if (viz && jsonItem.isMember("image_orig")) {
 
386
  auto jimg_orig = jsonItem["image_orig"];
387
  cv::Mat img = jsonToMat<uchar>(jimg_orig);
388
  cv::imwrite("viz_image_orig.jpg", img);
389
 
390
  // Draw keypoints on the image
391
  cv::Mat imgWithKeypoints;
392
- cv::drawKeypoints(img, vkeypoints, imgWithKeypoints, cv::Scalar(0, 0, 255));
 
393
 
394
  // Write the image with keypoints
395
  std::string filename = "viz_image_orig_keypoints.jpg";
@@ -397,7 +402,7 @@ KeyPointResults decode_response(const std::string& response, bool viz = true) {
397
  }
398
 
399
  // Iterate over the descriptors
400
- cv::Mat descriptors = jsonToMat<uchar>(jdescriptors);
401
  kpts_results.append_keypoints(vkeypoints);
402
  kpts_results.append_descriptors(descriptors);
403
  }
 
1
 
2
+ #include <sstream>
3
  #include <fstream>
4
+ #include <vector>
5
+ #include <b64/encode.h>
6
  #include <jsoncpp/json/json.h>
7
  #include <opencv2/opencv.hpp>
 
 
8
 
9
  // base64 to image
 
10
  #include <boost/archive/iterators/binary_from_base64.hpp>
11
  #include <boost/archive/iterators/transform_width.hpp>
12
+ #include <boost/archive/iterators/base64_from_binary.hpp>
13
 
14
  /// Parameters used in the API
15
  struct APIParams {
 
50
  * @details Stores the keypoints and descriptors for each image.
51
  */
52
  class KeyPointResults {
53
+ public:
54
+ KeyPointResults() {}
 
55
 
56
  /**
57
  * @brief Constructor.
58
  *
59
  * @param kp The keypoints for each image.
60
  */
61
+ KeyPointResults(const std::vector<std::vector<cv::KeyPoint>>& kp,
62
  const std::vector<cv::Mat>& desc)
63
+ : keypoints(kp), descriptors(desc) {}
 
64
 
65
  /**
66
  * @brief Append keypoints to the result.
 
98
  return descriptors;
99
  }
100
 
101
+ private:
102
  std::vector<std::vector<cv::KeyPoint>> keypoints;
103
  std::vector<cv::Mat> descriptors;
104
  std::vector<std::vector<float>> scores;
105
  };
106
 
107
+
108
  /**
109
  * @brief Decodes a base64 encoded string.
110
  *
 
126
  return std::string(It(base64.begin()), It(base64.begin() + end));
127
  }
128
 
129
+
130
+
131
  /**
132
  * @brief Decodes a base64 string into an OpenCV image
133
  *
 
150
  return img;
151
  }
152
 
153
+
154
  /**
155
  * @brief Encodes an OpenCV image into a base64 string
156
  *
 
163
  *
164
  * @throws std::runtime_error if the image is empty or encoding fails
165
  */
166
+ std::string image_to_base64(cv::Mat &img) {
167
  if (img.empty()) {
168
  throw std::runtime_error("Failed to read image");
169
  }
 
176
 
177
  // Encode the bytes as a base64 string
178
  using namespace boost::archive::iterators;
179
+ using It = base64_from_binary<transform_width<std::vector<uchar>::const_iterator, 6, 8>>;
 
180
  std::string base64(It(buf.begin()), It(buf.end()));
181
 
182
  // Pad the string with '=' characters to a multiple of 4 bytes
 
185
  return base64;
186
  }
187
 
188
+
189
  /**
190
  * @brief Callback function for libcurl to write data to a string
191
  *
 
227
  * @param val The value to convert
228
  * @return The converted Json::Value
229
  */
230
+ template <typename T>
231
+ Json::Value toJson(const T& val) {
232
  return Json::Value(val);
233
  }
234
 
 
241
  * @param vec The vector to convert to Json::Value
242
  * @return The Json::Value representing the vector
243
  */
244
+ template <typename T>
245
+ Json::Value vectorToJson(const std::vector<T>& vec) {
246
  Json::Value json(Json::arrayValue);
247
  for (const auto& item : vec) {
248
  json.append(item);
 
253
  /**
254
  * @brief Converts a nested vector to a Json::Value
255
  *
256
+ * This function takes a nested vector of type T and converts it to a Json::Value.
257
+ * Each sub-vector is converted to a Json::Value array and appended to the main Json::Value array.
 
258
  *
259
  * @param vec The nested vector to convert to Json::Value
260
  * @return The Json::Value representing the nested vector
 
268
  return json;
269
  }
270
 
271
+
272
+
273
  /**
274
  * @brief Converts the APIParams struct to a Json::Value
275
  *
276
  * This function takes an APIParams struct and converts it to a Json::Value.
277
  * The Json::Value is a JSON object with the following fields:
278
  * - data: a JSON array of base64 encoded images
279
+ * - max_keypoints: a JSON array of integers, max number of keypoints for each image
 
280
  * - timestamps: a JSON array of timestamps, one for each image
281
  * - grayscale: a JSON boolean, whether to convert images to grayscale
282
+ * - image_hw: a nested JSON array, each sub-array contains the height and width of an image
 
283
  * - feature_type: a JSON integer, the type of feature detector to use
284
  * - rotates: a JSON array of doubles, the rotation of each image
285
  * - scales: a JSON array of doubles, the scale of each image
286
+ * - reference_points: a nested JSON array, each sub-array contains the reference points of an image
 
287
  * - binarize: a JSON boolean, whether to binarize the descriptors
288
  *
289
  * @param params The APIParams struct to convert
 
304
  return json;
305
  }
306
 
307
+ template<typename T>
308
+ cv::Mat jsonToMat(Json::Value json) {
309
  int rows = json.size();
310
  int cols = json[0].size();
311
 
 
320
  }
321
 
322
  // Create a cv::Mat object that points to the data.
323
+ cv::Mat mat(rows, cols, CV_8UC1, data.data()); // Change the type if necessary.
324
+ // cv::Mat mat(cols, rows,CV_8UC1, data.data()); // Change the type if necessary.
 
 
325
 
326
  return mat;
327
  }
328
 
329
+
330
+
331
  /**
332
  * @brief Decodes the response of the server and prints the keypoints
333
  *
 
337
  * @param response The response of the server
338
  * @return The keypoints and descriptors
339
  */
340
+ KeyPointResults decode_response(const std::string& response, bool viz=true) {
341
  Json::CharReaderBuilder builder;
342
  Json::CharReader* reader = builder.newCharReader();
343
 
 
345
  std::string errors;
346
 
347
  // Parse the JSON response
348
+ bool parsingSuccessful = reader->parse(response.c_str(),
349
+ response.c_str() + response.size(), &jsonData, &errors);
350
  delete reader;
351
 
352
  if (!parsingSuccessful) {
 
376
  for (const auto& keypoint : jkeypoints_orig) {
377
  if (counter < 10) {
378
  // Print the first 10 keypoints
379
+ std::cout << keypoint[0].asFloat() << ", "
380
+ << keypoint[1].asFloat() << std::endl;
381
  }
382
  counter++;
383
  // Convert the Json::Value to a cv::KeyPoint
384
+ vkeypoints.emplace_back(cv::KeyPoint(keypoint[0].asFloat(),
385
+ keypoint[1].asFloat(), 0.0));
386
  }
387
 
388
  if (viz && jsonItem.isMember("image_orig")) {
389
+
390
  auto jimg_orig = jsonItem["image_orig"];
391
  cv::Mat img = jsonToMat<uchar>(jimg_orig);
392
  cv::imwrite("viz_image_orig.jpg", img);
393
 
394
  // Draw keypoints on the image
395
  cv::Mat imgWithKeypoints;
396
+ cv::drawKeypoints(img, vkeypoints,
397
+ imgWithKeypoints, cv::Scalar(0, 0, 255));
398
 
399
  // Write the image with keypoints
400
  std::string filename = "viz_image_orig_keypoints.jpg";
 
402
  }
403
 
404
  // Iterate over the descriptors
405
+ cv::Mat descriptors = jsonToMat<uchar>(jdescriptors);
406
  kpts_results.append_keypoints(vkeypoints);
407
  kpts_results.append_descriptors(descriptors);
408
  }
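The helpers above shuttle images over the wire as base64-encoded bytes. A rough Python counterpart of image_to_base64 / base64_to_image is sketched below for sanity-checking the format end to end; it assumes opencv-python and numpy are installed, and the ".jpg" container is an assumption since the exact cv::imencode format is not visible in this hunk.

# Hypothetical Python round-trip mirroring helper.h (".jpg" format is an assumption).
import base64

import cv2
import numpy as np


def image_to_base64(img: np.ndarray) -> str:
    # Encode the image to bytes, then base64-encode the byte buffer.
    ok, buf = cv2.imencode(".jpg", img)
    if not ok:
        raise RuntimeError("Failed to encode image")
    return base64.b64encode(buf.tobytes()).decode("utf-8")


def base64_to_image(encoded: str) -> np.ndarray:
    # Decode base64 back to bytes and let OpenCV rebuild the image.
    data = np.frombuffer(base64.b64decode(encoded), dtype=np.uint8)
    return cv2.imdecode(data, cv2.IMREAD_GRAYSCALE)


img = cv2.imread("datasets/sacre_coeur/mapping/02928139_3448003521.jpg", cv2.IMREAD_GRAYSCALE)
restored = base64_to_image(image_to_base64(img))
assert restored.shape == img.shape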
imcui/api/__init__.py β†’ api/types.py RENAMED
@@ -1,15 +1,7 @@
-import base64
-import io
 from typing import List
 
-import numpy as np
-from fastapi.exceptions import HTTPException
-from PIL import Image
 from pydantic import BaseModel
 
-from ..hloc import logger
-from .core import ImageMatchingAPI
-
 
 class ImagesInput(BaseModel):
     data: List[str] = []
@@ -22,26 +14,3 @@ class ImagesInput(BaseModel):
     scales: List[float] = []
     reference_points: List[List[float]] = []
     binarize: bool = False
-
-
-def decode_base64_to_image(encoding):
-    if encoding.startswith("data:image/"):
-        encoding = encoding.split(";")[1].split(",")[1]
-    try:
-        image = Image.open(io.BytesIO(base64.b64decode(encoding)))
-        return image
-    except Exception as e:
-        logger.warning(f"API cannot decode image: {e}")
-        raise HTTPException(status_code=500, detail="Invalid encoded image") from e
-
-
-def to_base64_nparray(encoding: str) -> np.ndarray:
-    return np.array(decode_base64_to_image(encoding)).astype("uint8")
-
-
-__all__ = [
-    "ImageMatchingAPI",
-    "ImagesInput",
-    "decode_base64_to_image",
-    "to_base64_nparray",
-]
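A quick sketch of validating a request body against the ImagesInput model kept here; the import path assumes the api/types.py location introduced by this rename, and the base64 string is dummy data.

# Hypothetical usage of ImagesInput (import path assumed from this rename).
from api.types import ImagesInput

body = {
    "data": ["<base64-encoded image>"],
    "max_keypoints": [512],
    "timestamps": ["0"],
    "binarize": False,
}
parsed = ImagesInput(**body)
print(parsed.max_keypoints, parsed.binarize)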
app.py CHANGED
@@ -1,6 +1,6 @@
 import argparse
 from pathlib import Path
-from imcui.ui.app_class import ImageMatchingApp
+from ui.app_class import ImageMatchingApp
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
@@ -19,13 +19,10 @@ if __name__ == "__main__":
     parser.add_argument(
         "--config",
         type=str,
-        default=Path(__file__).parent / "config/config.yaml",
+        default=Path(__file__).parent / "ui/config.yaml",
        help="config file",
     )
     args = parser.parse_args()
     ImageMatchingApp(
-        args.server_name,
-        args.server_port,
-        config=args.config,
-        example_data_root=Path("imcui/datasets"),
+        args.server_name, args.server_port, config=args.config
     ).run()
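Equivalently, the UI can be launched programmatically with the new defaults; the host and port below are illustrative values, and the config path assumes the ui/config.yaml default introduced above.

# Hypothetical programmatic launch mirroring app.py (host/port values are illustrative).
from pathlib import Path

from ui.app_class import ImageMatchingApp

ImageMatchingApp(
    "0.0.0.0", 7860, config=Path("ui/config.yaml")
).run()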
build_docker.sh CHANGED
@@ -1,3 +1,3 @@
 docker build -t image-matching-webui:latest . --no-cache
 docker tag image-matching-webui:latest vincentqin/image-matching-webui:latest
-docker push vincentqin/image-matching-webui:latest
+docker push vincentqin/image-matching-webui:latest
{imcui/datasets β†’ datasets}/.gitignore RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/README.md RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping/02928139_3448003521.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping/03903474_1471484089.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping/10265353_3838484249.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping/17295357_9106075285.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping/32809961_8274055477.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping/44120379_8371960244.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping/51091044_3486849416.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping/60584745_2207571072.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping/71295362_4051449754.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping/93341989_396310999.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping_rot/02928139_3448003521_rot135.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping_rot/02928139_3448003521_rot180.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping_rot/02928139_3448003521_rot225.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping_rot/02928139_3448003521_rot270.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping_rot/02928139_3448003521_rot315.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping_rot/02928139_3448003521_rot45.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping_rot/02928139_3448003521_rot90.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping_rot/03903474_1471484089_rot135.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping_rot/03903474_1471484089_rot180.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping_rot/03903474_1471484089_rot225.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping_rot/03903474_1471484089_rot270.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping_rot/03903474_1471484089_rot315.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping_rot/03903474_1471484089_rot45.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping_rot/03903474_1471484089_rot90.jpg RENAMED
File without changes
{imcui/datasets β†’ datasets}/sacre_coeur/mapping_rot/10265353_3838484249_rot135.jpg RENAMED
File without changes