language | filename | code
---|---|---|
Ciphey/.all-contributorsrc | {
"files": [
"README.md"
],
"imageSize": 100,
"commit": false,
"contributors": [
{
"login": "cyclic3",
"name": "cyclic3",
"avatar_url": "https://avatars1.githubusercontent.com/u/15613874?v=4",
"profile": "https://github.com/Cyclic3",
"contributions": [
"design",
"maintenance",
"code",
"ideas"
]
},
{
"login": "brandonskerritt",
"name": "Brandon",
"avatar_url": "https://avatars3.githubusercontent.com/u/10378052?v=4",
"profile": "https://skerritt.blog",
"contributions": [
"design",
"maintenance",
"code",
"ideas"
]
},
{
"login": "michalani",
"name": "michalani",
"avatar_url": "https://avatars0.githubusercontent.com/u/27767884?v=4",
"profile": "https://github.com/michalani",
"contributions": [
"code"
]
},
{
"login": "ashb07",
"name": "ashb07",
"avatar_url": "https://avatars2.githubusercontent.com/u/24845568?v=4",
"profile": "https://github.com/ashb07",
"contributions": [
"code"
]
},
{
"login": "TheAlcanian",
"name": "Shardion",
"avatar_url": "https://avatars3.githubusercontent.com/u/22127191?v=4",
"profile": "https://github.com/TheAlcanian",
"contributions": [
"bug"
]
},
{
"login": "Bryzizzle",
"name": "Bryan",
"avatar_url": "https://avatars0.githubusercontent.com/u/57810197?v=4",
"profile": "https://github.com/Bryzizzle",
"contributions": [
"translation",
"doc"
]
},
{
"login": "lukasgabriel",
"name": "Lukas Gabriel",
"avatar_url": "https://avatars0.githubusercontent.com/u/52338810?v=4",
"profile": "https://lukasgabriel.net",
"contributions": [
"code",
"bug",
"translation",
"ideas"
]
},
{
"login": "DarshanBhoi",
"name": "Darshan",
"avatar_url": "https://avatars2.githubusercontent.com/u/70128281?v=4",
"profile": "https://github.com/DarshanBhoi",
"contributions": [
"bug"
]
},
{
"login": "SkeletalDemise",
"name": "SkeletalDemise",
"avatar_url": "https://avatars1.githubusercontent.com/u/29117662?v=4",
"profile": "https://github.com/SkeletalDemise",
"contributions": [
"code"
]
},
{
"login": "cclauss",
"name": "Christian Clauss",
"avatar_url": "https://avatars3.githubusercontent.com/u/3709715?v=4",
"profile": "https://www.patreon.com/cclauss",
"contributions": [
"code",
"bug"
]
},
{
"login": "machinexa2",
"name": "Machinexa2",
"avatar_url": "https://avatars1.githubusercontent.com/u/60662297?v=4",
"profile": "http://machinexa.xss.ht",
"contributions": [
"content"
]
},
{
"login": "anantverma275",
"name": "Anant Verma",
"avatar_url": "https://avatars1.githubusercontent.com/u/18184503?v=4",
"profile": "https://github.com/anantverma275",
"contributions": [
"code",
"bug"
]
},
{
"login": "XVXTOR",
"name": "XVXTOR",
"avatar_url": "https://avatars1.githubusercontent.com/u/40268197?v=4",
"profile": "https://github.com/XVXTOR",
"contributions": [
"doc"
]
},
{
"login": "Itamikame",
"name": "Itamikame",
"avatar_url": "https://avatars2.githubusercontent.com/u/59034423?v=4",
"profile": "https://github.com/Itamikame",
"contributions": [
"code"
]
},
{
"login": "MikeMerz",
"name": "MikeMerz",
"avatar_url": "https://avatars3.githubusercontent.com/u/50526795?v=4",
"profile": "https://github.com/MikeMerz",
"contributions": [
"code"
]
},
{
"login": "jacobggman",
"name": "Jacob Galam",
"avatar_url": "https://avatars2.githubusercontent.com/u/30216976?v=4",
"profile": "https://github.com/jacobggman",
"contributions": [
"code",
"bug"
]
},
{
"login": "TuxTheXplorer",
"name": "TuxTheXplorer",
"avatar_url": "https://avatars1.githubusercontent.com/u/37508897?v=4",
"profile": "https://tuxthexplorer.github.io/",
"contributions": [
"translation"
]
},
{
"login": "Itamai",
"name": "Itamai",
"avatar_url": "https://avatars3.githubusercontent.com/u/53093696?v=4",
"profile": "https://github.com/Itamai",
"contributions": [
"code",
"bug"
]
},
{
"login": "Termack",
"name": "Filipe",
"avatar_url": "https://avatars2.githubusercontent.com/u/26333901?v=4",
"profile": "https://github.com/Termack",
"contributions": [
"translation"
]
},
{
"login": "malathit",
"name": "Malathi",
"avatar_url": "https://avatars0.githubusercontent.com/u/2684148?v=4",
"profile": "https://github.com/malathit",
"contributions": [
"code"
]
},
{
"login": "HexChaos",
"name": "Jack",
"avatar_url": "https://avatars1.githubusercontent.com/u/8947820?v=4",
"profile": "https://hexchaos.xyz/",
"contributions": [
"translation"
]
},
{
"login": "yafkari",
"name": "Younes",
"avatar_url": "https://avatars3.githubusercontent.com/u/41365655?v=4",
"profile": "https://github.com/yafkari",
"contributions": [
"translation"
]
},
{
"login": "Marnick39",
"name": "Marnick Vandecauter",
"avatar_url": "https://avatars2.githubusercontent.com/u/17315511?v=4",
"profile": "https://gitlab.com/Marnick39",
"contributions": [
"translation"
]
},
{
"login": "mav8557",
"name": "Michael V",
"avatar_url": "https://avatars0.githubusercontent.com/u/47306745?v=4",
"profile": "https://github.com/mav8557",
"contributions": [
"code"
]
},
{
"login": "chuinzer",
"name": "chuinzer",
"avatar_url": "https://avatars2.githubusercontent.com/u/64257785?v=4",
"profile": "https://github.com/chuinzer",
"contributions": [
"translation"
]
},
{
"login": "blackcat-917",
"name": "blackcat-917",
"avatar_url": "https://avatars1.githubusercontent.com/u/53786619?v=4",
"profile": "https://github.com/blackcat-917",
"contributions": [
"translation",
"doc"
]
},
{
"login": "Ozzyz",
"name": "Γβ¦smund Brekke",
"avatar_url": "https://avatars3.githubusercontent.com/u/6113447?v=4",
"profile": "https://github.com/Ozzyz",
"contributions": [
"code"
]
},
{
"login": "sashreek1",
"name": "Sashreek Shankar",
"avatar_url": "https://avatars1.githubusercontent.com/u/45600974?v=4",
"profile": "https://github.com/sashreek1",
"contributions": [
"code"
]
},
{
"login": "cryptobadger",
"name": "cryptobadger",
"avatar_url": "https://avatars2.githubusercontent.com/u/26308101?v=4",
"profile": "https://github.com/cryptobadger",
"contributions": [
"code",
"bug"
]
},
{
"login": "e1fy",
"name": "elf",
"avatar_url": "https://avatars3.githubusercontent.com/u/61194758?v=4",
"profile": "https://github.com/e1fy",
"contributions": [
"code"
]
},
{
"login": "rogercyyu",
"name": "Roger Yu",
"avatar_url": "https://avatars0.githubusercontent.com/u/45835736?v=4",
"profile": "https://github.com/rogercyyu",
"contributions": [
"code"
]
},
{
"login": "JesseEmond",
"name": "dysleixa",
"avatar_url": "https://avatars.githubusercontent.com/u/1843555?v=4",
"profile": "https://github.com/JesseEmond",
"contributions": [
"code"
]
},
{
"login": "mohzulfikar",
"name": "Mohammad Zulfikar",
"avatar_url": "https://avatars.githubusercontent.com/u/48849323?v=4",
"profile": "http://mohzulfikar.me",
"contributions": [
"doc"
]
},
{
"login": "AABur",
"name": "Alexander Burchenko",
"avatar_url": "https://avatars.githubusercontent.com/u/41373199?v=4",
"profile": "https://github.com/AABur",
"contributions": [
"translation"
]
}
],
"contributorsPerLine": 7,
"projectName": "Ciphey",
"projectOwner": "Ciphey",
"repoType": "github",
"repoHost": "https://github.com",
"skipCi": true
} |
|
Ciphey/.gitignore | # Created by https://www.gitignore.io/api/git,python,virtualenv
# Edit at https://www.gitignore.io/?templates=git,python,virtualenv
### Git ###
# Created by git for backups. To disable backups in Git:
# $ git config --global mergetool.keepBackup false
*.orig
# Created by https://www.gitignore.io/api/venv
# Edit at https://www.gitignore.io/?templates=venv
### venv ###
# Virtualenv
# http://iamzed.com/2009/05/07/a-primer-on-virtualenv/
.Python
pyvenv.cfg
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
pip-selfcheck.json
ciphey.egg-info
dist/
dist
hansard.txt
poetry.lock
# End of https://www.gitignore.io/api/venv
# Created by git when using merge tools for conflicts
*.BACKUP.*
*.BASE.*
*.LOCAL.*
*.REMOTE.*
*_BACKUP_*.txt
*_BASE_*.txt
*_LOCAL_*.txt
*_REMOTE_*.txt
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
### VirtualEnv ###
# Virtualenv
# http://iamzed.com/2009/05/07/a-primer-on-virtualenv/
[Bb]in
[Ii]nclude
[Ll]ib
[Ll]ib64
[Ll]ocal
[Ss]cripts
pyvenv.cfg
pip-selfcheck.json
# End of https://www.gitignore.io/api/git,python,virtualenv
.vscode/
.vim/coc-settings.json
# Poetry stuff
dist/
poetry.lock
# Nano
*.swp
# PyCharm
.idea/
ciphey/LanguageChecker/create\?max_size=60&spelling=US&spelling=GBs&max_variant=2&diacritic=both&special=hacker&special=roman-numerals&download=wordlist&encoding=utf-8&format=inline
ciphey/LanguageChecker/create\?max_size=80&spelling=US&spelling=GBs&spelling=GBz&spelling=CA&spelling=AU&max_variant=2&diacritic=both&special=hacker&special=roman-numerals&download=wordlist&encoding=utf-8&format=inline
ciphey/LanguageChecker/aspell.txt
dictionary.txt
aspell.txt
ciphey.spec
ciphey/__main__.spec
__main__.spec
.entry_point.spec/entry_point.spec
BEANOS INVADES THE FORTNITE ITEM SHOP AT 8_00 PM.EXE-uG0WJcr-cuI.f299.mp4.part
run.yml
tests/interface.rst
# Test Generator
test_main_generated.py
run.yml
tests/interface.rst
test.py
# Mac OS Files
.DS_Store
.AppleDouble
.LSOverride
Icon
._* |
|
YAML | Ciphey/codecov.yml | codecov:
require_ci_to_pass: yes
coverage:
precision: 2
round: down
range: "40...50"
status:
patch: no
changes: no
project:
default:
threshold: 2%
paths:
- "ciphey"
parsers:
gcov:
branch_detection:
conditional: yes
loop: yes
method: no
macro: no
comment:
layout: "reach,diff,flags,tree"
behavior: default
require_changes: no |
Markdown | Ciphey/CODE_OF_CONDUCT.md | # Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at brandon_skerrit. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at <https://www.contributor-covenant.org/version/1/4/code-of-conduct.html>
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
<https://www.contributor-covenant.org/faq> |
Ciphey/license | Copyright 2020 Brandon Skerritt
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
|
Python | Ciphey/noxfile.py | """
The file for Nox
"""
from typing import Any
import nox
from nox.sessions import Session
locations = "ciphey/", "tests/", "docs/"
nox.options.sessions = ["tests"]
package = "ciphey"
def install_with_constraints(session: Session, *args: str, **kwargs: Any) -> None:
"""Install packages constrained by Poetry's lock file.
This function is a wrapper for nox.sessions.Session.install. It
invokes pip to install packages inside of the session's virtualenv.
Additionally, pip is passed a constraints file generated from
Poetry's lock file, to ensure that the packages are pinned to the
versions specified in poetry.lock. This allows you to manage the
packages as Poetry development dependencies.
Arguments:
session: The Session object.
args: Command-line arguments for pip.
kwargs: Additional keyword arguments for Session.install.
"""
session.run(
"poetry",
"export",
"--dev",
"--format=requirements.txt",
"--output=requirements.txt",
external=True,
)
session.install("--constraint=requirements.txt", *args, **kwargs)
# noxfile.py
@nox.session
def black(session):
args = session.posargs or locations
session.install("black")
session.run("black", *args)
@nox.session(python="3.8")
def coverage(session: Session) -> None:
"""Upload coverage data."""
install_with_constraints(session, "coverage[toml]", "codecov")
session.run("pip3", "install", "cipheydists")
session.run("coverage", "xml", "--fail-under=0")
session.run("codecov", *session.posargs)
# noxfile.py
@nox.session
def docs(session: Session) -> None:
"""Build the documentation."""
install_with_constraints(session, "sphinx")
session.run("sphinx-build", "docs", "docs/_build")
@nox.session
def tests(session):
session.run("pip3", "install", "cipheydists")
session.run("poetry", "install", external=True)
session.run("poetry", "run", "pytest", "--cov=ciphey") |
TOML | Ciphey/pyproject.toml | [tool.poetry]
name = "ciphey"
version = "5.14.1"
description = "Automated Decryption Tool"
authors = ["Brandon <brandon@skerritt.blog>"]
license = "MIT"
documentation = "https://github.com/Ciphey/Ciphey/wiki"
exclude = ["tests/hansard.txt"]
readme = "README.md"
[tool.poetry.dependencies]
python = "^3.7"
rich = ">=4,<11"
pyyaml = ">=5.3.1,<7.0.0"
pylint = "^2.6.0"
flake8 = ">=3.8.4,<5.0.0"
cipheydists = "^0.3.35"
cipheycore = "^0.3.2"
appdirs = "^1.4.4"
typing_inspect = { version = ">=0.6,<0.8", python = "~3.7" }
base58 = "^2.0.1"
base91 = "^1.0.1"
pybase62 = ">=0.4.3,<0.6.0"
click = ">=7.1.2,<9.0.0"
mock = "^4.0.3"
pywhat = "3.0.0"
xortool-ciphey = "^0.1.16"
[tool.poetry.dev-dependencies]
pytest-cov = "^3.0.0"
pytest = "^7.1.2"
black = "^21.4b2"
neovim = "^0.3.1"
codecov = "^2.1.11"
sphinx = "^5.0.1"
sphinx-autodoc-typehints = "^1.11.1"
nltk = "^3.5"
[tool.poetry.scripts]
ciphey = 'ciphey.ciphey:main'
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api" |
YAML | Ciphey/.github/dependabot.yml | version: 2
updates:
- package-ecosystem: pip
directory: "/"
schedule:
interval: daily
open-pull-requests-limit: 10
ignore:
- dependency-name: black
versions:
- 21.4b0
- 21.4b1 |
YAML | Ciphey/.github/FUNDING.yml | # These are supported funding model platforms
github: bee-san
custom: ["https://www.buymeacoffee.com/beecodes", "https://paypal.me/cipheydevs"] |
YAML | Ciphey/.github/lock.yml | # Configuration for Lock Threads - https://github.com/dessant/lock-threads-app
# Number of days of inactivity before a closed issue or pull request is locked
daysUntilLock: 60
# Skip issues and pull requests created before a given timestamp. Timestamp must
# follow ISO 8601 (`YYYY-MM-DD`). Set to `false` to disable
skipCreatedBefore: false
# Issues and pull requests with these labels will be ignored. Set to `[]` to disable
exemptLabels: []
# Label to add before locking, such as `outdated`. Set to `false` to disable
lockLabel: false
# Comment to post before locking. Set to `false` to disable
lockComment: >
This thread has been automatically locked since there has not been
any recent activity after it was closed. Please open a new issue for
related bugs.
# Assign `resolved` as the reason for locking. Set to `false` to disable
setLockReason: true
# Limit to only `issues` or `pulls`
# only: issues
# Optionally, specify configuration settings just for `issues` or `pulls`
# issues:
# exemptLabels:
# - help-wanted
# lockLabel: outdated
# pulls:
# daysUntilLock: 30
# Repository to extend settings from
# _extends: repo |
Markdown | Ciphey/.github/ISSUE_TEMPLATE/bug_report.md | ---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
---
**⚠️IMPORTANT⚠️ If you do not fill this out, we will automatically delete your issue. We will not help anyone who cannot fill out this template.**
- [ ] Have you read our [Wiki page "Common Issues & Their Solutions"?](https://github.com/Ciphey/Ciphey/wiki/Common-Issues-&-Their-Solutions)
**Describe the bug**
A clear and concise description of what the bug is.
**Plaintext**
**⚠️IMPORTANT⚠️ The code block below is non-negotiable for "Ciphey didn't decrypt..." problems. If you do not tell us your plaintext, we will not help you.**
```
Include your plaintext here, replacing this
```
**Version**
**⚠️IMPORTANT⚠️ We need this information because different environments will induce different bugs in Ciphey**
- OS/Distro: [e.g. Windows, Debian 11.0, Arch, OS X El Capitan]
- Python version: [python3 --version]
- Ciphey versions: [python3 -m pip show ciphey cipheycore cipheydists]
- Did you use Docker?
**Verbose Output**
**⚠️IMPORTANT⚠️ Verbose output will tell us why it's not working the way we expect it to.**
Run Ciphey with `ciphey -vvv` and paste the results into [Pastebin.com](https://pastebin.com) or a [GitHub Gist](https://gist.github.com/)
**To Reproduce**
Steps to reproduce the behavior:
1. What input did you use?
2. What flags / arguments did you use?
**Expected behavior**
A clear and concise description of what you expected to happen.
**Any other information?**
Add any other context about the problem here. |
Markdown | Ciphey/.github/ISSUE_TEMPLATE/feature_request.md | ---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: brandonskerritt
---
### Problem:
_What exactly do you think is missing? How does it impact you? Are there any hacks that people use instead?_
### Solution:
_How could this problem be solved? What could be added? How could it be integrated into the current system?_ |
Markdown | Ciphey/.github/ISSUE_TEMPLATE/maintenance_suggestion.md | ---
name: Maintenance suggestion
about: Suggest refactoring/restructuring that would help
title: ''
labels: maintenance
assignees: brandonskerritt
---
### Idea
_What exactly do you want to do?_
### Pros
_How would it help the code? Does it make things faster, or the code more maintainable?_
### Cons
_Would this break anything? Would it mean a rewrite of all the code?_ |
YAML | Ciphey/.github/workflows/coverage.yml | name: coverage
on: [push, pull_request]
jobs:
build:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
python-version: [3.6, 3.7, pypy3]
# exclude:
# - os: macos-latest
# python-version: 3.8
# - os: windows-latest
# python-version: 3.6
steps:
- uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Tests with Nox
coverage:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: '3.8'
architecture: x64
- run: pip3 install nox==2019.11.9
- run: pip3 install poetry==1.0.5
- run: nox --sessions tests coverage
env:
CODECOV_TOKEN: ${{secrets.CODECOV_TOKEN}} |
YAML | Ciphey/.github/workflows/release.yml | # .github/workflows/release.yml
name: Release
on:
release:
types: [published]
jobs:
release:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v1
with:
python-version: '3.8'
architecture: x64
- run: pip install nox
- run: pip install poetry
- run: nox
- run: poetry build
- run: poetry publish --username=__token__ --password=${{ secrets.PYPI_TOKEN }} |
YAML | Ciphey/.github/workflows/releasetestpypi.yml | # .github/workflows/test-pypi.yml
name: TestPyPI
on:
push:
branches:
- master
jobs:
test_pypi:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v1
with:
python-version: '3.8'
architecture: x64
- run: pip install poetry
- run: >-
poetry version patch &&
version=$(poetry version | awk '{print $2}') &&
poetry version $version.dev.$(date +%s)
- run: poetry build
- uses: pypa/gh-action-pypi-publish@v1.0.0a0
with:
user: __token__
password: ${{ secrets.TEST_PYPI_TOKEN }}
repository_url: https://test.pypi.org/legacy/ |
YAML | Ciphey/.github/workflows/terminaltest.yml | on: pull_request
name: Ciphey terminal test
jobs:
terminal_test:
name: On ubuntu-latest with python
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
python-version: [3.7, 3.8, 3.9]
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Build ciphey
run: |
python -m pip install poetry
poetry install
- name: Test ciphey with plain text
run: |
plain_text="hello. Testing Ciphey."
ciphey_out=$(poetry run ciphey -q -t "$plain_text")
if [ "$ciphey_out" == "$plain_text" ]
then
exit 0
else
echo "Ciphey decryption on plain text failed"
exit 1
fi
- name: Test ciphey with base64 encoded text
run: |
plain_text="hello. Testing Ciphey."
base64_encoded=$(echo -n "$plain_text" | base64)
ciphey_out=$(poetry run ciphey -q -t "$base64_encoded")
if [ "$ciphey_out" == "$plain_text" ]
then
exit 0
else
echo "Ciphey decryption on base64 encoded string failed"
exit 1
fi |
YAML | Ciphey/.github/workflows/tests2.yml | name: Tests
on: [push, pull_request]
jobs:
tests:
strategy:
fail-fast: false
matrix:
python-version: ['3.7', '3.8', '3.9']
os: [ubuntu-latest, windows-latest] #macos-latest,
runs-on: ${{ matrix.os }}
name: Python ${{ matrix.python-version }} on ${{ matrix.os }}
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
architecture: x64
- run: python -m pip install --upgrade pip
- run: pip install codespell flake8 nox poetry
- run: codespell --ignore-words-list="nd,te" --skip="translations,*.archive"
- run: flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
- run: python -m nox |
Python | Ciphey/ciphey/ciphey.py | """
[Ciphey ASCII-art banner]
https://github.com/Ciphey
https://github.com/Ciphey/Ciphey/wiki
The cycle goes:
main -> argparsing (if needed) -> call_encryption -> new Ciphey object -> decrypt() -> produceProbTable ->
one_level_of_decryption -> decrypt_normal
"""
import os
import warnings
from typing import Any, Optional, Union
import click
from appdirs import AppDirs
import logging
from rich.logging import RichHandler
from rich.console import Console
from . import iface
warnings.filterwarnings("ignore")
console = Console()
def decrypt(config: iface.Config, ctext: Any) -> Union[str, bytes]:
"""A simple alias for searching a ctext and makes the answer pretty"""
res: Optional[iface.SearchResult] = config.objs["searcher"].search(ctext)
if res is None:
return "Failed to crack"
if config.verbosity < 0:
return res.path[-1].result.value
else:
return iface.pretty_search_results(res)
def get_name(ctx, param, value):
# reads from stdin if value was not supplied
if not value and not click.get_text_stream("stdin").isatty():
# read stdin exactly once; a second read() would return an empty string
return click.get_text_stream("stdin").read().strip()
else:
return value
def print_help(ctx):
# prints help menu
# if no arguments are passed
click.echo(ctx.get_help())
ctx.exit()
@click.command()
@click.option(
"-t",
"--text",
help="The ciphertext you want to decrypt.",
type=str,
)
@click.option(
"-q", "--quiet", help="Decrease verbosity", type=int, count=True, default=None
)
@click.option(
"-g",
"--greppable",
help="Only print the answer (useful for grep)",
type=bool,
is_flag=True,
default=None,
)
@click.option("-v", "--verbose", count=True, type=int)
@click.option("-C", "--checker", help="Use the given checker", default=None)
@click.option(
"-c",
"--config",
help="Uses the given config file. Defaults to appdirs.user_config_dir('ciphey', 'ciphey')/'config.yml'",
)
@click.option("-w", "--wordlist", help="Uses the given wordlist")
@click.option(
"-p",
"--param",
help="Passes a parameter to the language checker",
multiple=True,
)
@click.option(
"-l",
"--list-params",
help="List the parameters of the selected module",
type=bool,
)
@click.option(
"--searcher",
help="Select the searching algorithm to use",
)
# HARLAN TODO XXX
# I switched this to a boolean flag system
# https://click.palletsprojects.com/en/7.x/options/#boolean-flags
# True for bytes input, False for str
@click.option(
"-b",
"--bytes",
help="Forces ciphey to use binary mode for the input",
is_flag=True,
default=None,
)
@click.option(
"--default-dist",
help="Sets the default character/byte distribution",
type=str,
default=None,
)
@click.option(
"-m",
"--module",
help="Adds a module from the given path",
type=click.Path(),
multiple=True,
)
@click.option(
"-A",
"--appdirs",
help="Print the location of where Ciphey wants the settings file to be",
type=bool,
is_flag=True,
)
@click.option("-f", "--file", type=click.File("rb"), required=False)
@click.argument("text_stdin", callback=get_name, required=False)
def main(**kwargs):
"""Ciphey - Automated Decryption Tool
Documentation:
https://github.com/Ciphey/Ciphey/wiki\n
Discord (support here, we're online most of the day):
http://discord.skerritt.blog\n
GitHub:
https://github.com/ciphey/ciphey\n
Ciphey is an automated decryption tool using smart artificial intelligence and natural language processing. Input encrypted text, get the decrypted text back.
Examples:\n
Basic Usage: ciphey -t "aGVsbG8gbXkgbmFtZSBpcyBiZWU="
"""
"""Function to deal with arguments. Either calls with args or not. Makes Pytest work.
It gets the arguments in the function definition using locals()
if withArgs is True, that means this is being called with command line args
so go to arg_parsing() to get those args
we then update locals() with the new command line args and remove "withArgs"
This function then calls call_encryption(**result) which passes our dict of args
to the function as its own arguments using dict unpacking.
Returns:
The output of the decryption.
"""
# if user wants to know where appdirs is
# print and exit
if "appdirs" in kwargs and kwargs["appdirs"]:
dirs = AppDirs("Ciphey", "Ciphey")
path_to_config = dirs.user_config_dir
print(
f"The settings.yml file should be at {os.path.join(path_to_config, 'settings.yml')}"
)
return None
# Now we create the config object
config = iface.Config()
# Load the settings file into the config
load_msg: str
cfg_arg = kwargs["config"]
if cfg_arg is None:
# Make sure that the config dir actually exists
os.makedirs(iface.Config.get_default_dir(), exist_ok=True)
config.load_file(create=True)
load_msg = f"Opened config file at {os.path.join(iface.Config.get_default_dir(), 'config.yml')}"
else:
config.load_file(cfg_arg)
load_msg = f"Opened config file at {cfg_arg}"
# Load the verbosity, so that we can start logging
verbosity = kwargs["verbose"]
quiet = kwargs["quiet"]
if verbosity is None:
if quiet is not None:
verbosity = -quiet
elif quiet is not None:
verbosity -= quiet
if kwargs["greppable"] is not None:
verbosity -= 999
# Use the existing value as a base
config.verbosity += verbosity
config.update_log_level(config.verbosity)
logging.info(load_msg)
logging.debug(f"Got cmdline args {kwargs}")
# Now we load the modules
module_arg = kwargs["module"]
if module_arg is not None:
config.modules += list(module_arg)
# We need to load formats BEFORE we instantiate objects
if kwargs["bytes"] is not None:
config.update_format("bytes")
# Next, load the objects
params = kwargs["param"]
if params is not None:
for i in params:
key, value = i.split("=", 1)
parent, name = key.split(".", 1)
config.update_param(parent, name, value)
config.update("checker", kwargs["checker"])
config.update("searcher", kwargs["searcher"])
config.update("default_dist", kwargs["default_dist"])
config.complete_config()
logging.debug(f"Command line opts: {kwargs}")
logging.debug(f"Config finalised: {config}")
# Finally, we load the plaintext
if kwargs["text"] is None:
if kwargs["file"] is not None:
kwargs["text"] = kwargs["file"].read()
elif kwargs["text_stdin"] is not None:
kwargs["text"] = kwargs["text_stdin"]
else:
# else print help menu
print("[bold red]Error. No inputs were given to Ciphey. [bold red]")
@click.pass_context
def all_procedure(ctx):
print_help(ctx)
all_procedure()
return None
if issubclass(config.objs["format"], type(kwargs["text"])):
pass
elif config.objs["format"] == str and isinstance(kwargs["text"], bytes):
kwargs["text"] = kwargs["text"].decode("utf-8")
elif config.objs["format"] == bytes and isinstance(kwargs["text"], str):
kwargs["text"] = kwargs["text"].encode("utf-8")
else:
raise TypeError(f"Cannot load type {config.format} from {type(kwargs['text'])}")
result: Optional[str]
# if debug or quiet mode is on, run without spinner
if config.verbosity != 0:
result = decrypt(config, kwargs["text"])
else:
# else, run with spinner if verbosity is 0
with console.status("[bold green]Thinking...", spinner="moon") as status:
config.set_spinner(status)
result = decrypt(config, kwargs["text"])
if result is None:
result = "Could not find any solutions."
console.print(result) |
Python | Ciphey/ciphey/common.py | """Some useful adapters"""
from typing import Any
def id_lambda(value: Any):
"""
A function used in dynamic class generation that abstracts away a constant return value (like in getName)
"""
return lambda *args: value
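# Illustrative example (assumption, not from the original source):
# get_name = id_lambda("Vigenere")
# get_name() == "Vigenere"               # any positional args are ignored
# get_name("unused", 42) == "Vigenere"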
def fix_case(target: str, base: str) -> str:
"""Returns the lower-case string target with the case of base"""
ret = "".join(
[
target[i].upper() if base[i].isupper() else target[i]
for i in range(len(target))
]
)
return "".join(
[
target[i].upper() if base[i].isupper() else target[i]
for i in range(len(target))
]
) |
Python | Ciphey/ciphey/mathsHelper.py | """
[Ciphey ASCII-art banner]
© Brandon Skerritt
Github: brandonskerritt
Class to provide helper functions for mathematics
(oh, not entirely mathematics either. Some NLP stuff and sorting dicts. It's just a helper class
)
"""
from collections import OrderedDict
from string import punctuation
from typing import Optional
import logging
from rich.logging import RichHandler
class mathsHelper:
"""Class to provide helper functions for mathematics and other small things"""
def __init__(self):
# ETAOIN is the most popular letters in order
self.ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
self.LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
@staticmethod
def gcd(a, b) -> int:
"""Greatest common divisor.
The Greatest Common Divisor of a and b using Euclid's Algorithm.
Args:
a -> num 1
b -> num 2
Returns:
Returns GCD(a, b)
"""
# Return
while a != 0:
a, b = b % a, a
return b
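# Illustrative example (not from the original source): Euclid's algorithm on
# (48, 18) produces remainders 12, 6, 0, so mathsHelper.gcd(48, 18) == 6.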
@staticmethod
def mod_inv(a: int, m: int) -> Optional[int]:
"""
Returns the modular inverse of a mod m, or None if it does not exist.
The modular inverse of a is the number a_inv that satisfies the equation
a_inv * a mod m === 1 mod m
Note: This is a naive implementation, and runtime may be improved in several ways.
For instance by checking if m is prime to perform a different calculation,
or by using the extended euclidean algorithm.
"""
for i in range(1, m):
if (m * i + 1) % a == 0:
return (m * i + 1) // a
return None
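# Illustrative example (not from the original source):
# mathsHelper.mod_inv(3, 7) == 5, since (3 * 5) % 7 == 1;
# mathsHelper.mod_inv(2, 4) is None, because 2 has no inverse modulo 4.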
@staticmethod
def percentage(part: float, whole: float) -> float:
"""Returns percentage.
Just a normal algorithm to return the percent.
Args:
part -> part of the whole number
whole -> the whole number
Returns:
Returns the percentage of part to whole.
"""
if part <= 0 or whole <= 0:
return 0
# works with percentages
return 100 * float(part) / float(whole)
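# Illustrative example (not from the original source):
# mathsHelper.percentage(15, 60) == 25.0
# mathsHelper.percentage(-1, 60) == 0    # non-positive inputs short-circuit to 0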
def sort_prob_table(self, prob_table: dict) -> dict:
"""Sorts the probability table.
Sorts a dictionary of dictionaries (and all the sub-dictionaries).
Args:
prob_table -> The probability table returned by the neural network to sort.
Returns:
Returns the prob_table, but sorted.
"""
# for each object: prob table in dictionary
max_overall: int = 0
max_dict_pair: dict = {}
highest_key = None
empty_dict: dict = {}
# sorts the prob table before we find max, and converts it to order dicts
for key, value in prob_table.items():
prob_table[key] = self.new_sort(value)
prob_table[key] = dict(prob_table[key])
# gets maximum key then sets it to the front
counter_max: int = 0
counter_prob: int = len(prob_table)
while counter_max < counter_prob:
max_overall = 0
highest_key = None
logging.debug(
f"Running while loop in sort_prob_table, counterMax is {counter_max}"
)
for key, value in prob_table.items():
logging.debug(f"Sorting {key}")
maxLocal = 0
# for each item in that table
for key2, value2 in value.items():
logging.debug(
f"Running key2 {key2}, value2 {value2} for loop for {value.items()}"
)
maxLocal = maxLocal + value2
logging.debug(
f"MaxLocal is {maxLocal} and maxOverall is {max_overall}"
)
if maxLocal > max_overall:
logging.debug(f"New max local found {maxLocal}")
# because the dict doesn't reset
max_dict_pair = {}
max_overall = maxLocal
# so eventually, we get the maximum dict pairing?
max_dict_pair[key] = value
highest_key = key
logging.debug(f"Highest key is {highest_key}")
# removes the highest key from the prob table
logging.debug(
f"Prob table is {prob_table} and highest key is {highest_key}"
)
logging.debug(f"Removing {prob_table[highest_key]}")
del prob_table[highest_key]
logging.debug(f"Prob table after deletion is {prob_table}")
counter_max += 1
empty_dict = {**empty_dict, **max_dict_pair}
# returns the max dict (at the start) with the prob table
# this way, it should always work on most likely first.
logging.debug(
f"The prob table is {prob_table} and the maxDictPair is {max_dict_pair}"
)
logging.debug(f"The new sorted prob table is {empty_dict}")
return empty_dict
@staticmethod
def new_sort(new_dict: dict) -> dict:
"""Uses OrderedDict to sort a dictionary.
I think it's faster than my implementation.
Args:
new_dict -> the dictionary to sort
Returns:
Returns the dict, but sorted.
"""
# (f"d is {d}")
logging.debug(f"The old dictionary before new_sort() is {new_dict}")
sorted_i = OrderedDict(
sorted(new_dict.items(), key=lambda x: x[1], reverse=True)
)
logging.debug(f"The dictionary after new_sort() is {sorted_i}")
# sortedI = sort_dictionary(x)
return sorted_i
@staticmethod
def is_ascii(s: str) -> bool:
"""Returns the boolean value if is_ascii is an ascii char.
Does what it says on the tree. Stolen from
https://stackoverflow.com/questions/196345/how-to-check-if-a-string-in-python-is-in-ascii
Args:
s -> the char to check.
Returns:
Returns the boolean of the char.
"""
return bool(lambda s: len(s) == len(s.encode()))
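# Illustrative example (not from the original source):
# mathsHelper.is_ascii("hello") is True      # pure ASCII encodes to one byte per character
# mathsHelper.is_ascii("héllo") is False     # "é" encodes to two bytes in UTF-8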
@staticmethod
def strip_punctuation(text: str) -> str:
"""Strips punctuation from a given string.
Uses string.punctuation.
Args:
text -> the text to strip punctuation from.
Returns:
Returns string without punctuation.
"""
text: str = (str(text).translate(str.maketrans("", "", punctuation))).strip(
"\n"
)
return text |
Python | Ciphey/ciphey/__main__.py | #! /usr/bin/env python3
"""
Ciphey: https://github.com/Ciphey/Ciphey
"""
import platform
import sys
if __name__ == "__main__":
major = sys.version_info[0]
minor = sys.version_info[1]
python_version = (
str(sys.version_info[0])
+ "."
+ str(sys.version_info[1])
+ "."
+ str(sys.version_info[2])
)
if major != 3 or minor < 6:
print(
f"Ciphey requires Python 3.6+, you are using {python_version}. Please install a higher Python version. https://www.python.org/downloads/"
)
print(
"Alternatively, visit our Discord and use the Ciphey bot in #bots http://discord.skerritt.blog"
)
sys.exit(1)
if platform.system() == "Windows":
if minor > 8:
print(
"Ciphey does not currently support Python 3.9 on Windows. Please use the Discord bot at http://discord.skerritt.blog"
)
sys.exit(1)
# detect 32-bit builds: sys.maxsize is 2**63 - 1 on 64-bit Python
if not sys.maxsize > 2 ** 32:
print(
"You are using Python 32 bit and Windows, Ciphey does not support this. Please upgrade to Python 64-bit here https://www.python.org/downloads/"
)
sys.exit(1)
from .ciphey import main
main() |
Python | Ciphey/ciphey/basemods/Checkers/any.py | from typing import Dict, Optional
from ciphey.iface import Config, ParamSpec, PolymorphicChecker, registry
@registry.register
class Any(PolymorphicChecker):
"""Should only be used for debugging, frankly"""
def getExpectedRuntime(self, text) -> float:
return 0 # TODO: actually calculate this
def __init__(self, config: Config):
super().__init__(config)
def check(self, text: str) -> Optional[str]:
return ""
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
pass |
Python | Ciphey/ciphey/basemods/Checkers/brandon.py | """
[Ciphey ASCII-art banner]
© Brandon Skerritt
Github: brandonskerritt
Class to determine whether something is English or not.
1. Calculate the Chi Squared score of a sentence
2. If the score is significantly lower than the average score, it _might_ be English
2.1. If the score _might_ be English, then take the text and compare it to the sorted dictionary
in O(n log n) time.
It creates a percentage of "How much of this text is in the dictionary?"
The dictionary contains:
* 20,000 most common US words
* 10,000 most common UK words (there's no repetition between the two)
* The top 10,000 passwords
If the word "Looks like" English (chi-squared) and if it contains English words, we can conclude it is
very likely English. The alternative is doing the dictionary thing but with an entire 479k word dictionary (slower)
2.2. If the score is not English, but we haven't tested enough to create an average, then test it against
the dictionary
Things to optimise:
* We only run the dictionary check if the chi-squared score is 20% lower than the average
* We consider it "English" if 45% of the text matches the dictionary
* We run the dictionary check if there are fewer than 10 chi-squared tests in total
How to add a language:
* Download your desired dictionary. Try to make it the most popular words, for example. Place this file into this
folder with languagename.txt
As an example, this comes built in with english.txt
Find the statistical frequency of each letter in that language.
For English, we have:
self.languages = {
"English":
[0.0855, 0.0160, 0.0316, 0.0387, 0.1210,0.0218, 0.0209, 0.0496, 0.0733, 0.0022,0.0081, 0.0421, 0.0253, 0.0717,
0.0747,0.0207, 0.0010, 0.0633, 0.0673, 0.0894,0.0268, 0.0106, 0.0183, 0.0019, 0.0172,0.0011]
}
In chisquared.py
To add your language, do:
self.languages = {
"English":
[0.0855, 0.0160, 0.0316, 0.0387, 0.1210,0.0218, 0.0209, 0.0496, 0.0733, 0.0022,0.0081, 0.0421, 0.0253, 0.0717,
0.0747,0.0207, 0.0010, 0.0633, 0.0673, 0.0894,0.0268, 0.0106, 0.0183, 0.0019, 0.0172,0.0011],
"German": [0.0973]
}
In alphabetical order
And you're.... Done! Make sure the name of the two match up
"""
import sys
from math import ceil
from typing import Dict, Optional
import logging
from rich.logging import RichHandler
from ciphey.iface import Checker, Config, ParamSpec, T, registry
sys.path.append("..")
try:
import mathsHelper as mh
except ModuleNotFoundError:
import ciphey.mathsHelper as mh
@registry.register
class Brandon(Checker[str]):
"""
Class designed to confirm whether something is **language** based on how many words of **language** appears
Call confirmLanguage(text, language)
* text: the text you want to confirm
* language: the language you want to confirm
Find out what language it is by using chisquared.py, the highest chisquared score is the language
languageThreshold = 45
if a string is 45% **language** words, then it's confirmed to be english
"""
def getExpectedRuntime(self, text: T) -> float:
# TODO: actually work this out
# TODO its 0.2 seconds on average
return 1e-4  # 100 µs
wordlist: set
def clean_text(self, text: str) -> set:
"""Cleans the text ready to be checked
Strips punctuation, makes it lower case, turns it into a set separated by spaces, removes duplicate words
Args:
text -> The text we use to perform analysis on
Returns:
text -> the text as a list, now cleaned
"""
# makes the text unique words and readable
text = text.lower()
text = self.mh.strip_punctuation(text)
text = text.split(" ")
text = filter(lambda x: len(x) > 2, text)
text = set(text)
return text
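# Illustrative example (not from the original source): clean_text("Hello, hello WORLD!")
# lower-cases and strips punctuation to "hello hello world", splits on spaces, drops
# words of length <= 2 and de-duplicates, returning the set {"hello", "world"}.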
def checker(self, text: str, threshold: float, text_length: int, var: set) -> bool:
"""Given text determine if it passes checker
The checker uses the variable passed to it. I.E. Stopwords list, 1k words, dictionary
Args:
text -> The text to check
threshold -> at what point do we return True? The percentage of text that is in var before we return True
text_length -> the length of the text
var -> the variable we are checking against. Stopwords list, 1k words list, dictionary list.
Returns:
boolean -> True for it passes the test, False for it fails the test."""
if text is None:
logging.debug("Checker's text is None, so returning False")
return False
if var is None:
logging.debug("Checker's input var is None, so returning False")
return False
percent = ceil(text_length * threshold)
logging.debug(f"Checker's chunks are size {percent}")
meet_threshold = 0
location = 0
end = percent
if text_length <= 0:
return False
while location <= text_length:
# chunks the text, so only gets THRESHOLD chunks of text at a time
text = list(text)
to_analyse = text[location:end]
logging.debug(f"To analyse is {to_analyse}")
for word in to_analyse:
# if word is a stopword, + 1 to the counter
if word in var:
logging.debug(
f"{word} is in var, which means I am +=1 to the meet_threshold which is {meet_threshold}"
)
meet_threshold += 1
meet_threshold_percent = meet_threshold / text_length
if meet_threshold_percent >= threshold:
logging.debug(
f"Returning true since the percentage is {meet_threshold / text_length} and the threshold is {threshold}"
)
# if we meet the threshold, return True
# otherwise, go over again until we do
# We do this in the for loop because if we're at 24% and THRESHOLD is 25
# we don't want to wait THRESHOLD to return true, we want to return True ASAP
return True
location = end
end = end + percent
logging.debug(
f"The language proportion {meet_threshold_percent} is under the threshold {threshold}"
)
return False
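# Illustrative example (not from the original source): with 10 cleaned words and a
# threshold of 0.45, the chunk size is ceil(10 * 0.45) = 5, so the loop inspects the
# words five at a time and returns True as soon as 5 of the 10 words (>= 45%) have
# been found in var.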
def __init__(self, config: Config):
# Suppresses warning
super().__init__(config)
self.mh = mh.mathsHelper()
phases = config.get_resource(self._params()["phases"])
self.thresholds_phase1 = phases["1"]
self.thresholds_phase2 = phases["2"]
self.top1000Words = config.get_resource(self._params().get("top1000"))
self.wordlist = config.get_resource(self._params()["wordlist"])
self.stopwords = config.get_resource(self._params().get("stopwords"))
self.len_phase1 = len(self.thresholds_phase1)
self.len_phase2 = len(self.thresholds_phase2)
def check(self, text: str) -> Optional[str]:
"""Checks to see if the text is in English
Performs a decryption, but mainly parses the internal data packet and prints useful information.
Args:
text -> The text we use to perform analysis on
Returns:
bool -> True if the text is English, False otherwise.
"""
logging.debug(f'In Language Checker with "{text}"')
text = self.clean_text(text)
logging.debug(f'Text split to "{text}"')
if text == "":
logging.debug("Returning None from Brandon as the text cleaned is none.")
return None
length_text = len(text)
what_to_use = {}
# this code decides what checker / threshold to use
# if text is over or equal to maximum size, just use the maximum possible checker
what_to_use = self.calculateWhatChecker(
length_text, self.thresholds_phase1.keys()
)
logging.debug(self.thresholds_phase1)
what_to_use = self.thresholds_phase1[str(what_to_use)]
# def checker(self, text: str, threshold: float, text_length: int, var: set) -> bool:
if "check" in what_to_use:
# perform check 1k words
result = self.checker(
text, what_to_use["check"], length_text, self.top1000Words
)
elif "stop" in what_to_use:
# perform stopwords
result = self.checker(
text, what_to_use["stop"], length_text, self.stopwords
)
elif "dict" in what_to_use:
result = self.checker(text, what_to_use["dict"], length_text, self.wordlist)
# If result is None, no point doing it again in phase2
if not result:
return None
else:
logging.info(f"It is neither stop or check, but instead {what_to_use}")
# return False if phase 1 fails
if not result:
return None
else:
what_to_use = self.calculateWhatChecker(
length_text, self.thresholds_phase2.keys()
)
what_to_use = self.thresholds_phase2[str(what_to_use)]
result = self.checker(text, what_to_use["dict"], length_text, self.wordlist)
return "" if result else None
def calculateWhatChecker(self, length_text, key):
"""Calculates what threshold / checker to use
If the length of the text is over the maximum sentence length, use the last checker / threshold
Otherwise, traverse the keys backwards until we find a key range that does not fit.
So we traverse backwards and see if the sentence length is between current - 1 and current
In this way, we find the absolute lowest checker / percentage threshold.
We traverse backwards because if the text is longer than the max sentence length, we already know.
In total, the keys are only 5 items long or so. It is not expensive to move backwards, nor is it expensive to move forwards.
Args:
length_text -> The length of the text
key -> What key we want to use. I.E. Phase1 keys, Phase2 keys.
Returns:
what_to_use -> the key of the lowest checker."""
_keys = list(key)
_keys = list(map(int, _keys))
if length_text >= int(_keys[-1]):
what_to_use = list(key)[_keys.index(_keys[-1])]
else:
# this algorithm finds the smallest possible fit for the text
for counter, i in reversed(list(enumerate(_keys))):
# [0, 110, 150]
if i <= length_text:
what_to_use = i
return what_to_use
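# Illustrative example (not from the original source): with phase keys
# {"0": ..., "110": ..., "150": ...} (the [0, 110, 150] noted above) and a cleaned
# text of 120 words, 120 < 150, so the backwards walk stops at 110 and that key's
# checker/threshold is used.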
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"top1000": ParamSpec(
desc="A wordlist of the top 1000 words",
req=False,
default="cipheydists::list::english1000",
),
"wordlist": ParamSpec(
desc="A wordlist of all the words",
req=False,
default="cipheydists::list::english",
),
"stopwords": ParamSpec(
desc="A wordlist of StopWords",
req=False,
default="cipheydists::list::englishStopWords",
),
"threshold": ParamSpec(
desc="The minimum proportion (between 0 and 1) that must be in the dictionary",
req=False,
default=0.45,
),
"phases": ParamSpec(
desc="Language-specific phase thresholds",
req=False,
default="cipheydists::brandon::english",
),
} |
Python | Ciphey/ciphey/basemods/Checkers/entropy.py | from typing import Dict, Optional
import logging
from rich.logging import RichHandler
from ciphey.iface import Checker, Config, ParamSpec, T, registry
@registry.register
class Entropy(Checker[str]):
"""
Uses entropy to determine plaintext
"""
def check(self, text: T) -> Optional[str]:
logging.debug("Trying entropy checker")
pass
def getExpectedRuntime(self, text: T) -> float:
# TODO: actually bench this
# Uses benchmark from Discord
return 2e-7 * len(text)
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
pass |
Python | Ciphey/ciphey/basemods/Checkers/ezcheck.py | from typing import Dict, List, Optional
from ciphey.iface import Checker, Config, ParamSpec, T, registry
from .brandon import Brandon
from .format import JsonChecker
from .human import HumanChecker
from .quadgrams import Quadgrams
from .regex import RegexList
from .what import What
@registry.register
class EzCheck(Checker[str]):
"""
This object is effectively a prebuilt quorum (with requirement 1) of common patterns, followed by a human check
"""
def check(self, text: str) -> Optional[str]:
for checker in self.checkers:
res = checker.check(text)
if (
res is not None
and (self.decider is None or self.decider.check(text) is not None)
):
return res
return None
def getExpectedRuntime(self, text: T) -> float:
# the decider is None when the human checker is disabled, so guard before calling it
decider_time = 0.0 if self.decider is None else self.decider.getExpectedRuntime(text)
return sum(i.getExpectedRuntime(text) for i in self.checkers) + decider_time
def __init__(self, config: Config):
super().__init__(config)
self.checkers: List[Checker[str]] = []
# Disable human checker for automated systems
if config.verbosity >= 0:
self.decider = config(HumanChecker)
else:
self.decider = None
# We need to modify the config for each of the objects
# First PyWhat, as it's the fastest
self.checkers.append(config(What))
# Next, the json checker
self.checkers.append(config(JsonChecker))
# Second to last, the quadgrams checker
self.checkers.append(config(Quadgrams))
# Finally, the Brandon checker, as it is the slowest
self.checkers.append(config(Brandon))
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
pass |
Python | Ciphey/ciphey/basemods/Checkers/format.py | import json
from typing import Dict, Optional
import logging
from rich.logging import RichHandler
from ciphey.iface import Checker, Config, ParamSpec, T, registry
@registry.register
class JsonChecker(Checker[str]):
"""
This object is effectively a prebuilt quorum (with requirement 1) of common patterns
"""
def check(self, text: T) -> Optional[str]:
logging.debug("Trying json checker")
# https://github.com/Ciphey/Ciphey/issues/389
if text.isdigit():
return None
try:
json.loads(text)
return ""
except ValueError:
return None
def getExpectedRuntime(self, text: T) -> float:
# TODO: actually bench this
return 1e-7 * len(text) # From benchmarks I found online
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
pass |
Python | Ciphey/ciphey/basemods/Checkers/gtest.py | from typing import Dict, Optional
import logging
from rich.logging import RichHandler
from ciphey.iface import Checker, Config, ParamSpec, T, registry
@registry.register
class GTestChecker(Checker[str]):
"""
G-test of fitness, similar to Chi squared.
"""
def check(self, text: T) -> Optional[str]:
logging.debug("Trying entropy checker")
pass
def getExpectedRuntime(self, text: T) -> float:
# TODO: actually bench this
return 4e-7 * len(text)
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
pass |
Python | Ciphey/ciphey/basemods/Checkers/human.py | from typing import Dict, Optional
from ciphey.iface import Checker, Config, ParamSpec, registry
from rich.console import Console
from rich.markup import escape
console = Console()
@registry.register
class HumanChecker(Checker[str]):
"""
Uses the person's decision to determine plaintext
"""
def check(self, ctext: str) -> Optional[str]:
with self._config().pause_spinner_handle():
response = console.input(
f"Possible plaintext: [blue bold]{escape(ctext.__repr__())}[/blue bold] ([green]y[/green]/[red]N[/red]): "
)
if response == "y":
return ""
elif response in ("n", ""):
return None
else:
return self.check(ctext)
def getExpectedRuntime(self, text: str) -> float:
return 1 # About a second
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
def __init__(self, config: Config):
super().__init__(config) |
Python | Ciphey/ciphey/basemods/Checkers/quadgrams.py | import logging
import re
from math import log10
from typing import Dict, Optional
from ciphey.iface import Checker, Config, ParamSpec, T, Translation, registry
from rich.logging import RichHandler
@registry.register
class Quadgrams(Checker[str]):
"""
Uses Quadgrams to determine plaintext
"""
def check(self, ctext: T) -> Optional[str]:
logging.debug("Trying Quadgrams checker")
# Capitalize and remove everything that's not a letter
ctext = re.sub("[^A-Z]", "", ctext.upper())
quadgrams = self.QUADGRAMS_DICT
quadgrams_sum = sum(quadgrams.values())
score = 0
for key in quadgrams.keys():
quadgrams[key] = float(quadgrams[key]) / quadgrams_sum
floor = log10(0.01 / quadgrams_sum)
for i in range(len(ctext) - 4 + 1):
# Get all quadgrams from ctext and check if they're in the dict
# If yes then add the score of those quadgrams to the total score
if ctext[i : i + 4] in quadgrams:
score += quadgrams[ctext[i : i + 4]]
else:
score += floor
if len(ctext) > 0:
score = score / len(ctext)
logging.info(f"Quadgrams is {score}")
# The default threshold was found to work the best from lots of testing
if score > self.threshold:
return ""
return None
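# Illustrative sketch of the scoring (hypothetical numbers, not from the original
# source): a quadgram found in the table adds its normalised frequency (e.g. a
# hypothetical 0.0003 for "TION"), while an unseen quadgram adds the large negative
# floor log10(0.01 / quadgrams_sum), so text full of unseen quadgrams is dragged
# well below the threshold.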
def getExpectedRuntime(self, text: T) -> float:
# TODO: actually bench this
return 2e-7 * len(text)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"dict": ParamSpec(
desc="The quadgrams dictionary to use",
req=False,
default="cipheydists::dist::quadgrams",
),
"score": ParamSpec(
desc="The score threshold to use",
req=False,
default=0.00011,
),
}
def __init__(self, config: Config):
super().__init__(config)
self.QUADGRAMS_DICT = config.get_resource(self._params()["dict"], Translation)
self.threshold = float(self._params()["score"]) |
Python | Ciphey/ciphey/basemods/Checkers/quorum.py | from typing import Dict, Generic, Optional
from ciphey.iface import Checker, Config, ParamSpec, T, _registry
class Quorum(Generic[T], Checker[T]):
def check(self, text: T) -> Optional[str]:
left = self._params().k
results = []
for checker in self.checkers:
results.append(checker.check(text))
if results[-1] is None:
continue
left -= 1
# Early return check
if left == 0:
return str(results)
def __init__(self, config: Config):
super().__init__(config)
if self._params().k is None:
self._params()["k"] = len(self._params()["checker"])
# These checks need to be separate, to make sure that we do not have zero members
if self._params().k == 0 or self._params().k > len(self._params()["checker"]):
raise IndexError(
"k must be between 0 and the number of checkers (inclusive)"
)
self.checkers = []
for i in self._params()["checker"]:
# This enforces type consistency
self.checkers.append(_registry.get_named(i, Checker[T]))
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"checker": ParamSpec(
req=True, desc="The checkers to be used for analysis", list=True
),
"k": ParamSpec(
req=False,
desc="The minimum quorum size. Defaults to the number of checkers",
),
} |
Python | Ciphey/ciphey/basemods/Checkers/regex.py | import re
from typing import Dict, Optional
import logging
from rich.logging import RichHandler
from ciphey.iface import Checker, Config, ParamSpec, T, registry
@registry.register
class Regex(Checker[str]):
def getExpectedRuntime(self, text: T) -> float:
return 1e-5 # TODO: actually calculate this
def __init__(self, config: Config):
super().__init__(config)
self.regexes = list(map(re.compile, self._params()["regex"]))
logging.debug(f"There are {len(self.regexes)} regexes")
def check(self, text: str) -> Optional[str]:
for regex in self.regexes:
logging.debug(f"Trying regex {regex} on {text}")
res = regex.search(text)
logging.debug(f"Results: {res}")
if res:
return f"Passed with regex {regex}. Want to contribute to Ciphey? Submit your regex here to allow Ciphey to automatically get this next time https://github.com/bee-san/pyWhat/wiki/Adding-your-own-Regex\n"
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"regex": ParamSpec(
req=True,
desc="The regex that must be matched (in a substring)",
list=True,
)
}
@registry.register
class RegexList(Checker[str]):
def getExpectedRuntime(self, text: T) -> float:
return 1e-5 # TODO: actually calculate this
def __init__(self, config: Config):
super().__init__(config)
self.regexes = []
for i in self._params()["resource"]:
self.regexes += [re.compile(regex) for regex in config.get_resource(i)]
logging.debug(f"There are {len(self.regexes)} regexes")
def check(self, text: str) -> Optional[str]:
for regex in self.regexes:
logging.debug(f"Trying regex {regex} on {text}")
res = regex.search(text)
logging.debug(f"Results: {res}")
if res:
return f"passed with regex {regex}"
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"resource": ParamSpec(
req=True,
desc="A list of regexes that could be matched",
list=True,
)
} |
Python | Ciphey/ciphey/basemods/Checkers/what.py | from typing import Dict, Optional
from ciphey.iface import Checker, Config, ParamSpec, T, registry
import logging
from rich.logging import RichHandler
from pywhat import identifier
from rich.console import Console
console = Console()
@registry.register
class What(Checker[str]):
"""
Uses PyWhat to determine plaintext with regexes
https://github.com/bee-san/pyWhat
"""
def check(self, ctext: T) -> Optional[str]:
logging.debug("Trying PyWhat checker")
returned_regexes = self.id.identify(ctext)
if returned_regexes["Regexes"]:
matched_regex = returned_regexes["Regexes"]['text'][0]["Regex Pattern"]
ret = f'The plaintext is a [yellow]{matched_regex["Name"]}[/yellow]'
human = (
f'\nI think the plaintext is a [yellow]{matched_regex["Name"]}[/yellow]'
)
if "Description" in matched_regex and matched_regex["Description"]:
s = matched_regex["Description"]
# lowercases first letter so it doesn't look weird
s = f", which is {s[0].lower() + s[1:]}\n"
ret += s
human += s
# if URL is attached, include that too.
if "URL" in matched_regex and matched_regex["URL"]:
link = matched_regex["URL"] + ctext.replace(" ", "")
ret += f"\nClick here to view in browser [#CAE4F1][link={link}]{link}[/link][/#CAE4F1]\n"
# If greppable mode is on, don't print this
if self.config.verbosity >= 0:
# Print with full stop
console.print(human)
return ret
return None
def getExpectedRuntime(self, text: T) -> float:
# TODO: actually bench this
return 2e-7 * len(text)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
def __init__(self, config: Config):
super().__init__(config)
self.config = config
self.id = identifier.Identifier() |
Python | Ciphey/ciphey/basemods/Crackers/affine.py | # Community
# by https://github.com/Ozzyz
from typing import Dict, List, Optional
import cipheycore
import logging
from rich.logging import RichHandler
from ciphey.common import fix_case
from ciphey.iface import Config, Cracker, CrackInfo, CrackResult, ParamSpec, registry
from ciphey.mathsHelper import mathsHelper
@registry.register
class Affine(Cracker[str]):
"""
Each character in the Affine Cipher is encoded with the rule E(x) = (ax + b) mod m
m is the size of the alphabet, while a and b are the keys in the cipher. a must be coprime to m.
The Caesar cipher is a specific case of the Affine Cipher, with a=1 and b being the shift of the cipher.
Decryption is performed by D(x) = a_inv (x - b) mod m where a_inv is the modular multiplicative inverse of a mod m.
In this version of the Affine Cipher, we do not allow alphabets with several instances of the same letter in different cases.
For instance, the alphabet 'ABCdef123' is allowed, but 'AaBbCc' is not.
"""
def getInfo(self, ctext: str) -> CrackInfo:
return CrackInfo(
success_likelihood=0.1,
success_runtime=1e-5,
failure_runtime=1e-5,
)
@staticmethod
def getTarget() -> str:
return "affine"
def attemptCrack(self, ctext: str) -> List[CrackResult]:
"""
Brute forces all the possible combinations of a and b to attempt to crack the cipher.
"""
logging.debug("Attempting affine")
candidates = []
# a and b are coprime if gcd(a,b) is 1.
possible_a = [
a
for a in range(1, self.alphabet_length)
if mathsHelper.gcd(a, self.alphabet_length) == 1
]
logging.info(
f"Trying Affine Cracker with {len(possible_a)} a-values and {self.alphabet_length} b-values"
)
for a in possible_a:
a_inv = mathsHelper.mod_inv(a, self.alphabet_length)
# If there is no inverse, we cannot decrypt the text
if a_inv is None:
continue
for b in range(self.alphabet_length):
# Pass in lowered text. This means that we expect alphabets to not contain both 'a' and 'A'.
translated = self.decrypt(ctext.lower(), a_inv, b, self.alphabet_length)
candidate_probability = self.plaintext_probability(translated)
if candidate_probability > self.plaintext_prob_threshold:
candidates.append(
CrackResult(
value=fix_case(translated, ctext), key_info=f"a={a}, b={b}"
)
)
logging.info(f"Affine Cipher returned {len(candidates)} candidates")
return candidates
def plaintext_probability(self, translated: str) -> float:
"""
Analyses the translated text and applies the chi squared test to see if it is a probable plaintext candidate
Returns the probability of the chi-squared test.
"""
analysis = cipheycore.analyse_string(translated)
return cipheycore.chisq_test(analysis, self.expected)
def decrypt(self, text: str, a_inv: int, b: int, m: int) -> str:
"""
Each letter is decrypted as D(x) = a_inv * (x - b) mod m, where x is the char value.
We treat the char value as its index in the alphabet, so if
the alphabet is 'abcd....' and the char is 'b', it has the value 1.
"""
return "".join([self.decryptChar(char, a_inv, b, m) for char in text])
def decryptChar(self, char: str, a_inv: int, b: int, m: int) -> str:
# We lower the alphabet since both ctext and alphabet need to be in the same case in order
# to perform the shifts. The translated text will have fixed case after the translation anyways.
# This is only necessary if the specified alphabet is uppercase.
alphabet = [x.lower() for x in self.group]
# Preserve characters that are not in alphabet
if char not in alphabet:
return char
char_idx = alphabet.index(char)
decrypted_char_idx = (a_inv * (char_idx - b)) % m
return alphabet[decrypted_char_idx]
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"expected": ParamSpec(
desc="The expected distribution of the plaintext",
req=False,
config_ref=["default_dist"],
),
"group": ParamSpec(
desc="An ordered sequence of chars that make up the alphabet",
req=False,
default="abcdefghijklmnopqrstuvwxyz",
),
}
def __init__(self, config: Config):
super().__init__(config)
self.group = list(self._params()["group"])
self.expected = config.get_resource(self._params()["expected"])
self.alphabet_length = len(self.group)
self.cache = config.cache
self.plaintext_prob_threshold = 0.01 |
Python | Ciphey/ciphey/basemods/Crackers/ascii_shift.py | """
Ciphey
© Brandon Skerritt
Github: brandonskerritt
"""
from typing import Dict, List, Optional
import cipheycore
import logging
from rich.logging import RichHandler
from ciphey.iface import Config, Cracker, CrackInfo, CrackResult, ParamSpec, registry
@registry.register
class Ascii_shift(Cracker[str]):
def getInfo(self, ctext: str) -> CrackInfo:
analysis = self.cache.get_or_update(
ctext,
"cipheycore::simple_analysis",
lambda: cipheycore.analyse_string(ctext),
)
return CrackInfo(
success_likelihood=cipheycore.caesar_detect(analysis, self.expected),
# TODO: actually calculate runtimes
success_runtime=1e-5,
failure_runtime=1e-5,
)
@staticmethod
def getTarget() -> str:
return "ascii_shift"
def attemptCrack(self, ctext: str) -> List[CrackResult]:
logging.info(f"Trying ASCII shift cipher on {ctext}")
logging.debug("Beginning cipheycore simple analysis")
# Hand it off to the core
analysis = self.cache.get_or_update(
ctext,
"cipheycore::simple_analysis",
lambda: cipheycore.analyse_string(ctext),
)
logging.debug("Beginning cipheycore::caesar")
possible_keys = cipheycore.caesar_crack(
analysis, self.expected, self.group, self.p_value
)
n_candidates = len(possible_keys)
logging.info(f"ASCII shift returned {n_candidates} candidates")
if n_candidates == 0:
logging.debug("Filtering for better results")
analysis = cipheycore.analyse_string(ctext, self.group)
possible_keys = cipheycore.caesar_crack(
analysis, self.expected, self.group, self.p_value
)
candidates = []
for candidate in possible_keys:
logging.debug(f"Candidate {candidate.key} has prob {candidate.p_value}")
translated = cipheycore.caesar_decrypt(ctext, candidate.key, self.group)
candidates.append(CrackResult(value=translated, key_info=candidate.key))
return candidates
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"expected": ParamSpec(
desc="The expected distribution of the plaintext",
req=False,
config_ref=["default_dist"],
),
"group": ParamSpec(
desc="An ordered sequence of chars that make up the ASCII shift cipher alphabet",
req=False,
default="""\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f""",
),
"p_value": ParamSpec(
desc="The p-value to use for standard frequency analysis",
req=False,
default=0.01,
)
# TODO: add "filter" param
}
def __init__(self, config: Config):
super().__init__(config)
self.group = list(self._params()["group"])
self.expected = config.get_resource(self._params()["expected"])
self.cache = config.cache
self.p_value = float(self._params()["p_value"]) |
Python | Ciphey/ciphey/basemods/Crackers/baconian.py | import re
from typing import Dict, List, Optional
from ciphey.iface import (
Config,
Cracker,
CrackInfo,
CrackResult,
ParamSpec,
Translation,
registry,
)
import logging
from rich.logging import RichHandler
@registry.register
class Baconian(Cracker[str]):
def getInfo(self, ctext: str) -> CrackInfo:
return CrackInfo(
success_likelihood=0.1,
success_runtime=1e-5,
failure_runtime=1e-5,
)
@staticmethod
def getTarget() -> str:
return "baconian"
def attemptCrack(self, ctext: str) -> List[CrackResult]:
"""
Attempts to decode both variants of the Baconian cipher.
"""
logging.debug("Attempting Baconian cracker")
candidates = []
result = []
ctext_decoded = ""
ctext_decoded2 = ""
# Convert to uppercase and replace delimiters and whitespace with nothing
ctext = re.sub(r"[,;:\-\s]", "", ctext.upper())
# Make sure ctext only contains A and B
if bool(re.search(r"[^AB]", ctext)) is True:
logging.debug("Failed to crack baconian due to non baconian character(s)")
return None
# Make sure ctext is divisible by 5
ctext_len = len(ctext)
if ctext_len % 5:
logging.debug(
f"Failed to decode Baconian because length must be a multiple of 5, not '{ctext_len}'"
)
return None
# Split ctext into groups of 5
ctext = " ".join(ctext[i : i + 5] for i in range(0, len(ctext), 5))
ctext_split = ctext.split(" ")
baconian_keys = self.BACONIAN_DICT.keys()
# Decode I=J and U=V variant
for i in ctext_split:
if i in baconian_keys:
ctext_decoded += self.BACONIAN_DICT[i]
# Decode variant that assigns each letter a unique code
for i in ctext_split:
if "+" + i in baconian_keys:
ctext_decoded2 += self.BACONIAN_DICT["+" + i]
candidates.append(ctext_decoded)
candidates.append(ctext_decoded2)
for candidate in candidates:
if candidate != "":
if candidate == candidates[0]:
result.append(CrackResult(value=candidate, key_info="I=J & U=V"))
else:
result.append(CrackResult(value=candidate))
logging.debug(f"Baconian cracker - Returning results: {result}")
return result
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"expected": ParamSpec(
desc="The expected distribution of the plaintext",
req=False,
config_ref=["default_dist"],
),
"dict": ParamSpec(
desc="The Baconian alphabet dictionary to use",
req=False,
default="cipheydists::translate::baconian",
),
}
def __init__(self, config: Config):
super().__init__(config)
self.BACONIAN_DICT = config.get_resource(self._params()["dict"], Translation)
self.expected = config.get_resource(self._params()["expected"])
self.cache = config.cache |
Python | Ciphey/ciphey/basemods/Crackers/caesar.py | """
Ciphey
© Brandon Skerritt
Github: brandonskerritt
"""
from distutils import util
from typing import Dict, List, Optional, Union
import cipheycore
import logging
from rich.logging import RichHandler
from ciphey.common import fix_case
from ciphey.iface import Config, Cracker, CrackInfo, CrackResult, ParamSpec, registry
@registry.register
class Caesar(Cracker[str]):
def getInfo(self, ctext: str) -> CrackInfo:
analysis = self.cache.get_or_update(
ctext,
"cipheycore::simple_analysis",
lambda: cipheycore.analyse_string(ctext),
)
return CrackInfo(
success_likelihood=cipheycore.caesar_detect(analysis, self.expected),
# TODO: actually calculate runtimes
success_runtime=1e-5,
failure_runtime=1e-5,
)
@staticmethod
def getTarget() -> str:
return "caesar"
def attemptCrack(self, ctext: str) -> List[CrackResult]:
logging.info(f"Trying caesar cipher on {ctext}")
# Convert it to lower case
#
# TODO: handle different alphabets
if self.lower:
message = ctext.lower()
else:
message = ctext
logging.debug("Beginning cipheycore simple analysis")
# Hand it off to the core
analysis = self.cache.get_or_update(
ctext,
"cipheycore::simple_analysis",
lambda: cipheycore.analyse_string(ctext),
)
logging.debug("Beginning cipheycore::caesar")
possible_keys = cipheycore.caesar_crack(
analysis, self.expected, self.group, self.p_value
)
n_candidates = len(possible_keys)
logging.info(f"Caesar returned {n_candidates} candidates")
if n_candidates == 0:
logging.debug("Filtering for better results")
analysis = cipheycore.analyse_string(ctext, self.group)
possible_keys = cipheycore.caesar_crack(
analysis, self.expected, self.group, self.p_value
)
candidates = []
for candidate in possible_keys:
logging.debug(f"Candidate {candidate.key} has prob {candidate.p_value}")
translated = cipheycore.caesar_decrypt(message, candidate.key, self.group)
candidates.append(
CrackResult(value=fix_case(translated, ctext), key_info=candidate.key)
)
return candidates
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"expected": ParamSpec(
desc="The expected distribution of the plaintext",
req=False,
config_ref=["default_dist"],
),
"group": ParamSpec(
desc="An ordered sequence of chars that make up the caesar cipher alphabet",
req=False,
default="abcdefghijklmnopqrstuvwxyz",
),
"lower": ParamSpec(
desc="Whether or not the ciphertext should be converted to lowercase first",
req=False,
default=True,
),
"p_value": ParamSpec(
desc="The p-value to use for standard frequency analysis",
req=False,
default=0.01,
)
# TODO: add "filter" param
}
def __init__(self, config: Config):
super().__init__(config)
self.lower: Union[str, bool] = self._params()["lower"]
if not isinstance(self.lower, bool):
self.lower = util.strtobool(self.lower)
self.group = list(self._params()["group"])
self.expected = config.get_resource(self._params()["expected"])
self.cache = config.cache
self.p_value = float(self._params()["p_value"]) |
Python | Ciphey/ciphey/basemods/Crackers/hash.py | """
This is Hashbuster but slightly modified to work with Ciphey.
Why reinvent the wheel?
Changes (that I can remember)
* timeout set, as hashbuster took AGES before timeout was set.
https://github.com/s0md3v/Hash-Buster
"""
import re
from typing import Dict, List, Optional
import requests
import logging
from rich.logging import RichHandler
from ciphey.iface import Config, Cracker, CrackInfo, CrackResult, ParamSpec, T, registry
thread_count = 4
def alpha(ctext, hashtype):
return None
def beta(ctext, hashtype):
try:
response = requests.get(
"https://hashtoolkit.com/reverse-hash/?hash=", ctext, timeout=5
).text
except requests.exceptions.ReadTimeout as e:
logging.info(f"Beta failed timeout {e}")
match = re.search(r'/generate-hash/?text=.*?"', response)
if match:
return match.group(1)
return None
def gamma(ctext, hashtype):
try:
response = requests.get(
"https://www.nitrxgen.net/md5db/" + ctext, timeout=5
).text
except requests.exceptions.ReadTimeout as e:
logging.info(f"Gamma failed with {e}")
if response:
return response
else:
return None
def delta(ctext, hashtype):
return None
def theta(ctext, hashtype):
try:
response = requests.get(
"https://md5decrypt.net/Api/api.php?hash=%s&hash_type=%s&email=deanna_abshire@proxymail.eu&code=1152464b80a61728"
% (ctext, hashtype),
timeout=5,
).text
except requests.exceptions.ReadTimeout as e:
logging.info(f"Gamma failed with {e}")
if len(response) != 0:
return response
else:
return None
md5 = [gamma, alpha, beta, theta, delta]
sha1 = [alpha, beta, theta, delta]
sha256 = [alpha, beta, theta]
sha384 = [alpha, beta, theta]
sha512 = [alpha, beta, theta]
result = {}
def crack(ctext):
raise "Error Crack is called"
def threaded(ctext):
resp = crack(ctext)
if resp:
print(ctext + " : " + resp)
result[ctext] = resp
@registry.register
class HashBuster(Cracker[str]):
@staticmethod
def getTarget() -> str:
return "hash"
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def priority() -> float:
return 0.05
def getInfo(self, ctext: T) -> CrackInfo:
# TODO calculate these properly
return CrackInfo(
success_likelihood=0.5,
success_runtime=5,
failure_runtime=5,
)
def attemptCrack(self, ctext: T) -> List[CrackResult]:
logging.info("Starting to crack hashes")
candidates = []
if len(ctext) == 32:
for api in md5:
r = api(ctext, "md5")
if r is not None:
logging.debug(f"MD5 api returned candidate {r}")
candidates.append((r, "MD5"))
elif len(ctext) == 40:
for api in sha1:
r = api(ctext, "sha1")
if r is not None:
logging.debug(f"SHA1 api returned candidate {r}")
candidates.append((r, "SHA1"))
elif len(ctext) == 64:
for api in sha256:
r = api(ctext, "sha256")
if r is not None:
logging.debug(f"SHA256 api returned candidate {r}")
candidates.append((r, "SHA256"))
elif len(ctext) == 96:
for api in sha384:
r = api(ctext, "sha384")
if r is not None:
logging.debug(f"SHA384 api returned candidate {r}")
candidates.append((r, "SHA384"))
elif len(ctext) == 128:
for api in sha512:
r = api(ctext, "sha512")
if r is not None:
logging.debug(f"SHA512 api returned candidate {r}")
candidates.append((r, "SHA512"))
logging.debug(f"Hash buster returning {candidates}")
if not candidates:
return []
# TODO add to 5.1 make this return multiple possible candidates
return [CrackResult(value=candidates[0][0], misc_info=candidates[0][1])]
def __init__(self, config: Config):
super().__init__(config) |
Python | Ciphey/ciphey/basemods/Crackers/rot47.py | """
Ciphey
© Brandon Skerritt
Github: brandonskerritt
"""
from typing import Dict, List, Optional
import cipheycore
import logging
from rich.logging import RichHandler
from ciphey.iface import Config, Cracker, CrackInfo, CrackResult, ParamSpec, registry
@registry.register
class Rot47(Cracker[str]):
def getInfo(self, ctext: str) -> CrackInfo:
analysis = self.cache.get_or_update(
ctext,
"cipheycore::simple_analysis",
lambda: cipheycore.analyse_string(ctext),
)
return CrackInfo(
success_likelihood=cipheycore.caesar_detect(analysis, self.expected),
# TODO: actually calculate runtimes
success_runtime=1e-5,
failure_runtime=1e-5,
)
@staticmethod
def getTarget() -> str:
return "rot47"
def attemptCrack(self, ctext: str) -> List[CrackResult]:
logging.info(f"Trying ROT47 cipher on {ctext}")
logging.debug("Beginning cipheycore simple analysis")
# Hand it off to the core
analysis = self.cache.get_or_update(
ctext,
"cipheycore::simple_analysis",
lambda: cipheycore.analyse_string(ctext),
)
logging.debug("Beginning cipheycore::caesar")
possible_keys = cipheycore.caesar_crack(
analysis, self.expected, self.group, self.p_value
)
n_candidates = len(possible_keys)
logging.info(f"ROT47 returned {n_candidates} candidates")
if n_candidates == 0:
logging.debug("Filtering for better results")
analysis = cipheycore.analyse_string(ctext, self.group)
possible_keys = cipheycore.caesar_crack(
analysis, self.expected, self.group, self.p_value
)
candidates = []
for candidate in possible_keys:
logging.debug(f"Candidate {candidate.key} has prob {candidate.p_value}")
translated = cipheycore.caesar_decrypt(ctext, candidate.key, self.group)
candidates.append(CrackResult(value=translated, key_info=candidate.key))
return candidates
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"expected": ParamSpec(
desc="The expected distribution of the plaintext",
req=False,
config_ref=["default_dist"],
),
"group": ParamSpec(
desc="An ordered sequence of chars that make up the ROT47 cipher alphabet",
req=False,
default="""!"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~""",
),
"p_value": ParamSpec(
desc="The p-value to use for standard frequency analysis",
req=False,
default=0.01,
)
# TODO: add "filter" param
}
def __init__(self, config: Config):
super().__init__(config)
self.group = list(self._params()["group"])
self.expected = config.get_resource(self._params()["expected"])
self.cache = config.cache
self.p_value = float(self._params()["p_value"]) |
Python | Ciphey/ciphey/basemods/Crackers/soundex.py | import re
from typing import Dict, List, Optional
import logging
from rich.logging import RichHandler
from ciphey.iface import (
Config,
Cracker,
CrackInfo,
CrackResult,
ParamSpec,
Translation,
registry,
)
@registry.register
class Soundex(Cracker[str]):
def getInfo(self, ctext: str) -> CrackInfo:
return CrackInfo(
success_likelihood=0.1,
success_runtime=1e-5,
failure_runtime=1e-5,
)
@staticmethod
def getTarget() -> str:
return "soundex"
def attemptCrack(self, ctext: str) -> List[CrackResult]:
"""
Attempts to crack Soundex by generating all possible combinations.
"""
logging.debug("Attempting Soundex cracker")
word_list = []
sentences = []
result = []
# Convert to uppercase and replace delimiters and whitespace with nothing
ctext = re.sub(r"[,;:\-\s]", "", ctext.upper())
# Make sure ctext contains only A-Z and 0-9
if bool(re.search(r"[^A-Z0-9]", ctext)) is True:
logging.debug("Failed to crack soundex due to non soundex character(s)")
return None
# Make sure ctext is divisible by 4
ctext_len = len(ctext)
if ctext_len % 4:
logging.debug(
f"Failed to decode Soundex because length must be a multiple of 4, not '{ctext_len}'"
)
return None
# Split ctext into groups of 4
ctext = " ".join(ctext[i : i + 4] for i in range(0, len(ctext), 4))
ctext_split = ctext.split(" ")
soundex_keys = self.SOUNDEX_DICT.keys()
# Find all words that correspond to each given soundex code
for code in ctext_split:
if code in soundex_keys:
word_list.append(self.SOUNDEX_DICT[code])
logging.info(f"Possible words for given encoded text: {word_list}")
# Find all possible sentences
self.getSentenceCombo(
word_list,
sentences,
self.frequency_dict,
self.sentence_freq,
self.word_freq,
)
sorted_sentences = self.sortlistwithdict(sentences, self.frequency_dict)
for sentence in sorted_sentences:
result.append(CrackResult(value=sentence))
logging.debug(f"Soundex cracker - Returning results: {result}")
return result
def sortlistwithdict(self, listtosort, hashes):
"""
This function uses the sum of ranks (based on frequency) of each word in each
sentence and sorts them according to it.
"""
return sorted(listtosort, key=lambda x: hashes[x])
def getSentenceCombo(
self, A, sentences, frequency_dict, sentence_freq, word_freq, result="", n=0
):
"""
This function uses recursion to generate a list of sentences from all possible
words for a given set of soundex codes.
"""
logging.debug("Creating all possible sentences from Soundex")
if n == len(A):
sentences.append(result[1:])
for word in result[1:].split():
# Adding the rank of each word to find out the sentence's net frequency
if word in word_freq:
sentence_freq += word_freq.index(word)
# If the word isn't in the frequency list then it's a very uncommon word
# so we add a large number (5000)
else:
sentence_freq += 5000
frequency_dict[result[1:]] = sentence_freq
sentence_freq = 0
return
for word in A[n]:
out = result + " " + word
self.getSentenceCombo(
A, sentences, frequency_dict, sentence_freq, word_freq, out, n + 1
)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"dict": ParamSpec(
desc="The Soundex dictionary to use",
req=False,
default="cipheydists::translate::soundex",
),
"freq": ParamSpec(
desc="The word frequency dictionary to use",
req=False,
default="cipheydists::list::English5000Freq",
),
}
def __init__(self, config: Config):
super().__init__(config)
self.SOUNDEX_DICT = config.get_resource(self._params()["dict"], Translation)
self.word_freq = config.get_resource(self._params()["freq"], Translation)
self.frequency_dict = {}
self.sentence_freq = 0 |
Python | Ciphey/ciphey/basemods/Crackers/vigenere.py | """
Ciphey
© Brandon Skerritt
Github: brandonskerritt
"""
from distutils import util
from typing import Dict, List, Optional, Union
import cipheycore
import logging
from rich.logging import RichHandler
from ciphey.common import fix_case
from ciphey.iface import Config, Cracker, CrackInfo, CrackResult, ParamSpec, registry
@registry.register
class Vigenere(Cracker[str]):
def getInfo(self, ctext: str) -> CrackInfo:
if self.keysize is not None:
analysis = self.cache.get_or_update(
ctext,
f"vigenere::{self.keysize}",
lambda: cipheycore.analyse_string(
ctext.lower(), self.keysize, self.group
),
)
val = cipheycore.vigenere_detect(analysis, self.expected)
logging.info(f"Vigenere has likelihood {val}")
return CrackInfo(
success_likelihood=val,
# TODO: actually calculate runtimes
success_runtime=1e-3,
failure_runtime=1e-2,
)
likely_lens = self.cache.get_or_update(
ctext,
"vigenere::likely_lens",
lambda: cipheycore.vigenere_likely_key_lens(
ctext.lower(), self.expected, self.group, self.detect_p_value
),
)
# Filter out the lens that make no sense
likely_lens = [i for i in likely_lens if i.len <= self.max_key_length]
for keysize in likely_lens:
# Store the analysis
analysis = self.cache.get_or_update(
ctext, f"vigenere::{keysize.len}", lambda: keysize.tab
)
if len(likely_lens) == 0:
return CrackInfo(
success_likelihood=0,
# TODO: actually calculate runtimes
success_runtime=2e-3,
failure_runtime=2e-2,
)
logging.info(
f"Vigenere has likelihood {likely_lens[0].p_value} with lens {[i.len for i in likely_lens]}"
)
return CrackInfo(
success_likelihood=likely_lens[0].p_value,
# TODO: actually calculate runtimes
success_runtime=2e-4,
failure_runtime=2e-4,
)
@staticmethod
def getTarget() -> str:
return "vigenere"
def crackOne(
self, ctext: str, analysis: cipheycore.windowed_analysis_res, real_ctext: str
) -> List[CrackResult]:
possible_keys = cipheycore.vigenere_crack(
analysis, self.expected, self.group, self.p_value
)
if len(possible_keys) > self.clamp:
possible_keys = possible_keys[: self.clamp]
logging.debug(
f"Vigenere crack got keys: {[[i for i in candidate.key] for candidate in possible_keys]}"
)
return [
CrackResult(
value=fix_case(
cipheycore.vigenere_decrypt(ctext, candidate.key, self.group),
real_ctext,
),
key_info="".join([self.group[i] for i in candidate.key]),
misc_info=f"p-value was {candidate.p_value}",
)
for candidate in possible_keys[: min(len(possible_keys), 10)]
]
def attemptCrack(self, ctext: str) -> List[CrackResult]:
logging.info("Trying vigenere cipher")
# Convert it to lower case
if self.lower:
message = ctext.lower()
else:
message = ctext
# Analysis must be done here, where we know the case for the cache
if self.keysize is not None:
return self.crackOne(
message,
self.cache.get_or_update(
ctext,
f"vigenere::{self.keysize}",
lambda: cipheycore.analyse_string(
message, self.keysize, self.group
),
),
ctext,
)
arrs = []
likely_lens = self.cache.get_or_update(
ctext,
"vigenere::likely_lens",
lambda: cipheycore.vigenere_likely_key_lens(
message, self.expected, self.group
),
)
possible_lens = [i for i in likely_lens]
possible_lens.sort(key=lambda i: i.p_value)
logging.debug(f"Got possible lengths {[i.len for i in likely_lens]}")
# TODO: work out length
for i in possible_lens:
arrs.extend(
self.crackOne(
message,
self.cache.get_or_update(
ctext,
f"vigenere::{i.len}",
lambda: cipheycore.analyse_string(message, i.len, self.group),
),
ctext,
)
)
logging.info(f"Vigenere returned {len(arrs)} candidates")
return arrs
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"expected": ParamSpec(
desc="The expected distribution of the plaintext",
req=False,
config_ref=["default_dist"],
),
"group": ParamSpec(
desc="An ordered sequence of chars that make up the caesar cipher alphabet",
req=False,
default="abcdefghijklmnopqrstuvwxyz",
),
"lower": ParamSpec(
desc="Whether or not the ciphertext should be converted to lowercase first",
req=False,
default=True,
),
"keysize": ParamSpec(
desc="A key size that should be used. If not given, will attempt to work it out",
req=False,
),
"p_value": ParamSpec(
desc="The p-value to use for windowed frequency analysis",
req=False,
default=0.5,
),
"detect_p_value": ParamSpec(
desc="The p-value to use for the detection of Vigenere length",
req=False,
default=0.01,
),
"clamp": ParamSpec(
desc="The maximum number of candidates that can be returned per key len",
req=False,
default=10,
),
}
def __init__(self, config: Config):
super().__init__(config)
self.lower: Union[str, bool] = self._params()["lower"]
if not isinstance(self.lower, bool):
self.lower = util.strtobool(self.lower)
self.group = list(self._params()["group"])
self.expected = config.get_resource(self._params()["expected"])
self.cache = config.cache
self.keysize = self._params().get("keysize")
if self.keysize is not None:
self.keysize = int(self.keysize)
self.p_value = float(self._params()["p_value"])
self.detect_p_value = float(self._params()["detect_p_value"])
self.clamp = int(self._params()["clamp"])
self.max_key_length = 16 |
Python | Ciphey/ciphey/basemods/Crackers/xandy.py | import re
from typing import Dict, List, Optional
import logging
from rich.logging import RichHandler
from ciphey.iface import Config, Cracker, CrackInfo, CrackResult, ParamSpec, registry
@registry.register
class Xandy(Cracker[str]):
def getInfo(self, ctext: str) -> CrackInfo:
return CrackInfo(
success_likelihood=0.1,
success_runtime=1e-5,
failure_runtime=1e-5,
)
@staticmethod
def binary_to_ascii(variant):
# Convert the binary string to an integer with base 2
binary_int = int(variant, 2)
byte_number = (binary_int.bit_length() + 7) // 8
# Convert the resulting int to a bytearray and then decode it to ASCII text
binary_array = binary_int.to_bytes(byte_number, "big")
try:
ascii_text = binary_array.decode()
logging.debug(f"Found possible solution: {ascii_text[:32]}")
return ascii_text
except UnicodeDecodeError as e:
logging.debug(f"Failed to crack X-Y due to a UnicodeDecodeError: {e}")
return ""
@staticmethod
def getTarget() -> str:
return "xandy"
def attemptCrack(self, ctext: str) -> List[CrackResult]:
"""
Checks whether the input consists of only two or three different characters.
If this is the case, it attempts to regard those characters as
0 and 1 (with the third character as an optional delimiter) and then
converts it to ASCII text.
"""
logging.debug("Attempting X-Y replacement")
variants = []
candidates = []
result = []
# Convert the ctext to all-lowercase and regex-match & replace all whitespace
ctext = re.sub(r"\s+", "", ctext.lower(), flags=re.UNICODE)
# cset contains every unique value in the ctext
cset = list(set(list(ctext)))
cset_len = len(cset)
if not 1 < cset_len < 4:
# We only consider inputs with two or three unique values
logging.debug(
"Failed to crack X-Y due to not containing two or three unique values"
)
return None
logging.debug(f"String contains {cset_len} unique values: {cset}")
# In case of three unique values, we regard the least frequent character as the delimiter
if cset_len == 3:
# Count each unique character in the set to determine the least frequent one
counting_list = []
for char in cset:
counting_list.append(ctext.count(char))
val, index = min((val, index) for (index, val) in enumerate(counting_list))
delimiter = cset[index]
logging.debug(
f"{delimiter} occurs {val} times and is the probable delimiter"
)
# Remove the delimiter from the ctext and compute new cset
ctext = ctext.replace(delimiter, "")
cset = list(set(list(ctext)))
# Form both variants of the substitution
for i in range(2):
if i:
variants.append(ctext.replace(cset[0], "1").replace(cset[1], "0"))
else:
variants.append(ctext.replace(cset[0], "0").replace(cset[1], "1"))
# Apply function to both variants and strip stray NULL characters
for variant in variants:
candidates.append(self.binary_to_ascii(variant).strip("\x00"))
for i, candidate in enumerate(candidates):
if candidate != "":
keyinfo = f"{cset[0]} -> {i} & {cset[1]} -> {str(int(not i))}"
result.append(CrackResult(value=candidate, key_info=keyinfo))
logging.debug(f"X-Y cracker - Returning results: {result}")
return result
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"expected": ParamSpec(
desc="The expected distribution of the plaintext",
req=False,
config_ref=["default_dist"],
)
}
def __init__(self, config: Config):
super().__init__(config)
self.expected = config.get_resource(self._params()["expected"])
self.cache = config.cache |
Python | Ciphey/ciphey/basemods/Crackers/xortool.py | """
Ciphey
© Brandon Skerritt
Github: bee-san
"""
from typing import Dict, List, Optional
import logging
from rich.logging import RichHandler
from xortool_ciphey import tool_main
from ciphey.iface import Config, Cracker, CrackInfo, CrackResult, ParamSpec, registry
@registry.register
class XorTool(Cracker[str]):
def getInfo(self, ctext: str) -> CrackInfo:
return CrackInfo(
success_likelihood=0.1,
# TODO: actually calculate runtimes
success_runtime=1e-8,
failure_runtime=1e-8,
)
@staticmethod
def getTarget() -> str:
return "xortool"
def attemptCrack(self, ctext: str) -> List[CrackResult]:
logging.debug("Trying xortool cipher")
# TODO handle different charsets
# TODO allow more config over xortool
logging.debug(f"{ctext}")
# https://github.com/Ciphey/xortool/discussions/4
# for docs on this function
try:
result = tool_main.api(str.encode(ctext))
except Exception:
logging.debug("Xor failed.")
return []
result = CrackResult(value=result[1]["Dexored"], key_info=result[0]["keys"])
return [result]
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"expected": ParamSpec(
desc="The expected distribution of the plaintext",
req=False,
config_ref=["default_dist"],
),
"p_value": ParamSpec(
desc="The p-value to use for standard frequency analysis",
req=False,
default=0.01,
),
}
@staticmethod
def score_utility() -> float:
return 1.5
def __init__(self, config: Config):
super().__init__(config)
self.expected = config.get_resource(self._params()["expected"])
self.cache = config.cache
self.p_value = self._params()["p_value"] |
Python | Ciphey/ciphey/basemods/Crackers/__init__.py | from . import (
affine,
ascii_shift,
baconian,
caesar,
rot47,
soundex,
vigenere,
xandy,
xortool,
) |
Python | Ciphey/ciphey/basemods/Decoders/a1z26.py | import re
from typing import Dict, Optional
import logging
from rich.logging import RichHandler
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class A1z26(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs A1Z26 decoding
"""
logging.debug("Attempting A1Z26")
ctext_converted = []
ctext_split = re.split(r"[ ,;:\-\n]", ctext)
delimiters = set(sorted(re.sub(r"[^ ,;:\-\n]", "", ctext)))
ctext_num = re.sub(r"[,;:\-\s]", "", ctext)
ctext_decoded = ""
if not ctext_num.isnumeric():
logging.debug("Failed to decode A1Z26 due to non numeric character(s)")
return None
try:
for i in ctext_split:
val = int(i)
if val > 26 or val < 1:
logging.debug(
f"Failed to decode A1Z26 due to invalid number '{val}'"
)
return None
val2 = int(i) + 96
ctext_converted.append(chr(val2))
ctext_decoded = "".join(ctext_converted)
logging.info(
f"A1Z26 successful, returning '{ctext_decoded}' with delimiter(s) {delimiters}"
)
return ctext_decoded
except Exception:
return None
@staticmethod
def priority() -> float:
return 0.05
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "a1z26" |
Python | Ciphey/ciphey/basemods/Decoders/atbash.py | from typing import Dict, Optional
from ciphey.common import fix_case
from ciphey.iface import Config, Decoder, ParamSpec, T, U, WordList, registry
@registry.register
class Atbash(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Takes an encoded string and attempts to decode it according to the Atbash cipher.
The Atbash cipher is a very simple substitution cipher without a key.
It operates by replacing every letter in the input by its 'counterpoint'
in the alphabet. Example: A -> Z, B -> Y, ... , M -> N and vice versa.
"""
result = ""
atbash_dict = {self.ALPHABET[i]: self.ALPHABET[::-1][i] for i in range(26)}
for letter in ctext.lower():
if letter in atbash_dict.keys():
# Match every letter of the input to its atbash counterpoint
result += atbash_dict[letter]
else:
# If the current character is not in the defined alphabet,
# just accept it as-is (useful for numbers, punctuation, etc.)
result += letter
return fix_case(result, ctext)
@staticmethod
def priority() -> float:
# Not expected to show up often, but also very fast to check.
return 0.1
def __init__(self, config: Config):
super().__init__(config)
self.ALPHABET = config.get_resource(self._params()["dict"], WordList)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"dict": ParamSpec(
desc="The alphabet used for the atbash operation.",
req=False,
default="cipheydists::list::englishAlphabet",
)
}
@staticmethod
def getTarget() -> str:
return "atbash" |
Python | Ciphey/ciphey/basemods/Decoders/base58_bitcoin.py | from typing import Dict, Optional
import base58
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class Base58_bitcoin(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs Base58 (Bitcoin) decoding
"""
try:
return base58.b58decode(ctext).decode("utf-8")
except Exception:
return None
@staticmethod
def priority() -> float:
# Not expected to show up often, but also very fast to check.
return 0.05
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "base58_bitcoin" |
Python | Ciphey/ciphey/basemods/Decoders/base58_flickr.py | from typing import Dict, Optional
import base58
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class Base58_flickr(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs Base58 (Flickr) decoding
"""
FLICKR_ALPHABET = b"123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ"
try:
return base58.b58decode(ctext, alphabet=FLICKR_ALPHABET).decode("utf-8")
except Exception:
return None
@staticmethod
def priority() -> float:
# Not expected to show up often, but also very fast to check.
return 0.05
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "base58_flickr" |
Python | Ciphey/ciphey/basemods/Decoders/base58_ripple.py | from typing import Dict, Optional
import base58
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class Base58_ripple(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs Base58 (Ripple) decoding
"""
try:
return base58.b58decode(ctext, alphabet=base58.RIPPLE_ALPHABET).decode(
"utf-8"
)
except Exception:
return None
@staticmethod
def priority() -> float:
# Not expected to show up often, but also very fast to check.
return 0.05
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "base58_ripple" |
Python | Ciphey/ciphey/basemods/Decoders/base62.py | from typing import Dict, Optional
import base62
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class Base62(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs Base62 decoding
"""
try:
return base62.decodebytes(ctext).decode("utf-8")
except Exception:
return None
@staticmethod
def priority() -> float:
# Not expected to show up often, but also very fast to check.
return 0.05
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "base62" |
Python | Ciphey/ciphey/basemods/Decoders/base64_url.py | import base64
from typing import Dict, Optional
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class Base64_url(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs Base64 URL decoding
"""
ctext_padding = ctext + "=" * (4 - len(ctext) % 4)
try:
return base64.urlsafe_b64decode(ctext_padding).decode("utf-8")
except Exception:
return None
@staticmethod
def priority() -> float:
# Not expected to show up often, but also very fast to check.
return 0.05
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "base64_url" |
Python | Ciphey/ciphey/basemods/Decoders/base65536.py | from typing import Dict, Optional
import base65536
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class Base65536(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs Base65536 decoding
"""
try:
return base65536.decode(ctext).decode("utf-8")
except Exception:
return None
@staticmethod
def priority() -> float:
# Not expected to show up often, but also very fast to check.
return 0.05
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "base65536" |
Python | Ciphey/ciphey/basemods/Decoders/base69.py | # Translated to Python and adapted for Ciphey from the JS original at https://github.com/pshihn/base69
import re
from math import ceil
from typing import Dict, Optional
from ciphey.iface import Config, Decoder, ParamSpec, T, U, WordList, registry
@registry.register
class Base69(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs Base69 decoding
"""
# Remove whitespace
try:
ctext = re.sub(r"\s+", "", ctext, flags=re.UNICODE)
extra_bytes = 0
clen = len(ctext)
if ctext[:-1] == "=":
extra_bytes = int(ctext[clen - 2])
CHUNK_COUNT = ceil(clen / 16)
result = [0 for _ in range(CHUNK_COUNT * 7 - extra_bytes)]
for i in range(CHUNK_COUNT):
chunk_string = ctext[i * 16 : (i + 1) * 16]
if extra_bytes and (i == CHUNK_COUNT - 1):
insert = self.decode_chunk(chunk_string)
for n, elem in enumerate(insert[0 : 7 - extra_bytes]):
result[n + i * 7] = elem
else:
insert = self.decode_chunk(chunk_string)
for n, elem in enumerate(insert):
result[n + i * 7] = elem % 256
return bytearray(result).decode().strip("\x00")
except Exception:
return None
def decode_chunk(self, s: str):
padded_bytes = s.endswith("=")
decoded = [0 for _ in range(8)]
for i in range(8):
decoded[i] = (
0
if i == 7 and padded_bytes
else self.chars_to_byte(s[i * 2 : i * 2 + 2])
)
result = [0 for _ in range(7)]
for i in range(7):
t1 = decoded[i] << (i + 1)
t2 = decoded[i + 1] >> (7 - i - 1)
result[i] = t1 | t2
return result
def chars_to_byte(self, s: str):
return (69 * self.CHARS.index(s[1])) + (self.CHARS.index(s[0]))
@staticmethod
def priority() -> float:
# If this becomes lower or equal to the reverse, it breaks.
# So I'll set it to 0.2 for now since it is very fast anyways.
return 0.2
def __init__(self, config: Config):
super().__init__(config)
self.CHARS = config.get_resource(self._params()["dict"], WordList)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"dict": ParamSpec(
desc="The charset used for the decoder.",
req=False,
default="cipheydists::list::base69",
)
}
@staticmethod
def getTarget() -> str:
return "base69" |
Python | Ciphey/ciphey/basemods/Decoders/base91.py | from typing import Dict, Optional
import base91
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class Base91(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs Base91 decoding
"""
try:
return base91.decode(ctext).decode("utf-8")
except Exception:
return None
@staticmethod
def priority() -> float:
# Not expected to show up often, but also very fast to check.
return 0.05
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "base91" |
Python | Ciphey/ciphey/basemods/Decoders/bases.py | import base64
import types
from typing import Any, Callable, Optional
import logging
from rich.logging import RichHandler
import re
from ciphey.common import id_lambda
from ciphey.iface import Decoder, registry
def _dispatch(self: Any, ctext: str, func: Callable[[str], bytes]) -> Optional[bytes]:
logging.debug(f"Attempting {self.getTarget()}")
try:
# remove all whitespace
ctext = re.sub(r"\s+", "", ctext, re.UNICODE)
result = func(ctext)
logging.info(f"{self.getTarget()} successful, returning {result}")
return result
except ValueError:
logging.debug(f"Failed to decode {self.getTarget()}")
return None
_bases = {
"base16": (base64.b16decode, 0.4),
"base32": (base64.b32decode, 0.01),
"base64": (base64.b64decode, 0.4),
"base85": (base64.b85decode, 0.01),
"ascii85": (base64.a85decode, 0.1),
}
def gen_class(name, decoder, priority, ns):
ns["_get_func"] = id_lambda(decoder)
ns["decode"] = lambda self, ctext: _dispatch(self, ctext, self._get_func())
ns["getParams"] = id_lambda(None)
ns["getTarget"] = id_lambda(name)
ns["priority"] = id_lambda(priority)
ns["__init__"] = lambda self, config: super(type(self), self).__init__(config)
for name, (decoder, priority) in _bases.items():
t = types.new_class(
name,
(Decoder[str],),
exec_body=lambda x: gen_class(name, decoder, priority, x),
)
registry.register(t) |
Python | Ciphey/ciphey/basemods/Decoders/baudot.py | import re
from typing import Dict, Optional
from ciphey.iface import Config, Decoder, ParamSpec, T, Translation, U, registry
@registry.register
class Baudot(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
result = ""
switch_to_digit_map = 0
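# Explanatory note (added for clarity): in the ITA2/Baudot code, "11011" is the
# figure-shift (FIGS) control character and "11111" the letter-shift (LTRS) one,
# so the loop below toggles between the letter and figure halves of the table.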
if re.search("^[01]{5}$", ctext.split()[0]):
for i in ctext.split():
if i == "11011":
switch_to_digit_map = 1
if i == "11111":
switch_to_digit_map = 0
if switch_to_digit_map == 1:
result += self.BAUDOT_DICT["+" + i]
if switch_to_digit_map == 0:
result += self.BAUDOT_DICT[i]
return result
else:
return None
@staticmethod
def priority() -> float:
return 0.05
def __init__(self, config: Config):
super().__init__(config)
self.BAUDOT_DICT = config.get_resource(self._params()["dict"], Translation)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"dict": ParamSpec(
desc="The baudot alphabet dictionary to use",
req=False,
default="cipheydists::translate::baudot",
)
}
@staticmethod
def getTarget() -> str:
return "baudot" |
Python | Ciphey/ciphey/basemods/Decoders/binary.py | import re
from typing import Dict, List, Optional
import logging
from rich.logging import RichHandler
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class Binary(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
try:
ctext = re.sub(r"[^\S \n]", " ", ctext, flags=re.UNICODE)
ctext = ctext.replace("\n", " ")
existing_split = self.try_split(ctext.split(" "))
if existing_split is not None:
return existing_split
# Now we try our own grouping
# Remove final bit of whitespace
ctext = ctext.replace(" ", "")
# Split into bytes, and test
return self.try_split([ctext[i : i + 8] for i in range(0, len(ctext), 8)])
# Catch bad octal chars
except ValueError:
return None
def try_split(self, split_text: List[str]):
ret = []
for i in split_text:
if len(i) == 0:
continue
val = int(i, 2)
if val > 255 or val < 0:
return None
ret.append(val)
if len(ret) != 0:
ret = bytes(ret)
logging.info(f"binary successful, returning {ret.__repr__()}")
return ret
@staticmethod
def priority() -> float:
return 0.3
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "binary" |
Python | Ciphey/ciphey/basemods/Decoders/braille.py | import re
from typing import Dict, Optional
from ciphey.iface import Config, Decoder, ParamSpec, T, Translation, U, registry
import logging
from rich.logging import RichHandler
@registry.register
class Braille(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs Braille decoding
"""
logging.debug("Attempting Braille")
ctext_decoded = ""
braille_matches = 0
for symbol in self.BRAILLE_DICT_INV.values():
if symbol in ctext:
braille_matches += 1
else:
continue
if braille_matches == 0:
logging.debug("Failed to decode Braille due to invalid characters")
return None
for pattern, value in self.BRAILLE_DICT.items():
ctext = re.sub(pattern, value, ctext)
wordArr = []
for word in ctext.split(" "):
# If two commas are in front of a word, uppercase the word and remove the comma
if word[:2].find(",,") != -1:
wordArr.append(word.replace(",,", "").upper())
else:
wordArr.append(word)
result = []
for word in wordArr:
# If one comma is in front of a word, capitalize the word and remove the comma
if word[0].find(",") != -1:
result.append(word.replace(",", "").capitalize())
else:
result.append(word)
ctext_decoded = " ".join(result)
logging.info(f"Braille successful, returning '{ctext_decoded}'")
return ctext_decoded
@staticmethod
def priority() -> float:
return 0.05
def __init__(self, config: Config):
super().__init__(config)
self.BRAILLE_DICT = config.get_resource(self._params()["dict"], Translation)
self.BRAILLE_DICT_INV = {v: k for k, v in self.BRAILLE_DICT.items()}
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"dict": ParamSpec(
desc="The Braille dictionary to use",
req=False,
default="cipheydists::translate::braille",
)
}
@staticmethod
def getTarget() -> str:
return "braille" |
Python | Ciphey/ciphey/basemods/Decoders/brainfuck.py | import re
import time
from typing import Dict, Optional, Tuple
import logging
from rich.logging import RichHandler
from ciphey.iface import Config, Decoder, ParamSpec, T, U, WordList, registry
@registry.register
class Brainfuck(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Takes a ciphertext and treats it as a Brainfuck program,
interpreting it and saving the output as a string to return.
Brainfuck is a very simple, Turing-complete esoteric language.
Below is a simplified interpreter that attempts to check whether a
given ciphertext is a brainfuck program that would output a string.
A program that can be "decoded" like this is one that:
* Does not require user input ("," instruction)
* Includes at least one putchar instruction (".")
* Does not contain anything but the main 7 instructions,
(excluding ",") and whitespace
Details:
* The memory pointer wraps around for "<", and the tape grows as needed for ">"
* It is time-limited to 60 seconds, to prevent hangups
* The program starts with 100 memory cells, chosen arbitrarily
"""
logging.debug("Attempting brainfuck")
result = ""
memory = [0] * 100
codeptr, memptr = 0, 0  # Instruction pointer and memory pointer
timelimit = 60 # The timeout in seconds
bracemap, isbf = self.bracemap_and_check(ctext)
# If it doesn't appear to be valid brainfuck code
if not isbf:
logging.debug("Failed to interpret brainfuck due to invalid characters")
return None
# Get start time
start = time.time()
while codeptr < len(ctext):
current = time.time()
# Return none if we've been running for over a minute
if current - start > timelimit:
logging.debug("Failed to interpret brainfuck due to timing out")
return None
cmd = ctext[codeptr]
if cmd == "+":
if memory[memptr] < 255:
memory[memptr] = memory[memptr] + 1
else:
memory[memptr] = 0
elif cmd == "-":
if memory[memptr] > 0:
memory[memptr] = memory[memptr] - 1
else:
memory[memptr] = 255
elif cmd == ">":
if memptr == len(memory) - 1:
memory.append(0)
memptr += 1
elif cmd == "<":
if memptr == 0:
memptr = len(memory) - 1
else:
memptr -= 1
# If we're at the beginning of the loop and the memory is 0, exit the loop
elif cmd == "[" and memory[memptr] == 0:
codeptr = bracemap[codeptr]
# If we're at the end of the loop and the memory is >0, jmp to the beginning of the loop
elif cmd == "]" and memory[memptr]:
codeptr = bracemap[codeptr]
# Store the output as a string instead of printing it out
elif cmd == ".":
result += chr(memory[memptr])
codeptr += 1
logging.info(f"Brainfuck successful, returning '{result}'")
return result
def bracemap_and_check(self, program: str) -> Tuple[Optional[Dict], bool]:
"""
Create a bracemap of brackets in the program, to compute jmps.
Maps open -> close brackets as well as close -> open brackets.
Also returns True if the program is valid Brainfuck code. If False, we
won't even try to run it.
"""
open_stack = []
bracemap = dict()
legal_instructions = {"+", "-", ">", "<", "[", "]", "."}
legal_count = 0
# If the program actually outputs anything (contains ".")
prints = False
for idx, instruction in enumerate(program):
# If instruction is brainfuck (without input) or whitespace, it counts
if instruction in legal_instructions or re.match(r"\s", instruction):
legal_count += 1
if not prints and instruction == ".":
# If there are no "." instructions then this program will not output anything
prints = True
elif instruction == "[":
open_stack.append(idx)
elif instruction == "]":
try:
opbracket = open_stack.pop()
bracemap[opbracket] = idx
bracemap[idx] = opbracket
except IndexError:
# Mismatched braces, not a valid program
# Closing braces > opening braces
return (None, False)
# 1. All characters are instructions or whitespace
# 2. There are no extra open braces
# 3. There is at least one character to be "printed"
# (result is >=1 in length)
is_brainfuck = legal_count == len(program) and len(open_stack) == 0 and prints
return bracemap, is_brainfuck
@staticmethod
def priority() -> float:
# Not uncommon, but not very common either. It's also slow.
return 0.08
def __init__(self, config: Config):
super().__init__(config)
self.ALPHABET = config.get_resource(self._params()["dict"], WordList)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"dict": ParamSpec(
desc="Brainfuck alphabet (default English)",
req=False,
default="cipheydists::list::englishAlphabet",
)
}
@staticmethod
def getTarget() -> str:
return "brainfuck" |
Python | Ciphey/ciphey/basemods/Decoders/decimal.py | import re
from typing import Dict, Optional
import logging
from rich.logging import RichHandler
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class Decimal(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs Decimal decoding
"""
logging.debug("Attempting decimal")
ctext_converted = []
ctext_split = re.split(r"[ ,;:\-\n]", ctext)
delimiters = set(sorted(re.sub(r"[^ ,;:\-\n]", "", ctext)))
ctext_num = re.sub(r"[,;:\-\s]", "", ctext)
ctext_decoded = ""
if ctext_num.isnumeric() is False:
logging.debug("Failed to decode decimal due to non numeric character(s)")
return None
try:
for i in ctext_split:
val = int(i)
if val > 255 or val < 0:
logging.debug(
f"Failed to decode decimal due to invalid number '{val}'"
)
return None
ctext_converted.append(chr(val))
ctext_decoded = "".join(ctext_converted)
logging.info(
f"Decimal successful, returning '{ctext_decoded}' with delimiter(s) {delimiters}"
)
return ctext_decoded
except Exception:
return None
@staticmethod
def priority() -> float:
return 0.05
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "decimal" |
Python | Ciphey/ciphey/basemods/Decoders/dna.py | import re
from typing import Dict, Optional
import logging
from rich.logging import RichHandler
from ciphey.iface import Config, Decoder, ParamSpec, T, Translation, U, registry
@registry.register
class Dna(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs DNA decoding
"""
logging.debug("Attempting DNA decoder")
ctext_decoded = ""
ctext = re.sub(r"[,;:\-\s]", "", ctext)
ctext = " ".join(ctext[i : i + 3] for i in range(0, len(ctext), 3))
ctext_split = ctext.split(" ")
dna_keys = self.DNA_DICT.keys()
for i in ctext_split:
if i in dna_keys:
ctext_decoded += self.DNA_DICT[i]
else:
return None
logging.info(f"DNA successful, returning '{ctext_decoded}'")
return ctext_decoded
@staticmethod
def priority() -> float:
return 0.2
def __init__(self, config: Config):
super().__init__(config)
self.DNA_DICT = config.get_resource(self._params()["dict"], Translation)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"dict": ParamSpec(
desc="The DNA alphabet dictionary to use",
req=False,
default="cipheydists::translate::dna",
)
}
@staticmethod
def getTarget() -> str:
return "dna" |
Python | Ciphey/ciphey/basemods/Decoders/dtmf.py | import re
from typing import Dict, Optional
import logging
from rich.logging import RichHandler
from ciphey.iface import Config, Decoder, ParamSpec, T, Translation, U, registry
@registry.register
class Dtmf(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs DTMF decoding
"""
logging.debug("Attempting DTMF decoder")
ctext_decoded = ""
ctext = re.sub(r"[,;:\-\/\s]", "", ctext)
ctext = " ".join(ctext[i : i + 7] for i in range(0, len(ctext), 7))
ctext_split = ctext.split(" ")
dtmf_keys = self.DTMF_DICT.keys()
for i in ctext_split:
if i in dtmf_keys:
ctext_decoded += self.DTMF_DICT[i]
else:
return None
logging.info(f"DTMF successful, returning '{ctext_decoded}'")
return ctext_decoded
@staticmethod
def priority() -> float:
return 0.2
def __init__(self, config: Config):
super().__init__(config)
self.DTMF_DICT = config.get_resource(self._params()["dict"], Translation)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"dict": ParamSpec(
desc="The DTMF alphabet dictionary to use",
req=False,
default="cipheydists::translate::dtmf",
)
}
@staticmethod
def getTarget() -> str:
return "dtmf" |
Python | Ciphey/ciphey/basemods/Decoders/galactic.py | from typing import Dict, Optional
import logging
from rich.logging import RichHandler
from ciphey.iface import Config, Decoder, ParamSpec, T, Translation, U, registry
@registry.register
class Galactic(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Takes a string written in the 'Standard Galactic Alphabet'
(aka Minecraft Enchanting Table Symbols) and translates it to ASCII text.
"""
logging.debug("Attempting Standard Galactic Alphabet decoder")
# To avoid complications, only move forward with the decoding if we can
# reasonably assume that the input string is written in the galactic alphabet
galactic_matches = 0
for symbol in self.GALACTIC_DICT.keys():
# These symbols are assumed to be frequent enough in regular
# text to be skipped when counting the matches. All others are counted.
if symbol in ctext and symbol not in ["!", "|"]:
galactic_matches += 1
else:
continue
if galactic_matches == 0:
logging.debug(
"No matching galactic alphabet letters found. Skipping galactic decoder"
)
return None
logging.debug(f"{galactic_matches} galactic alphabet letters found. ")
result = ""
# Take out the problematic characters consisting of multiple symbols
ctext = (
ctext.replace("||", "|")
.replace("/", "")
.replace("Β‘", "")
.replace(" Μ£ ", "")
.replace("Μ", "x")
)
logging.debug(f"Modified string is {ctext}")
for letter in ctext:
if letter in self.GALACTIC_DICT.keys():
# Match every letter of the input to its galactic counterpart
result += self.GALACTIC_DICT[letter]
else:
# If the current character is not in the defined alphabet,
# just accept it as-is (useful for numbers, punctuation, etc.)
result += letter
# Remove the trailing space (appearing as a leading space)
# from the x that results from the diacritic replacement
result = result.replace("x ", "x")
logging.debug(f"Decoded string is {result}")
return result
@staticmethod
def priority() -> float:
# Not expected to show up often, but also very fast to check.
return 0.01
def __init__(self, config: Config):
super().__init__(config)
self.GALACTIC_DICT = config.get_resource(self._params()["dict"], Translation)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"dict": ParamSpec(
desc="The galactic alphabet dictionary to use",
req=False,
default="cipheydists::translate::galactic",
)
}
@staticmethod
def getTarget() -> str:
return "galactic" |
Python | Ciphey/ciphey/basemods/Decoders/gzip.py | import zlib
from typing import Dict, Optional
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class Gzip(Decoder[bytes]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs Gzip decoding
"""
try:
return zlib.decompress(ctext, 16 + zlib.MAX_WBITS).decode("utf-8")
except Exception:
return None
@staticmethod
def priority() -> float:
# Not expected to show up often, but also very fast to check.
return 0.05
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "gzip" |
Python | Ciphey/ciphey/basemods/Decoders/hexadecimal.py | from typing import Dict, Optional
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class Hexadecimal(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs Hexadecimal decoding
"""
ctext_decoded = ""
try:
ctext_decoded = bytearray.fromhex(ctext).decode("utf-8")
return ctext_decoded
except Exception:
return None
@staticmethod
def priority() -> float:
return 0.015
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "hexadecimal" |
Python | Ciphey/ciphey/basemods/Decoders/leetspeak.py | from typing import Dict, Optional
from ciphey.iface import Config, Decoder, ParamSpec, T, Translation, U, registry
@registry.register
class Leetspeak(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
for src, dst in self.translate.items():
ctext = ctext.replace(src, dst)
return ctext
@staticmethod
def priority() -> float:
return 0.05
def __init__(self, config: Config):
super().__init__(config)
self.translate = config.get_resource(self._params()["dict"], Translation)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"dict": ParamSpec(
desc="The leetspeak dictionary to use",
req=False,
default="cipheydists::translate::leet",
)
}
@staticmethod
def getTarget() -> str:
return "leetspeak" |
Ciphey/ciphey/basemods/Decoders/letters.archive | """
Not yet implemented.
"""
class letters:
"""Deals with Nato Strings / first letter of every word"""
def __init__(self):
None
def __name__(self):
return "Letters"
def decrypt(self, text: str) -> dict:
return text
def first_letter_every_word(self, text):
"""
This should be supplied a string like "hello my name is"
"""
text = text.split(".")
new_text = []
for sentence in text:
for word in sentence.split(" "):
new_text.append(word[0])
# Applies a space after every sentence
# which might be every word
new_text.append(" ") |
Python | Ciphey/ciphey/basemods/Decoders/morse_code.py | from typing import Dict, Optional
import logging
from rich.logging import RichHandler
from ciphey.iface import Config, Decoder, ParamSpec, T, Translation, U, registry
@registry.register
class Morse_code(Decoder[str]):
# A priority list for char/word boundaries
BOUNDARIES = {" ": 1, "/": 2, "\n": 3}
PURGE = {ord(c): None for c in BOUNDARIES.keys()}
MAX_PRIORITY = 3
ALLOWED = {".", "-", " ", "/", "\n"}
MORSE_CODE_DICT: Dict[str, str]
MORSE_CODE_DICT_INV: Dict[str, str]
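# Illustrative example (not from the original file): for
# ".... .. / .-- --- .-. .-.. -.." the char boundary is " " and the word
# boundary is "/", and the decoder returns "HI WORLD".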
def decode(self, ctext: T) -> Optional[U]:
logging.debug("Attempting Morse code decoder")
char_boundary = word_boundary = None
char_priority = word_priority = 0
# Custom loop allows early break
for i in ctext:
i_priority = self.BOUNDARIES.get(i)
if i_priority is None:
if i in self.ALLOWED:
continue
logging.debug(f"Non-morse char '{i}' found")
return None
if i_priority <= char_priority or i == char_boundary or i == word_boundary:
continue
# Default to having a char boundary over a word boundary
if (
i_priority > word_priority
and word_boundary is None
and char_boundary is not None
):
word_priority = i_priority
word_boundary = i
continue
char_priority = i_priority
char_boundary = i
logging.debug(
f"Char boundary is unicode {ord(char_boundary)}, and word boundary is unicode {ord(word_boundary) if word_boundary is not None else None}"
)
result = ""
for word in ctext.split(word_boundary) if word_boundary else [ctext]:
logging.debug(f"Attempting to decode word {word}")
for char in word.split(char_boundary):
char = char.translate(self.PURGE)
if len(char) == 0:
continue
try:
m = self.MORSE_CODE_DICT_INV[char]
except KeyError:
logging.debug(f"Invalid codeword '{char}' found")
return None
result = result + m
# after every word add a space
result = result + " "
if len(result) == 0:
logging.debug("Morse code failed to match")
return None
# Remove trailing space
result = result[:-1]
logging.info(f"Morse code successful, returning {result}")
return result.strip().upper()
@staticmethod
def priority() -> float:
return 0.05
def __init__(self, config: Config):
super().__init__(config)
self.MORSE_CODE_DICT = config.get_resource(self._params()["dict"], Translation)
self.MORSE_CODE_DICT_INV = {v: k for k, v in self.MORSE_CODE_DICT.items()}
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"dict": ParamSpec(
desc="The morse code dictionary to use",
req=False,
default="cipheydists::translate::morse",
)
}
@staticmethod
def getTarget() -> str:
return "morse_code" |
Python | Ciphey/ciphey/basemods/Decoders/multi_tap.py | from typing import Dict, Optional
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class Multi_tap(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
result = ""
for x in ctext.split():
if x == self.SPACE_DIGIT: # Check if it's a space
result += " "
elif not Multi_tap.valid_code_part(x):
return None
else:
result += self.decode_num_to_char(x)
return result
@staticmethod
def valid_code_part(code: str) -> bool:
if not code.isdigit():
return False
# if not all the digits are the same
if not Multi_tap.is_all_dup(code):
return False
if int(code[0]) not in range(2, 10):
return False
if len(code) > 4:
return False
return True
@staticmethod
def decode_num_to_char(number: str) -> str:
index = Multi_tap.calculate_index(number)
return Multi_tap.number_index_to_char(index)
@staticmethod
def is_all_dup(code):
return len(set(code)) == 1
@staticmethod
def calculate_index(number: str) -> int:
first_number_as_int = int(number[0])
number_index = Multi_tap.get_index_from_first_digit(first_number_as_int)
# Add to index the number of the char : "22" -> index += 1
num_rest_numbers = len(number) - 1
number_index += num_rest_numbers
return number_index
@staticmethod
def number_index_to_char(index_number: int) -> str:
start_ascii_value = ord("A")
return chr(start_ascii_value + index_number)
@staticmethod
def get_index_from_first_digit(first_digit: int) -> int:
number_index = 0
if first_digit >= 8:  # digit 7 has four letters (PQRS), so 8 and 9 are shifted by one
number_index += 1
first_digit -= 2  # letters start at digit 2
number_index += first_digit * 3  # jump 3 every time
return number_index
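# Worked example (illustrative, not from the original file): "9" gives
# base index 21 + 1 = 22 -> "W", and "222" gives base index 0 + 2 = 2 -> "C".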
@staticmethod
def priority() -> float:
return 0.05
def __init__(self, config: Config):
super().__init__(config)
self.SPACE_DIGIT = "0"
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "multi_tap" |
Python | Ciphey/ciphey/basemods/Decoders/octal.py | from typing import Dict, Optional
import logging
from rich.logging import RichHandler
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class Octal(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs Octal decoding
"""
str_converted = []
octal_seq = ctext.split(" ")
if len(octal_seq) == 1:
# Concatenated octal must be formed of octal triplets
if len(ctext) % 3 != 0:
return None
octal_seq = [ctext[i : i + 3] for i in range(0, len(ctext), 3)]
logging.debug(f"Trying chunked octal {octal_seq}")
try:
for octal_char in octal_seq:
if len(octal_char) > 3:
logging.debug("Octal subseq too long")
return None
n = int(octal_char, 8)
# Values above 255 (up to "777" == 511) raise ValueError in bytes() below and are caught
if n < 0:
logging.debug(f"Non octal char {octal_char}")
return None
str_converted.append(n)
return bytes(str_converted)
# Catch bad octal chars
except ValueError:
return None
@staticmethod
def priority() -> float:
return 0.025
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "octal" |
Python | Ciphey/ciphey/basemods/Decoders/reverse.py | from typing import Dict, Optional
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class Reverse(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
return ctext[::-1]
@staticmethod
def priority() -> float:
return 0.05
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "reverse" |
Python | Ciphey/ciphey/basemods/Decoders/tap_code.py | # by https://github.com/RustyDucky and https://github.com/lukasgabriel
from typing import Dict, Optional
from ciphey.iface import Config, Decoder, ParamSpec, T, Translation, U, registry
@registry.register
class Tap_code(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs Tap code decoding
"""
try:
result = ""
combinations = ctext.split(" ")
for fragment in combinations:
result += self.TABLE.get(fragment)
return result
except Exception:
return None
@staticmethod
def priority() -> float:
return 0.06
def __init__(self, config: Config):
super().__init__(config)
self.TABLE = config.get_resource(self._params()["dict"], Translation)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"dict": ParamSpec(
desc="The table of letters used for the tap code interpretation.",
req=False,
default="cipheydists::translate::tap_code",
)
}
@staticmethod
def getTarget() -> str:
return "tap_code" |
Python | Ciphey/ciphey/basemods/Decoders/unicode.py | from typing import Dict, Optional
import logging
from rich.logging import RichHandler
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class Utf8(Decoder[bytes]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs UTF-8 decoding
"""
logging.debug("Attempting UTF-8 decoder")
result = ""
try:
result = ctext.decode("utf-8")
if result != ctext:
logging.info(f"UTF-8 successful, returning '{result}'")
return result
else:
return None
except Exception:
return None
@staticmethod
def priority() -> float:
return 0.9
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "utf8" |
Python | Ciphey/ciphey/basemods/Decoders/url.py | from typing import Dict, Optional
from urllib.parse import unquote_plus
import logging
from rich.logging import RichHandler
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class Url(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs URL decoding
"""
logging.debug("Attempting URL")
result = ""
try:
result = unquote_plus(ctext, errors="strict")
if result != ctext:
logging.info(f"URL successful, returning '{result}'")
return result
else:
return None
except Exception:
logging.debug("Failed to decode URL")
return None
@staticmethod
def priority() -> float:
return 0.05
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "url" |
Python | Ciphey/ciphey/basemods/Decoders/uuencode.py | from binascii import a2b_uu
from codecs import decode
from typing import Dict, Optional
import logging
from rich.logging import RichHandler
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class Uuencode(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
UUencode (Unix-to-Unix encoding) is an encoding scheme that converts
binary data (split into 6-bit blocks) into printable ASCII characters.
This function decodes the input string 'ctext' if it has been uuencoded
and returns None otherwise.
"""
logging.debug("Attempting UUencode")
result = ""
try:
# UUencoded messages may begin with prefix "begin" and end with suffix "end"
# In that case, we use the codecs module in Python
ctext_strip = ctext.strip()
if ctext_strip.startswith("begin") and ctext_strip.endswith("end"):
result = decode(bytes(ctext, "utf-8"), "uu").decode()
else:
# If there isn't a "begin" prefix and "end" suffix, we use the binascii module instead
# It is possible that the ctext has multiple lines, so convert each line and append
ctext_split = list(filter(None, ctext.splitlines()))
for _, value in enumerate(ctext_split):
result += a2b_uu(value).decode("utf-8")
logging.info(f"UUencode successful, returning '{result}'")
return result
except Exception:
logging.debug("Failed to decode UUencode")
return None
@staticmethod
def priority() -> float:
# Not expected to show up often, but also very fast to check.
return 0.05
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "uuencode" |
Python | Ciphey/ciphey/basemods/Decoders/z85.py | from typing import Dict, Optional
import logging
from rich.logging import RichHandler
from zmq.utils import z85
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class Z85(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs Z85 decoding
"""
ctext_len = len(ctext)
if ctext_len % 5:
logging.debug(
f"Failed to decode Z85 because length must be a multiple of 5, not '{ctext_len}'"
)
return None
try:
return z85.decode(ctext).decode("utf-8")
except Exception:
return None
@staticmethod
def priority() -> float:
# Not expected to show up often, but also very fast to check.
return 0.05
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "z85" |
Python | Ciphey/ciphey/basemods/Decoders/__init__.py | from . import (
a1z26,
atbash,
base58_bitcoin,
base58_ripple,
base62,
base69,
base91,
bases,
baudot,
binary,
braille,
brainfuck,
decimal,
dna,
dtmf,
galactic,
gzip,
hexadecimal,
leetspeak,
morse_code,
multi_tap,
octal,
reverse,
tap_code,
unicode,
url,
uuencode,
z85,
) |
Python | Ciphey/ciphey/basemods/Resources/cipheydists.py | from functools import lru_cache
from typing import Any, Dict, Optional, Set
import cipheydists
import logging
from ciphey.iface import (
Config,
Distribution,
ParamSpec,
ResourceLoader,
Translation,
WordList,
registry,
)
@registry.register_multi(WordList, Distribution, Translation)
class CipheyDists(ResourceLoader):
# _wordlists: Set[str] = frozenset({"english", "english1000", "englishStopWords"})
# _brandons: Set[str] = frozenset({"english"})
# _dists: Set[str] = frozenset({"twist"})
# _translates: Set[str] = frozenset({"morse"})
_getters = {
"list": cipheydists.get_list,
"dist": cipheydists.get_dist,
"brandon": cipheydists.get_brandon,
"translate": cipheydists.get_translate,
}
def whatResources(self) -> Optional[Set[str]]:
pass
@lru_cache()
def getResource(self, name: str) -> Any:
logging.debug(f"Loading cipheydists resource {name}")
prefix, name = name.split("::", 1)
return self._getters[prefix](name)
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None |
Python | Ciphey/ciphey/basemods/Resources/files.py | import csv
import json
from functools import lru_cache
from typing import Dict, Generic, Optional, Set
from ciphey.iface import (
Config,
Distribution,
ParamSpec,
ResourceLoader,
T,
WordList,
registry,
)
# We can use a generic resource loader here, as we can instantiate it later
@registry.register_multi(WordList, Distribution)
class Json(ResourceLoader):
def whatResources(self) -> T:
return self._names
@lru_cache()
def getResource(self, name: str) -> T:
prefix, name = name.split("::", 1)
return {"wordlist": (lambda js: {js}), "dist": (lambda js: js)}[prefix](
json.load(open(self._paths[int(name) - 1]))
)
@staticmethod
def getName() -> str:
return "json"
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {"path": ParamSpec(req=True, desc="The path to a JSON file", list=True)}
def __init__(self, config: Config):
super().__init__(config)
self._paths = self._params()["path"]
self._names = set(range(1, len(self._paths) + 1))
# We can use a generic resource loader here, as we can instantiate it later
@registry.register_multi(WordList, Distribution)
class Csv(Generic[T], ResourceLoader[T]):
def whatResources(self) -> Set[str]:
return self._names
@lru_cache()
def getResource(self, name: str) -> T:
prefix, name = name.split("::", 1)
return {
"wordlist": (lambda reader: {i[0] for i in reader}),
"dist": (lambda reader: {i[0]: float(i[1]) for i in reader}),
}[prefix](csv.reader(open(self._paths[int(name) - 1])))
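# Illustrative usage (assumption, not from the original file): with one
# configured path, getResource("wordlist::1") reads that CSV and returns the
# set of values in its first column, while "dist::1" builds a {word: float} map.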
@staticmethod
def getName() -> str:
return "csv"
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {"path": ParamSpec(req=True, desc="The path to a CSV file", list=True)}
def __init__(self, config: Config):
super().__init__(config)
self._paths = self._params()["path"]
self._names = set(range(1, len(self._paths) + 1))
Python | Ciphey/ciphey/basemods/Searchers/astar.py | import cipheycore
class Node:
"""
A node has a value associated with it
Calculated from the heuristic
"""
def __init__(
self,
config,
h: float = None,
edges: (any, float) = None,
ctext: str = None,
):
self.weight = h
# Edges is a list of other nodes it can connect to
self.edges = edges
self.ctext = ctext
self.h = h
self.path = []
self.information_content = config.cache.get_or_update(
self.ctext,
"cipheycore::info_content",
lambda: cipheycore.info_content(self.ctext),
)
def __le__(self, node2):
# if self is less than other
return self.x <= node2.x
def __lt__(self, node2):
return self.x < node2.x
def append_edge(self, edge):
self.edges.append(edge)
def get_edges(self):
return self.edges
class Graph:
# example of adjacency list (or rather map)
# adjacency_list = {
# 'A': [('B', 1), ('C', 3), ('D', 7)],
# 'B': [('D', 5)],
# 'C': [('D', 12)]
# }
def __init__(self, adjacency_list):
"""
adjacency list: basically the graph
"""
self.adjacency_list = adjacency_list
self.original_input = cipheycore.info_content(input)
def get_neighbors(self, v):
try:
return self.adjacency_list[v]
except KeyError:
# If we have exhausted the adjacency list
return []
# heuristic function with equal values for all nodes
def heuristic(self, n: Node):
return n.information_content / self.original_input
def a_star_algorithm(self, start_node: Node, stop_node: Node):
# TODO store the graph as an attribute
# open_list is a list of nodes which have been visited, but whose neighbors
# haven't all been inspected; it starts off with the start node
# closed_list is a list of nodes which have been visited
# and whose neighbors have been inspected
open_list = set([start_node])
closed_list = set([])
# g contains current distances from start_node to all other nodes
# the default value (if it's not found in the map) is +infinity
g = {}
g[start_node] = 0
# parents contains an adjacency map of all nodes
parents = {}
parents[start_node] = start_node
while len(open_list) > 0:
print(f"The open list is {open_list}")
n = None
# find a node with the lowest value of f() - evaluation function
for v in open_list:
# TODO if v == decoder, run the decoder
print(f"The for loop node v is {v}")
if n is None or g[v] + self.heuristic(v) < g[n] + self.heuristic(n):
n = v
print(f"The value of n is {n}")
if n is None:
print("Path does not exist!")
return None
# if the current node is the stop_node
# then we begin reconstructing the path from it to the start_node
# NOTE Uncomment this for an exit condition
# TODO Make it exit if decrypter returns True
# TODO We need to append the decryption methods to each node
# So when we reconstruct the path we can reconstruct the decryptions
# used
if n == stop_node:
print("n is the stop node, we are stopping!")
reconst_path = []
while parents[n] != n:
reconst_path.append(n)
n = parents[n]
reconst_path.append(start_node)
reconst_path.reverse()
print("Path found: {}".format(reconst_path))
return reconst_path
print(n)
for (m, weight) in self.get_neighbors(n):
print(f"And the iteration is ({m}, {weight})")
# if the current node isn't in either open_list or closed_list
# add it to open_list and note n as its parent
if m not in open_list and m not in closed_list:
open_list.add(m)
parents[m] = n
g[m] = g[n] + weight
# otherwise, check if it's quicker to first visit n, then m
# and if it is, update parent data and g data
# and if the node was in the closed_list, move it to open_list
else:
if g[m] > g[n] + weight:
g[m] = g[n] + weight
parents[m] = n
if m in closed_list:
closed_list.remove(m)
open_list.add(m)
# remove n from the open_list, and add it to closed_list
# because all of its neighbors were inspected
# open_list.remove(node)
# closed_list.add(node)
open_list.remove(n)
closed_list.add(n)
print("\n")
print("Path does not exist!")
return None
adjacency_list = {
"A": [("B", 1), ("C", 3), ("D", 7)],
"B": [("D", 5)],
"C": [("D", 12)],
}
A = Node(1)
B = Node(7)
C = Node(9)
D = Node(16)
A.edges = [(B, 1), (C, 3), (D, 7)]
B.edges = [(D, 5)]
C.edges = [(D, 12)]
# TODO use a dictionary comprehension to make this
adjacency_list = {
A: A.edges,
B: B.edges,
C: C.edges,
}
graph1 = Graph(adjacency_list)
graph1.a_star_algorithm(A, D)
"""
Maybe after it
""" |
Markdown | Ciphey/ciphey/basemods/Searchers/atar.md | function reconstruct_path(cameFrom, current)
total_path := {current}
while current in cameFrom.Keys:
current := cameFrom[current]
total_path.prepend(current)
return total_path
// A* finds a path from start to goal.
// h is the heuristic function. h(n) estimates the cost to reach goal from node n.
function A_Star(graph, start, h)
// The set of discovered nodes that may need to be (re-)expanded.
// Initially, only the start node is known.
// This is usually implemented as a min-heap or priority queue rather than a hash-set.
openSet := {start}
// For node n, cameFrom[n] is the node immediately preceding it on the cheapest path from start
// to n currently known.
cameFrom := an empty map
// For node n, gScore[n] is the cost of the cheapest path from start to n currently known.
gScore := map with default value of Infinity
gScore[start] := 0
// For node n, fScore[n] := gScore[n] + h(n). fScore[n] represents our current best guess as to
// how short a path from start to finish can be if it goes through n.
fScore := map with default value of Infinity
fScore[start] := h(start)
// the exit condition is set to True when LC returns True
exit_condition = False
while not exit_condition
// This operation can occur in O(1) time if openSet is a min-heap or a priority queue
current := the node in openSet having the lowest fScore[] value
if current = goal
return reconstruct_path(cameFrom, current)
openSet.Remove(current)
for each neighbor of current
decodings = neighbor.decoders()
// d(current,neighbor) is the weight of the edge from current to neighbor
// tentative_gScore is the distance from start to the neighbor through current
tentative_gScore := gScore[current] + d(current, neighbor)
if tentative_gScore < gScore[neighbor]
// This path to neighbor is better than any previous one. Record it!
cameFrom[neighbor] := current
gScore[neighbor] := tentative_gScore
fScore[neighbor] := gScore[neighbor] + h(neighbor)
if neighbor not in openSet
openSet.add(neighbor)
# run the cracker on the object
crack(node.ctext)
if crack:
# if cracker returns true, reconstruct path and exit
exit_condition = True
reconstruct(start, node)
else:
# else add the new children of the cracker to openSet
openSet.append(node: crack)
// Open set is empty but goal was never reached
return failure
function calculate_new_children(node):
class Node:
"""
A node has a value associated with it
Calculated from the heuristic
"""
def __init__(self, h: float = None, edges: (any, float) = None, ctext: str = None):
self.weight = h
# Edges is a list of other nodes it can connect to
self.edges = edges
self.ctext = ctext
self.h = h
self.path = []
self.information_content = config.cache.get_or_update(
self.ctext,
"cipheycore::info_content",
lambda: cipheycore.info_content(self.ctext),
)
def __le__(self, node2):
# if self is less than other
return self.x <= node2.x
def __lt__(self, node2):
return self.x < node2.x
def append_edge(self, edge):
self.edges.append(edge)
def get_edges(self):
return self.edges |
Python | Ciphey/ciphey/basemods/Searchers/ausearch.py | import bisect
import distutils.util
import math
from copy import copy
from dataclasses import dataclass
from functools import lru_cache
from typing import Any, Dict, Generic, List, Optional, TypeVar, Union
import logging
from rich.logging import RichHandler
from ciphey.iface import (
Checker,
Config,
Cracker,
CrackInfo,
CrackResult,
Decoder,
ParamSpec,
Searcher,
SearchLevel,
SearchResult,
T,
registry,
)
"""
We are using a tree structure here, because that makes searching and tracing back easier
As such, when we encounter another possible parent, we remove that edge
"""
class DuplicateNode(Exception):
pass
@dataclass
class AuSearchSuccessful(Exception):
target: "Node"
info: str
@dataclass
class Node:
# The root has no parent edge
level: SearchLevel
parent: Optional["Edge"] = None
depth: int = 0
@staticmethod
def decoding(
config: Config, route: Union[Cracker, Decoder], result: Any, source: "Node"
) -> "Node":
if not config.cache.mark_ctext(result):
raise DuplicateNode()
checker: Checker = config.objs["checker"]
ret = Node(
parent=None,
level=SearchLevel(
name=type(route).__name__.lower(), result=CrackResult(value=result)
),
depth=source.depth + 1,
)
edge = Edge(source=source, route=route, dest=ret)
ret.parent = edge
check_res = checker(result)
if check_res is not None:
raise AuSearchSuccessful(target=ret, info=check_res)
return ret
@staticmethod
def cracker(config: Config, edge_template: "Edge", result: CrackResult) -> "Node":
if not config.cache.mark_ctext(result.value):
raise DuplicateNode()
checker: Checker = config.objs["checker"]
# Edges do not directly contain containers, so this is fine
edge = copy(edge_template)
ret = Node(
parent=edge,
level=SearchLevel(name=type(edge.route).__name__.lower(), result=result),
depth=edge.source.depth + 1,
)
edge.dest = ret
check_res = checker(result.value)
if check_res is not None:
raise AuSearchSuccessful(target=ret, info=check_res)
return ret
@staticmethod
def root(config: Config, ctext: Any):
if not config.cache.mark_ctext(ctext):
raise DuplicateNode()
return Node(parent=None, level=SearchLevel.input(ctext))
def get_path(self):
if self.parent is None:
return [self.level]
return self.parent.source.get_path() + [self.level]
@dataclass
class AusearchEdge:
# TODO: This is just CrackInfo with failure probability added...
success_probability: float
failure_probability: float
success_time: float
failure_time: float
def __init__(self, success_probability, success_time, failure_time):
self.success_probability = success_probability
self.failure_probability = 1.0 - success_probability
self.success_time = success_time
self.failure_time = failure_time
@dataclass
class AusearchResult:
weight: float
index: int
def calculate_score(info: CrackInfo):
return info.success_likelihood / \
(info.success_runtime * info.success_likelihood + info.failure_runtime * (1-info.success_likelihood))
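# Added note: this is the probability of success divided by the expected
# runtime (success and failure runtimes weighted by their likelihoods),
# so cheap and likely crackers are scheduled first.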
@dataclass
class Edge:
source: Node
route: Union[Cracker, Decoder]
dest: Optional[Node] = None
# Info is not filled in for Decoders
score: Optional[float] = None
PriorityType = TypeVar("PriorityType")
class PriorityWorkQueue(Generic[PriorityType, T]):
_sorted_priorities: List[PriorityType]
_queues: Dict[Any, List[T]]
def add_work(self, priority: PriorityType, work: List[T]) -> None:
logging.debug(f"""Adding work at depth {priority}""")
idx = bisect.bisect_left(self._sorted_priorities, priority)
if (
idx == len(self._sorted_priorities)
or self._sorted_priorities[idx] != priority
):
self._sorted_priorities.insert(idx, priority)
self._queues.setdefault(priority, []).extend(work)
def get_work(self) -> T:
best_priority = self._sorted_priorities[0]
target = self._queues[best_priority]
ret = target.pop(0)
if len(target) == 0:
self._sorted_priorities.pop(0)
return ret
def get_work_chunk(self) -> List[T]:
"""Returns the best work for now"""
if len(self._sorted_priorities) == 0:
return []
best_priority = self._sorted_priorities.pop(0)
return self._queues.pop(best_priority)
def empty(self):
return len(self._sorted_priorities) == 0
def __init__(self):
self._sorted_priorities = []
self._queues = {}
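# Illustrative usage (not from the original file):
# q = PriorityWorkQueue()
# q.add_work(1, ["deep edge"]); q.add_work(0, ["shallow edge"])
# q.get_work_chunk()  # -> ["shallow edge"], the lowest priority value is served first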
@registry.register
class AuSearch(Searcher):
# Deeper paths get done later
work: PriorityWorkQueue[int, Edge]
@staticmethod
def get_crackers_for(t: type):
return registry[Cracker[t]]
@lru_cache() # To save extra sorting
def get_decoders_for(self, t: type):
ret = registry[Decoder[t]]
ret.sort(key=lambda x: x.priority(), reverse=True)
return ret
# def expand(self, edge: Edge) -> List[Edge]:
# """Evaluates the destination of the given, and adds its child edges to the pool"""
# edge.dest = Node(parent=edge, level=edge.route(edge.source.level.result.value))
def expand_crackers(self, node: Node) -> None:
if node.depth >= self.max_cipher_depth:
return
res = node.level.result.value
additional_work = []
for i in self.get_crackers_for(type(res)):
inst = self._config()(i)
info = inst.getInfo(res)
if info.success_likelihood < self.p_threshold:
continue
additional_work.append(
Edge(source=node, route=inst, score=calculate_score(info))
)
priority = min(node.depth, self.priority_cap)
if self.invert_priority:
priority = -priority
self.work.add_work(priority, additional_work)
def expand_decodings(self, node: Node) -> None:
val = node.level.result.value
for decoder in self.get_decoders_for(type(val)):
inst = self._config()(decoder)
res = inst(val)
if res is None:
continue
try:
new_node = Node.decoding(
config=self._config(), route=inst, result=res, source=node
)
except DuplicateNode:
continue
logging.debug("Nesting encodings")
self.recursive_expand(new_node, False)
def recursive_expand(self, node: Node, nested: bool = True) -> None:
if node.depth >= self.max_depth:
return
logging.debug(f"Expanding depth {node.depth}")
self.expand_decodings(node)
# Doing this last allows us to catch simple nested encodings faster
if not nested or self.enable_nested:
self.expand_crackers(node)
def search(self, ctext: Any) -> Optional[SearchResult]:
logging.debug(
f"""Beginning AuSearch with {"inverted" if self.invert_priority else "normal"} priority"""
)
try:
root = Node.root(self._config(), ctext)
except DuplicateNode:
return None
check_res = self._config().objs["checker"](ctext)
if check_res is not None:
return SearchResult(check_res=check_res, path=[root.level])
try:
self.recursive_expand(root, False)
while True:
if self.work.empty():
break
# Get the highest level result
chunk = self.work.get_work_chunk()
chunk.sort(key=lambda i: i.score)
# Work through all of this level's results
while len(chunk) != 0:
logging.debug(f"{len(chunk)} remaining on this level")
# TODO Cyclic uses some tricky C++ here
# Because the chunk is sorted ascending by score, the edge at the back
# is the most likely to succeed
edge: Edge = chunk.pop(-1)
# Expand the node
res = edge.route(edge.source.level.result.value)
if res is None:
continue
for i in res:
try:
node = Node.cracker(
config=self._config(), edge_template=edge, result=i
)
self.recursive_expand(node)
except DuplicateNode:
continue
except AuSearchSuccessful as e:
logging.info("AuSearch succeeded")
return SearchResult(path=e.target.get_path(), check_res=e.info)
logging.info("AuSearch failed")
def __init__(self, config: Config):
super().__init__(config)
self._checker: Checker = config.objs["checker"]
self.work = PriorityWorkQueue() # Has to be defined here because of sharing
self.invert_priority = bool(
distutils.util.strtobool(self._params()["invert_priority"])
)
self.priority_cap = int(self._params()["priority_cap"])
self.enable_nested = bool(
distutils.util.strtobool(self._params()["enable_nested"])
)
self.max_cipher_depth = int(self._params()["max_cipher_depth"])
if self.max_cipher_depth == 0:
self.max_cipher_depth = math.inf
self.max_depth = int(self._params()["max_depth"])
if self.max_depth == 0:
self.max_depth = math.inf
self.p_threshold = float(self._params()["p_threshold"])
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"enable_nested": ParamSpec(
req=False,
desc="Enables nested ciphers. "
"Incredibly slow, and not guaranteed to terminate",
default="False",
),
"invert_priority": ParamSpec(
req=False,
desc="Causes more complex encodings to be looked at first. "
"Good for deeply buried encodings.",
default="False",
),
"max_cipher_depth": ParamSpec(
req=False,
desc="The depth at which we stop trying to crack ciphers. "
"Set to 0 to disable",
default="0",
),
"max_depth": ParamSpec(
req=False,
desc="The depth at which we give up. "
"Set to 0 to disable",
default="0",
),
"priority_cap": ParamSpec(
req=False,
desc="Sets the maximum depth before we give up ordering items.",
default="2",
),
"p_threshold": ParamSpec(
req=False,
desc="Will skip any crackers which have less than this likelihood of succeeding. "
"Set to 0 to disable",
default="0.01",
),
} |
Python | Ciphey/ciphey/basemods/Searchers/imperfection.py | import heapq
class Imperfection:
"""The graph is a Node: [List of nodes]
Where each item in the list of nodes can also have a node with a list of nodes
The result is that we can keep track of edges, while also keeping it small
To calculate current, we push the entire graph to A*
And it calculates the next node to choose, as well as increasing the size
of the graph with values
We're using a heap, meaning the element at [0] is always the smallest element
So we choose that and return it.
The current A* implementation has an end, we simply do not let it end as LC will make it
end far before it reaches Searcher again.
Current is the start position, so if we say we always start at the start of the graph it'll
go through the entire graph
graph = {
Node: [
{Node :
{
node
}
}
]
}
For encodings we just do them straight out
The last value of parents from abstract
"""
"""
graph = {'A': ['B', 'C'],
'B': ['C', 'D'],
'C': ['D'],
'D': ['C'],
'E': ['F'],
'F': ['C']}"""
def __init__(self):
None
def findBestNode(self, nodes):
"""Finds the best decryption module"""
return next(iter(nodes))
# def aStar(self, graph, current, end):
# """The A* search algorithm
# We're using heaps to find the minimum element (the one that will be the next current)
# Heaps are like sets with O(1) lookup time, but maintain the lowest element as [0]
# Sets insert in O(1), heaps in O(log N).
# https://stackoverflow.com/questions/4159331/python-speed-up-an-a-star-pathfinding-algorithm
# Current appears to be the list of all new tiles we can reach from current location
# End is the end node, that won't actually run bc LC will make it return before it hits aSTar function
# so tbh I'll just make it infinite unless something else forces a return
# The graph is the actual data structure used. According to StackOverflow, it looks like this:
# graph = {'A': ['B', 'C'],
# 'B': ['C', 'D'],
# 'C': ['D'],
# 'D': ['C'],
# 'E': ['F'],
# 'F': ['C']}
# """
# # Runs decodings first
# openSet = set()
# openHeap = []
# closedSet = set()
# def retracePath(c):
# # Retraces a path back to the start
# path = [c]
# while c.parent is not None:
# c = c.parent
# path.append(c)
# path.reverse()
# return path
# # Adds the current location (start) to the heap and set
# openSet.add(current)
# openHeap.append((0, current))
# # while openSet contains items
# while openSet:
# # TODO change openSet to a heap?
# # gets the 2nd element from the first element of the heap
# # so the heap is (0, current)
# # which means we pop current
# # this makes me think that current isn't the first?
# current = heapq.heappop(openHeap)[1]
# # We don't actually want to end, so I'm commenting this:
# # XXX
# if current == end:
# return retracePath(current)
# # Removes it from todo and into done i think
# # closedSet appears to be the set of things we have done
# openSet.remove(current)
# closedSet.add(current)
# """
# Okay so our graph looks like this:
# graph = {
# Node: [
# {Node :
# {
# node
# }
# }
# ]
# }
# graph[current] **SHOULD** be the list of nodes which contains dictionaries of nodes
# """
# for tile in graph[current]:
# # ClosedSet appears to be the list of visited nodes
# # TODO place this as a class attribute
# if tile not in closedSet:
# # This is the heuristic
# # TODO expected_time/probability + k * heuristic, for some experimentally determined value of k
# tile.H = (abs(end.x - tile.x) + abs(end.y - tile.y)) * 10
# # if tile is not in the openSet, add it and then pop it from the heap
# if tile not in openSet:
# openSet.add(tile)
# heapq.heappush(openHeap, (tile.H, tile))
# # I have no idea where this code is called lol
# tile.parent = current
# # This returns Nothing
# # I need to modify it so it finds the best item from Current
# # So basically, return item 0 of openHeap
# # return openHeap[0]
# # Since the [0] item is always minimum
# return []
def aStar(self, graph, current, end):
print(f"The graph is {graph}\nCurrent is {current}\n and End is {end}")
openSet = set()
openHeap = []
closedSet = set()
def retracePath(c):
print("Calling retrace path")
path = [c]
while c.parent is not None:
c = c.parent
path.append(c)
path.reverse()
return path
print("\n")
openSet.add(current)
openHeap.append((0, current))
while openSet:
print(f"Openset is {openSet}")
print(f"OpenHeap is {openHeap}")
print(f"ClosedSet is {closedSet}")
print(f"Current is {current}")
print(f"I am popping {openHeap} with the first element")
current = heapq.heappop(openHeap)[1]
print(f"Current is now {current}")
print(f"Graph current is {graph[current]}")
if current == end:
return retracePath(current)
openSet.remove(current)
closedSet.add(current)
for tile in graph[current]:
if tile not in closedSet:
tile.H = (abs(end.x - tile.x) + abs(end.y - tile.y)) * 10
tile.H = 1
if tile not in openSet:
openSet.add(tile)
heapq.heappush(openHeap, (tile.H, tile))
tile.parent = current
print("\n")
return []
class Node:
"""
A node has a value associated with it
Calculated from the heuristic
"""
def __init__(self, h):
self.h = h
self.x = self.h
self.y = 0.6
def __le__(self, node2):
# if self is less than other
return self.x <= node2.x
def __lt__(self, node2):
return self.x < node2.x
if __name__ == "__main__":
obj = Imperfection()
graph = {
"A": ["B", "C"],
"B": ["C", "D"],
"C": ["D"],
"D": ["C"],
"E": ["F"],
"F": ["C"],
}
# Makes the graph
y = Node(0.5)
x = Node(0.3)
p = Node(0.7)
q = Node(0.9)
graph = {y: [x, p], p: q}
print(obj.aStar(graph, y, q)) |
Python | Ciphey/ciphey/basemods/Searchers/perfection.py | from typing import Dict, Optional, Set
from ciphey.iface import Config, ParamSpec, registry
from .ausearch import AuSearch, Node
@registry.register
class Perfection(AuSearch):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
def findBestNode(self, nodes: Set[Node]) -> Node:
return next(iter(nodes))
def __init__(self, config: Config):
super().__init__(config) |
Python | Ciphey/ciphey/iface/_config.py | import datetime
import os
import pydoc
from typing import Any, Callable, Dict, List, Optional, Type, Union
import appdirs
import yaml
import logging
from rich.logging import RichHandler
from . import _fwd
from ._modules import PolymorphicChecker, ResourceLoader, Searcher
class Cache:
"""Used to track state between levels of recursion to stop infinite loops, and to optimise repeating actions"""
def __init__(self):
self._cache: Dict[Any, Dict[str, Any]] = {}
def mark_ctext(self, ctext: Any) -> bool:
if (isinstance(ctext, str) or isinstance(ctext, bytes)) and len(ctext) < 4:
logging.debug(f"Candidate {ctext.__repr__()} too short!")
return False
if ctext in self._cache:
logging.debug(f"Deduped {ctext.__repr__()}")
return False
logging.debug(f"New ctext {ctext.__repr__()}")
self._cache[ctext] = {}
return True
def get_or_update(self, ctext: Any, keyname: str, get_value: Callable[[], Any]):
# Should have been marked first
target = self._cache[ctext]
res = target.get(keyname)
if res is not None:
return res
val = get_value()
target[keyname] = val
return val
def try_get(self, ctext: Any, keyname: str):
return self._cache[ctext].get(keyname)
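# Illustrative usage (not from the original file):
# cache = Cache()
# cache.mark_ctext("hello world")  # True: new candidate
# cache.mark_ctext("hello world")  # False: already seen, so the search prunes it
# cache.get_or_update("hello world", "len", lambda: len("hello world"))  # computed once, then cached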
def split_resource_name(full_name: str) -> (str, str):
return full_name.split("::", 1)
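# e.g. (illustrative) "cipheydists::translate::morse" -> ("cipheydists", "translate::morse")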
class Config:
def __init__(self):
self.verbosity: int = 0
self.searcher: str = "ausearch"
self.params: Dict[str, Dict[str, Union[str, List[str]]]] = {}
self.format: str = "str"
self.modules: List[str] = []
self.checker: str = "ezcheck"
self.default_dist: str = "cipheydists::dist::english"
self.timeout: Optional[int] = None
self._inst: Dict[type, Any] = {}
self.objs: Dict[str, Any] = {}
self.cache: Cache = Cache()
@staticmethod
def get_default_dir() -> str:
return appdirs.user_config_dir("ciphey")
def merge_dict(self, config_file: Optional[Dict[str, Any]]):
if config_file is None:
return
for a, b in config_file.items():
self.update(a, b)
def load_file(
self,
path: str = os.path.join(get_default_dir.__func__(), "config.yml"),
create=False,
):
try:
with open(path, "r+") as file:
return self.merge_dict(yaml.safe_load(file))
except FileNotFoundError:
if create:
open(path, "w+")
def instantiate(self, t: type) -> Any:
"""
Used to enable caching of a instantiated type after the configuration has settled
"""
# We cannot use set default as that would construct it again, and throw away the result
res = self._inst.get(t)
if res is not None:
return res
ret = t(self)
self._inst[t] = ret
return ret
def __call__(self, t: type) -> Any:
return self.instantiate(t)
def update(self, attrname: str, value: Optional[Any]):
if value is not None:
setattr(self, attrname, value)
def update_param(self, owner: str, name: str, value: Optional[Any]):
if value is None:
return
target = self.params.setdefault(owner, {})
if _fwd.registry.get_named(owner).getParams()[name].list:
target.setdefault(name, []).append(value)
else:
target[name] = value
def update_format(self, value: Optional[str]):
if value is not None:
self.format = value
def load_objs(self):
# Basic type conversion
if self.timeout is not None:
self.objs["timeout"] = datetime.timedelta(seconds=int(self.timeout))
self.objs["format"] = pydoc.locate(self.format)
# Checkers do not depend on any other config object
logging.debug(f"Registry is {_fwd.registry._reg[PolymorphicChecker]}")
self.objs["checker"] = self(
_fwd.registry.get_named(self.checker, PolymorphicChecker)
)
# Searchers only depend on checkers
self.objs["searcher"] = self(_fwd.registry.get_named(self.searcher, Searcher))
def update_log_level(self, verbosity: Optional[int]):
if verbosity is None:
return
self.verbosity = verbosity
if verbosity == 0:
self.verbosity = logging.WARNING
elif verbosity == 1:
self.verbosity = logging.INFO
elif verbosity >= 2:
self.verbosity = logging.DEBUG
else:
logging.disable(logging.CRITICAL)
return
# https://rich.readthedocs.io/en/latest/logging.html for more on RichHandler
logging.basicConfig(
level=self.verbosity,
datefmt="[%X]",
handlers=[RichHandler(markup=True, rich_tracebacks=True)],
)
logging.debug(f"Verbosity set to level {verbosity}")
def load_modules(self):
import importlib.util
for i in self.modules:
spec = importlib.util.spec_from_file_location("ciphey.module_load_site", i)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
logging.info(f"Loaded modules {_fwd.registry.get_all_names()}")
def complete_config(self) -> "Config":
"""This does all the loading for the config, and then returns itself"""
self.load_modules()
self.load_objs()
self.update_log_level(self.verbosity)
return self
def get_resource(self, res_name: str, t: Optional[Type] = None) -> Any:
logging.debug(f"Loading resource {res_name} of type {t}")
# FIXME: Actually returns obj of type `t`, but python is bad
loader, name = split_resource_name(res_name)
if t is None:
return self(_fwd.registry.get_named(loader, ResourceLoader))(name)
else:
return self(_fwd.registry.get_named(loader, ResourceLoader[t]))(name)
# Setter methods for cleaner library API
def set_verbosity(self, i):
self.update_log_level(i)
return self
def set_spinner(self, spinner):
self.objs["spinner"] = spinner
def pause_spinner_handle(self):
spinner = self.objs.get("spinner")
class PausedSpinner:
def __enter__(self):
if spinner is not None:
spinner.stop()
def __exit__(self, exc_type, exc_val, exc_tb):
if spinner is not None:
spinner.start()
return PausedSpinner()
@staticmethod
def library_default():
"""The default config for use in a library"""
return Config().set_verbosity(-1)
def __str__(self):
return str(
{
"verbosity": self.verbosity,
"searcher": self.searcher,
"params": self.params,
"format": self.format,
"modules": self.modules,
"checker": self.checker,
"default_dist": self.default_dist,
"timeout": self.timeout,
}
)
_fwd.config = Config |
Python | Ciphey/ciphey/iface/_modules.py | from abc import ABC, abstractmethod
from typing import Any, Dict, Generic, List, NamedTuple, Optional, Set, Type, TypeVar
from rich import box
from rich.console import Console
from rich.markup import escape
from rich.table import Table
from ._fwd import config as Config
T = TypeVar("T")
U = TypeVar("U")
console = Console()
class ParamSpec(NamedTuple):
"""
Attributes:
req Whether this argument is required
desc A description of what this argument does
default The default value for this argument. Ignored if req == True or config_ref is not None
config_ref The path to the config that should be the default value
list Whether this parameter is in the form of a list, and can therefore be specified more than once
visible Whether the user can tweak this via the command line
"""
req: bool
desc: str
default: Optional[Any] = None
list: bool = False
config_ref: Optional[List[str]] = None
visible: bool = True
class ConfigurableModule(ABC):
@staticmethod
@abstractmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
"""
Returns a dictionary of `argument name: argument specification`
"""
pass
def _checkParams(self):
"""
Fills the given params dict with default values where arguments are not given,
using None as the default value for default values
"""
params = self._params()
config = self._config()
for key, value in self.getParams().items():
# If we already have it, then we don't need to do anything
if key in params:
continue
# If we don't have it, but it's required, then fail
if value.req:
raise KeyError(
f"Missing required param {key} for {type(self).__name__.lower()}"
)
# If it's a reference by default, fill that in
if value.config_ref is not None:
tmp = getattr(config, value.config_ref[0])
params[key] = (
tmp[value.config_ref[1:]] if len(value.config_ref) > 1 else tmp
)
# Otherwise, put in the default value (if it exists)
elif value.default is not None:
params[key] = value.default
def _params(self):
return self._params_obj
def _config(self):
return self._config_obj
@abstractmethod
def __init__(self, config: Config):
self._config_obj = config
if self.getParams() is not None:
self._params_obj = config.params.setdefault(type(self).__name__.lower(), {})
self._checkParams()
class Targeted(ABC):
@staticmethod
@abstractmethod
def getTarget() -> str:
"""Should return the target that this object attacks/decodes"""
pass
class PolymorphicChecker(ConfigurableModule):
@abstractmethod
def check(self, text) -> Optional[str]:
"""Should return some description (or an empty string) on success, otherwise return None"""
pass
@abstractmethod
def getExpectedRuntime(self, text) -> float:
pass
def __call__(self, *args):
return self.check(*args)
@abstractmethod
def __init__(self, config: Config):
super().__init__(config)
class Checker(Generic[T], ConfigurableModule):
@abstractmethod
def check(self, text: T) -> Optional[str]:
"""Should return some description (or an empty string) on success, otherwise return None"""
pass
@abstractmethod
def getExpectedRuntime(self, text: T) -> float:
pass
def __call__(self, *args):
return self.check(*args)
@abstractmethod
def __init__(self, config: Config):
super().__init__(config)
@classmethod
def convert(cls, expected: Set[type]):
class PolyWrapperClass(PolymorphicChecker):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return cls.getParams()
def check(self, text) -> Optional[str]:
"""Should return some description (or an empty string) on success, otherwise return None"""
if type(text) not in expected:
return None
else:
return self._base.check(text)
def getExpectedRuntime(self, text) -> float:
if type(text) not in expected:
return 0
else:
return self._base.getExpectedRuntime(text)
def __init__(self, config: Config):
super().__init__(config)
# This is easier than inheritance
self._base = cls(config)
PolyWrapperClass.__name__ = cls.__name__
return PolyWrapperClass
# class Detector(Generic[T], ConfigurableModule, KnownUtility, Targeted):
# @abstractmethod
# def scoreLikelihood(self, ctext: T) -> Dict[str, float]:
# """Should return a dictionary of (cipher_name: score)"""
# pass
#
# def __call__(self, *args): return self.scoreLikelihood(*args)
#
# @abstractmethod
# def __init__(self, config: Config): super().__init__(config)
class Decoder(Generic[T], ConfigurableModule, Targeted):
"""Represents the undoing of some encoding into a different (or the same) type"""
@abstractmethod
def decode(self, ctext: T) -> Optional[U]:
pass
@staticmethod
@abstractmethod
def priority() -> float:
"""What proportion of decodings are this?"""
pass
def __call__(self, *args):
return self.decode(*args)
@abstractmethod
def __init__(self, config: Config):
super().__init__(config)
class DecoderComparer:
value: Type[Decoder]
def __le__(self, other: "DecoderComparer"):
return self.value.priority() <= other.value.priority()
def __ge__(self, other: "DecoderComparer"):
return self.value.priority() >= other.value.priority()
def __lt__(self, other: "DecoderComparer"):
return self.value.priority() < other.value.priority() and self != other
def __gt__(self, other: "DecoderComparer"):
return self.value.priority() > other.value.priority() and self != other
def __init__(self, value: Type[Decoder]):
self.value = value
def __repr__(self):
return f"<DecoderComparer {self.value}:{self.value.priority()}>"
class CrackResult(NamedTuple):
# TODO consider using Generic[T] again for value's type once
# https://bugs.python.org/issue36517 is resolved
value: Any
key_info: Optional[str] = None
misc_info: Optional[str] = None
class CrackInfo(NamedTuple):
success_likelihood: float
success_runtime: float
failure_runtime: float
class Cracker(Generic[T], ConfigurableModule, Targeted):
@abstractmethod
def getInfo(self, ctext: T) -> CrackInfo:
"""Should return some informed guesses on resource consumption when run on `ctext`"""
pass
@abstractmethod
def attemptCrack(self, ctext: T) -> List[CrackResult]:
"""
This should attempt to crack the cipher `target`, and return a list of candidate solutions
"""
# FIXME: Actually CrackResult[T], but python complains
pass
def __call__(self, *args):
return self.attemptCrack(*args)
@abstractmethod
def __init__(self, config: Config):
super().__init__(config)
class ResourceLoader(Generic[T], ConfigurableModule):
@abstractmethod
def whatResources(self) -> Optional[Set[str]]:
"""
Return a set of the names of instances T you can provide.
The names SHOULD be unique amongst ResourceLoaders of the same type
These names will be exposed as f"{self.__name__}::{name}", use split_resource_name to recover this
If you cannot reasonably determine what resources you provide, return None instead
"""
pass
@abstractmethod
def getResource(self, name: str) -> T:
"""
Returns the requested distribution
The behavior is undefined if `name not in self.whatResources()`
"""
pass
def __call__(self, *args):
return self.getResource(*args)
def __getitem__(self, *args):
return self.getResource(*args)
@abstractmethod
def __init__(self, config: Config):
super().__init__(config)
class SearchLevel(NamedTuple):
name: str
result: CrackResult
@staticmethod
def input(ctext: Any):
return SearchLevel(name="input", result=CrackResult(ctext))
class SearchResult(NamedTuple):
path: List[SearchLevel]
check_res: str
class Searcher(ConfigurableModule):
"""A very basic interface for code that plans out how to crack the ciphertext"""
@abstractmethod
def search(self, ctext: Any) -> Optional[SearchResult]:
"""Returns the path to the correct ciphertext"""
pass
@abstractmethod
def __init__(self, config: Config):
super().__init__(config)
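# Renders a SearchResult as a rich Table: the checker's verdict (when it is
# human-readable), the chain of formats/ciphers that were undone, and the
# final plaintext.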
def pretty_search_results(res: SearchResult, display_intermediate: bool = False) -> Table:
# TODO what is display_intermediate
ret: str = ""
table = Table(show_header=False, box=box.ROUNDED, safe_box=False)
# Only print the checker if we need to. Normal people don't know what
# "quadgrams", "brandon", "json checker" are.
# We print the checker if it's regex or another language, so long as it starts with
# "The", like "The plaintext is a Uniform Resource Locator (URL)."
if len(res.check_res) != 0 and ("The" == res.check_res[0:3] or "Passed" == res.check_res[0:6]):
ret += f"{res.check_res}\n"
def add_one():
out = ""
if i.name == "utf8":
out += f" [#808080]{i.name}[/#808080]\n"
else:
out += f" {i.name}"
already_broken = False
if i.result.key_info is not None:
out += f":\n Key: {i.result.key_info}\n"
already_broken = True
if i.result.misc_info is not None:
if not already_broken:
out += ":\n"
out += f" Misc: {i.result.misc_info}\n"
already_broken = True
if display_intermediate:
if not already_broken:
out += ":\n"
out += f' Value: "{i.result.value}"\n'
already_broken = True
if not already_broken:
out += "\n"
return out, already_broken
# Skip the 'input' and print in order
already_broken = False
out = ""
for i in res.path[1:]:
output, already_broken = add_one()
out += output
if out:
if len(out.split("\n")) > 1:
ret += "Formats used:\n"
else:
ret += "Format used:\n"
ret += out
# Remove trailing newline
ret = ret[:-1]
# If we didn't show intermediate steps, then print the final result
if already_broken:
ret += f"""\nPlaintext: [bold green]"{escape(res.path[-1].result.value)}"[bold green]"""
else:
ret += f"""Plaintext: [bold green]"{escape(res.path[-1].result.value)}"[bold green]"""
table.add_row(ret)
return table
# Some common collection types
Distribution = Dict[str, float]
Translation = Dict[str, str]
WordList = Set[str] |
Python | Ciphey/ciphey/iface/_registry.py | from typing import Any, Dict, List, Optional, Set, Tuple, Type, Union
try:
from typing import get_args, get_origin
except ImportError:
from typing_inspect import get_origin, get_args
from . import _fwd
from ._modules import *
class Registry:
# I was planning on using __init_subclass__, but that is incompatible with dynamic type creation when we have
# generic keys
RegElem = Union[List[Type], Dict[Type, "RegElem"]]
_reg: Dict[Type, RegElem] = {}
_names: Dict[str, Tuple[Type, Set[Type]]] = {}
_targets: Dict[str, Dict[Type, List[Type]]] = {}
_modules = {Checker, Cracker, Decoder, ResourceLoader, Searcher, PolymorphicChecker}
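# Files a class into the registry tree: the generic arguments of its base
# (e.g. the T in Decoder[T]) become nested dict keys, and the class itself is
# appended to the list at the leaf.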
def _register_one(self, input_type, module_base, module_args):
if len(module_args) == 0:
self._reg.setdefault(module_base, []).append(input_type)
return
target_reg = self._reg.setdefault(module_base, {})
# Seek to the given type
for subtype in module_args[0:-1]:
target_reg = target_reg.setdefault(subtype, {})
target_reg.setdefault(module_args[-1], []).append(input_type)
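# Core registration logic behind register/register_multi: works out which
# registrable base class the type derives from, converts plain Checkers into
# PolymorphicCheckers, and records the type under its name, kind and target.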
def _real_register(self, input_type: type, *args) -> Type:
name = input_type.__name__.lower()
name_target = self._names[name] = (input_type, set())
if issubclass(input_type, Targeted):
target = input_type.getTarget()
else:
target = None
if issubclass(input_type, Searcher):
module_type = module_base = Searcher
module_args = ()
else:
module_type: Optional[Type] = None
module_base = None
# Work out what module type this is
if len(args) == 0 and hasattr(input_type, "__orig_bases__"):
for i in input_type.__orig_bases__:
if module_type is not None:
raise TypeError(
f"Type derived from multiple registrable base classes {i} and {module_type}"
)
module_base = get_origin(i)
if module_base not in self._modules:
continue
module_type = i
else:
for i in self._modules:
if not issubclass(input_type, i):
continue
if module_type is not None:
raise TypeError(
f"Type derived from multiple registrable base classes {i} and {module_type}"
)
module_type = i
if module_type is None:
raise TypeError("No registrable base class")
# Replace input type with polymorphic checker if required
if issubclass(input_type, Checker):
if len(args) == 0:
arg = [
get_args(i)
for i in input_type.__orig_bases__
if get_origin(i) == Checker
][0]
if len(arg) != 1:
raise TypeError("No argument for Checker")
input_type = input_type.convert({arg[0]})
else:
input_type = input_type.convert(set(args))
self._register_one(input_type, PolymorphicChecker, [])
# Refresh the names with the new type
name_target = self._names[name] = (input_type, {PolymorphicChecker})
# Now handle the difference between register and register_multi
if len(args) == 0:
if module_type is PolymorphicChecker:
module_base = PolymorphicChecker
elif module_base is None:
raise TypeError("No type argument given")
self._register_one(input_type, module_base, get_args(module_type))
name_target[1].add(module_base)
else:
if module_base is not None:
raise TypeError(f"Redundant type argument for {module_type}")
module_base = module_type
for module_args in args:
# Correct missing brackets
if not isinstance(module_args, tuple):
module_args = (module_args,)
self._register_one(input_type, module_base, module_args)
name_target[1].add(module_type[module_args])
name_target[1].add(module_type)
if target is not None and issubclass(module_base, Targeted):
self._targets.setdefault(target, {}).setdefault(module_type, []).append(
input_type
)
return input_type
def register(self, input_type):
return self._real_register(input_type)
def register_multi(self, *x):
return lambda input_type: self._real_register(input_type, *x)
def __getitem__(self, i: type) -> Optional[Any]:
target_type = get_origin(i)
# Check if this is a non-generic type, and return the whole dict if it is
if target_type is None:
return self._reg[i]
target_subtypes = get_args(i)
target_list = self._reg.setdefault(target_type, {})
for subtype in target_subtypes:
target_list = target_list.setdefault(subtype, {})
return target_list
def get_named(self, name: str, type_constraint: Type = None) -> Any:
ret = self._names[name.lower()]
if type_constraint and type_constraint not in ret[1]:
raise TypeError(f"Type mismatch: wanted {type_constraint}, got {ret[1]}")
return ret[0]
def get_targeted(
self, target: str, type_constraint: Type = None
) -> Optional[Union[Dict[Type, Set[Type]], Set[Type]]]:
x = self._targets.get(target)
if x is None or type_constraint is None:
return x
return x.get(type_constraint)
def get_all_names(self) -> List[str]:
return list(self._names.keys())
def __str__(self):
return f"ciphey.iface.Registry {{_reg: {self._reg}, _names: {self._names}, _targets: {self._targets}}}"
_fwd.registry = Registry() |
Python | Ciphey/ciphey/iface/__init__.py | from ._config import Config
from ._modules import (
Checker,
Cracker,
CrackInfo,
CrackResult,
Decoder,
DecoderComparer,
Distribution,
ParamSpec,
PolymorphicChecker,
ResourceLoader,
Searcher,
SearchLevel,
SearchResult,
T,
Translation,
U,
WordList,
pretty_search_results,
)
from ._registry import get_args, get_origin
from ._fwd import registry |
Markdown | Ciphey/tests/brandon_interface.md | If I'm reading this correctly:
> I would suggest a simple lower bound test: we pass if we get more than 25%, and fail if we get lower than 5% (or smth idk) for n consecutive windows.
You're suggesting that we run all tests and see if we get 25%? Imo that would be much slower. What do you mean by `n windows`?
Okay, Chi squared is out then!
> Perhaps we can return an object from the cracker which states what tests have been performed, to save time on redundant analysis. With such information, brandon could make an intelligent decision to just use a wordlist if enough analysis was performed, and the more detailed analysis if it wasn't.
This is entirely possible. I will add support to the `brandon` checker to skip phase 1 if it receives a dictionary with the key `"phase1": True`, where `True` means skip phase 1.
If you have more tests, let me know and I can factor them in.
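A minimal sketch of what that hand-off could look like (the key name, the `skip_info` argument and the stub phase functions are illustrative assumptions, not the final interface):
```python
# Hypothetical sketch of the proposed hand-off: the cracker passes a dict of
# analysis it has already performed, and the checker skips redundant phases.
def phase_1(text: str) -> bool:
    # stand-in for the cheap gibberish filter discussed below
    return any(w in {"the", "and", "is", "a"} for w in text.lower().split())
def phase_2(text: str) -> bool:
    # stand-in for the full dictionary check
    return True
def brandon_check(text: str, skip_info: dict = None) -> bool:
    if not (skip_info or {}).get("phase1", False):
        if not phase_1(text):
            return False
    return phase_2(text)
# brandon_check(ctext, skip_info={"phase1": True})  # phase 1 already covered, skip it
```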
In your first reply:
https://github.com/Ciphey/Ciphey/issues/90#issuecomment-645046918
Point 3:
> Be aware that the stuff passed to the checker will most likely be complete gibberish (with a similar freq dist) OR the correct result. A user will not care about an extra second spent on the final correct result, but really will care that every false candidate takes an extra second. The current suggestion seems to be pessimal for the gibberish inputs: maybe add some sanity checks (have I failed to match any word, have I failed to lemmatize any word, etc.)
I decided to test how well `lem` worked as phase 1. To do this, I created this program:
```python
"""
TL;DR
Tested over 20,000 times
Maximum sentence size is 50 sentences
1/2 chance of getting 'gibberish' (encrypted text)
1/2 chance of getting English text
Each test is timed using Time module.
The accuracy is calculated as to how many true positives we get over the entire run
"""
import spacy
import random
import time
from statistics import mean
import enciphey
from alive_progress import alive_bar
nlp = spacy.load("en_core_web_sm")
f = open("hansard.txt", encoding="ISO-8859-1").read()
f = f.split(".")
enciph = enciphey.encipher()
def lem(text):
sentences = nlp(text)
return set([word.lemma_ for word in sentences])
def get_random_sentence():
if random.randint(0, 1) == 0:
x = None
while x is None:
x = (True, " ".join(random.sample(f, k=random.randint(1, 50))))
return x
else:
x = None
while x is None:
x = enciph.getRandomEncryptedSentence()
x = x["Encrypted Texts"]["EncryptedText"]
return (False, x)
# Now to time it and take measurements
def perform():
# calculate accuracy
total = 0
true_returns = 0
# calculate average time
time_list = []
# average sentence size
sent_size_list = []
items = range(20000)
with alive_bar(len(items)) as bar:
for i in range(0, 20000):
sent = get_random_sentence()
text = sent[1]
truthy = sent[0]
sent_size_list.append(len(text))
# should be length of chars
old = len(text)
# timing the function
tic = time.perf_counter()
new = lem(text)
tok = time.perf_counter()
# checking for accuracy
new = len(new)
# the and here means we only count True Positives
if new < old and truthy:
true_returns += 1
total += 1
# appending the time
t = tok - tic
time_list.append(t)
bar()
print(
f"The accuracy is {str((true_returns / total) * 100)} \n and the time it took is {str(round(mean(time_list), 2))}. \n The average string size was {str(mean(sent_size_list))}"
)
perform()
```
The results were fascinating, to say the least.
With a 50/50 chance of the text being gibberish (ciphertext from enCiphey) or sentences from Hansard.txt, we had these results for using lemmatization as phase 1:
```
The accuracy is 49.63%
and the time it took is 0.02 seconds on average.
The average string size was 1133.63255.
```
**We get a 50% accuracy with a speed of 0.02 seconds on average, across 20k tests with the average size of a string being 1133 chars.**
The accuracy is quite bad considering that a coin flip is 50/50.
On average, phase 2 would be entered 50% of the time, which is annoying as phase 2 is quite slow; lemmatization by itself, though, is quite fast.
I am going to build the "2nd phase" of phase 1 using the while loop we saw earlier. If we can combine just one more metric, we should see much higher accuracy and, again, likely very low latency.
I will create a table of my results:
## Table of max sentence length == 50
| Name | Speed | Accuracy | String Size Average Chars | Epochs | Max Sentence Size |
| -------------------------- | ---------------------------- | -------- | ------------------------- | ------ | ----------------- |
| Lemmatization (lem) | 0.02 seconds | 50% | 1580 | 20,000 | 50 |
| Stop word removal | 3.05465052884756e-05 seconds | 96% | 1596 | 20,000 | 50 |
| Check1000Words | 0.0005 seconds | 96% | 1597 | 20,000 | 50 |
| Word endings | 0.0009 seconds | 95% | 1597 | 20,000 | 50 |
## Table of max sentence length == 5
| Name | Speed | Accuracy | String Size Average Chars | Epochs | Max Sentence Size |
| -------------------------- | ------------------------------ | -------- | ------------------------- | ------ | ----------------- |
| Lemmatization (lem) | | | | | |
| Stop word removal | 1.1574924453998391e-05 seconds | 93% | 569 | 20,000 | 5 |
| Check1000Words | 0.0006 seconds | 95% | 586 | 20,000 | 5 |
| Word endings | 0.0003 seconds | 92% | 482 | 20,000 | 5 |
## Table of max sentence length == 1
| Name | Speed | Accuracy | Threshold | String Size Average Chars | Epochs | Max Sentence Size |
| -------------------------- | ------------------------------- | -------- | ------ |------------------------- | ------ | ----------------- |
| Lemmatization (lem) | | | | | | |
| Stop word removal | 1.2532061150591289e-05 seconds | 50% | | 481 | 20,000 | 1 |
| Check1000Words | 0.0006 seconds | 95% | | 586 | 20,000 | 5 |
| Word endings | 0.0002 seconds | 86% | 15 | 482 | 20,000 | 1 |
## Confusion Matrices & Notes
### Lemmatization
```
Positive Negative
Positive 10031 9967
Negative 2 0
```
### Stop Words
This test was performed where the text was not `.lower()`, so the actual accuracy _may_ be slightly higher, since the stop words list is all lowercase.
50 sentence limit
```
Positive Negative
Positive 9913 855
Negative 56 9176
```
5 sentence limit:
```
Positive Negative
Positive 9513 967
Negative 530 8990
```
### Check 1000 words
50 sentence limit
```
Positive Negative
Positive 10008 552
Negative 56 9384
```
5 sentence limit
```
Positive Negative
Positive 9563 597
Negative 397 9443
```
# Analysis
**I believe that the best Brandon checker will look at the length of the text, and adjust the % threshold and the exact phase 1 checker per text.**
The data below is taken from calculations performed over many hours. It shows the best threshold % for the phase 1 checkers with the highest accuracy. These checkers were chosen because the others showed a maximum accuracy of 58%.
```
{'check 1000 words': {1: {'Accuracy': 0.925, 'Threshold': 2},
2: {'Accuracy': 0.95, 'Threshold': 68},
3: {'Accuracy': 0.975, 'Threshold': 62},
4: {'Accuracy': 0.98, 'Threshold': 5},
5: {'Accuracy': 0.985, 'Threshold': 54}},
'stop words': {1: {'Accuracy': 0.865, 'Threshold': 50},
2: {'Accuracy': 0.93, 'Threshold': 19},
3: {'Accuracy': 0.965, 'Threshold': 15},
4: {'Accuracy': 0.97, 'Threshold': 28},
5: {'Accuracy': 0.985, 'Threshold': 29}}
```
Where the numbers are:
```
1 : The mean is 87.62
2 : The mean is 110.47925
3 : The mean is 132.20016666666666
4 : The mean is 154.817125
5 : The mean is 178.7297
```
Looking at this test, it is clear that stopwords beats check 1000 words on speed, but its accuracy is a little lower. Stop words is far faster than check 1k words, but on smaller inputs the stopwords checker breaks.
Therefore, we should use stopword checker on larger texts, and check 1k words on smaller texts.
More specifically, stopwords checker for len == 110 has an optimal threshold of 19, whereas check 1k words has an optimal threshold of 68. This means that while stopwords can potentially end earlier and only search the first 19% of the list, check 1k words would search 68% of the list.
Stopwords has a lower accuracy by 2%, but it is much, much faster and its optimal threshold is greatly reduced.
So ideally, we would have this algorithm (a rough sketch of it follows the list):
1. Sentence length less than 110:
   1. Use check 1k words with a threshold of 2%
2. Sentence length > 110:
   1. Use stopwords with a threshold of 15
3. Sentence length > 150:
   1. Stopwords threshold increases to 28
This is the ideal optimal phase 1 algorithm for the `brandon` checker.
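As a rough, hypothetical sketch of that dispatch (the wordlist arguments stand in for the cipheydists resources, and the cut-offs and thresholds come from the tables above; none of this is the shipped brandon code):
```python
from typing import List, Set
# Hypothetical sketch of the length-based phase 1 dispatch described above.
def wordlist_hit_check(words: List[str], wordlist: Set[str], threshold: float) -> bool:
    """Return True as soon as `threshold` of the words are found in `wordlist`."""
    if not words:
        return False
    hits = 0
    for word in words:
        if word in wordlist:
            hits += 1
        if hits / len(words) >= threshold:
            return True
    return False
def phase_1(text: str, top_1000_words: Set[str], stop_words: Set[str]) -> bool:
    words = text.lower().split()
    if len(text) < 110:
        # short inputs: check 1k words with a 2% threshold
        return wordlist_hit_check(words, top_1000_words, 0.02)
    if len(text) < 150:
        # medium inputs: stopwords with a 15% threshold
        return wordlist_hit_check(words, stop_words, 0.15)
    # long inputs: stopwords with a 28% threshold
    return wordlist_hit_check(words, stop_words, 0.28)
```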
# Phase 2
Phase 2 is the dictionary checker.
Firstly, we check to find the best thresholds for the dictionary checker.
```
'checker': {1: {'Accuracy': 0.97, 'Threshold': 99},
2: {'Accuracy': 0.98, 'Threshold': 98},
3: {'Accuracy': 0.965, 'Threshold': 68},
4: {'Accuracy': 0.99, 'Threshold': 93},
5: {'Accuracy': 0.97, 'Threshold': 92}},
```
The accuracies are good, but the thresholds are simply too high. We're overfitting!
To fix this, I thought that because the dictionary contained words of <= 2 chars such as "a" or "an", these were matching far too easily, resulting in a much higher threshold.
So I only let the checker consider words that are more than 2 chars.
This is the result:
```
'checker': {1: {'Accuracy': 0.965, 'Threshold': 60},
2: {'Accuracy': 0.98, 'Threshold': 77},
3: {'Accuracy': 0.985, 'Threshold': 67},
4: {'Accuracy': 0.985, 'Threshold': 99},
5: {'Accuracy': 0.98, 'Threshold': 47}},
```
The accuracy stayed around the same, but the threshold went down, although it was still kind of high. A 99% threshold for 4? I restricted the threshold to 75% and:
```
'checker': {1: {'Accuracy': 0.945, 'Threshold': 66},
2: {'accuracy': 0.975, 'threshold': 69},
3: {'accuracy': 0.98, 'threshold': 71},
4: {'accuracy': 0.99, 'threshold': 65},
5: {'accuracy': 0.98, 'threshold': 38}},
```
We can see that the accuracy stayed roughly the same, but the threshold went down a lot. The mean appears to be 66% (from just looking at it).
However, the accuracy for smaller sentence sizes tanked.
The highest accuracy we had was with the original one. Words <= 2 chars and no limit on threshold.
If possible, we want to combine the high accuracy on smaller texts while maintaining the generalization found in the latter checker results.
The reason we want a smaller threshold is that due to the chunking procedure, it will be much faster on larger texts. The lower the sentence length the higher the threshold is allowed to be.
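For reference, a rough sketch of that chunked check with the ">2 chars" filter applied (an illustration based on the description above, not the actual brandon implementation; `wordlist` stands in for the English dictionary):
```python
from math import ceil
from typing import List, Set
# Hypothetical sketch of the chunked phase 2 dictionary check described above.
def phase_2_check(words: List[str], wordlist: Set[str], threshold: float) -> bool:
    words = [w for w in words if len(w) > 2]  # ignore "a", "an", "of", ...
    total = len(words)
    if total == 0:
        return False
    chunk = ceil(total * threshold)
    hits = 0
    location = 0
    while location < total:
        for word in words[location:location + chunk]:
            if word in wordlist:
                hits += 1
            if hits / total >= threshold:
                # bail out as soon as the threshold is met, which is what
                # makes this cheap on large texts
                return True
        location += chunk
    return False
```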
For phase 2, we are not concerned with speed. We are however concerned with accuracy.
I believe that threshold > 90% is overfitting. I cannot reasonably see this successfully working within Ciphey itself.
My next test will be a max threshold of 100%, excluding only words of one char.
```
'checker': {1: {'Accuracy': 0.97, 'Threshold': 93},
2: {'Accuracy': 0.975, 'Threshold': 82},
3: {'Accuracy': 0.97, 'Threshold': 96},
4: {'Accuracy': 0.965, 'Threshold': 31},
5: {'Accuracy': 0.965, 'Threshold': 74}},
```
The accuracy is 97% with a threshold of 93. This is much higher than the previous test. I think for shorter texts, since we don't care about speed, we should use a higher threshold. This test was run 20,000 times. I will run the tests once more to see if the threshold significantly changes.
The test results were:
```
'checker': {1: {'Accuracy': 0.96, 'Threshold': 92},
2: {'Accuracy': 0.97, 'Threshold': 95},
3: {'Accuracy': 0.965, 'Threshold': 81},
4: {'Accuracy': 0.96, 'Threshold': 38},
5: {'Accuracy': 0.975, 'Threshold': 52}},
```
One last test. No threshold limit with no char limit.
```
'checker': {1: {'Accuracy': 0.98, 'Threshold': 92},
2: {'Accuracy': 0.99, 'Threshold': 91},
3: {'Accuracy': 0.97, 'Threshold': 83},
4: {'Accuracy': 0.97, 'Threshold': 71},
5: {'Accuracy': 0.975, 'Threshold': 74}},
```
In total, we want these ones:
```
{1: {'Accuracy': 0.98, 'Threshold': 92},
2: {'accuracy': 0.975, 'threshold': 69},
3: {'accuracy': 0.98, 'threshold': 71},
4: {'accuracy': 0.99, 'threshold': 65},
5: {'accuracy': 0.98, 'threshold': 38}},
^^ with 75% threshold limit
```
Lower thresholds, accuracies look good too. |
Python | Ciphey/tests/cli.py | import subprocess
from sys import exit
result = subprocess.check_output(["ciphey", "-q", "-t", "hello"]).decode()
if "hello" in result:
exit(0)
else:
exit(1) |
Python | Ciphey/tests/dict.py | import unittest
import logging
from rich.logging import RichHandler
from ciphey.basemods.Checkers.brandon import Brandon
config = dict()
lc = config["checker"](config)
logging.remove()
class testDictionary(unittest.TestCase):
def test_english_yes(self):
dc = Brandon()
result = dc.confirmlanguage(
"hello again my friend this is my name and I like dogs!", "English"
)
self.assertEqual(result, True)
def test_english_yes_two(self):
dc = Brandon()
result = dc.confirmlanguage(
"hello my name is brandon and this is a normal english text timtable fuse kindle hormone",
"English",
)
self.assertEqual(result, True)
def test_english_false(self):
dc = Brandon()
result = dc.confirmlanguage("jdajj kop9u0r 9jjidasjp", "English")
self.assertEqual(result, False)
def test_english_false_two(self):
dc = Brandon()
result = dc.confirmlanguage(
"pink jdajj red 9jjidasjp october whisky odiajdq", "English"
)
self.assertEqual(result, True)
# def test_english_percentage(self):
# dc = Brandon()
# result = dc.confirmlanguage(
# "The password for my computer is tyu456q and the username is admin",
# "English",
# )
# self.assertEqual(dc.languagePercentage, 90.0)
def test_english_perfect(self):
dc = Brandon()
result = dc.confirmlanguage(
"Archimedes famously said: βGive me a lever long enough and a fulcrum on which to place it, and I shall move the world.β But what we are talking about here is not physical leverage. It is the leverage of ideas. When you create content, people can access your knowledge without taking your time. You no longer need to sell knowledge by the hour. Your ideas are the most valuable currency in a knowledge-driven economy. Just as an investment account allows your money to grow day and night without your involvement, content does the same with your ideas. Until recently, the average person wasnβt able to publish and distribute their ideas at a reasonable cost. But on the Internet, anybody, in any corner of the world, in any time zone, can access your best thinking. 24 hours a day. 7 days a week. 365 days a year. When you publish ideas, you create your own βSerendipity Vehicleβ β a magnet for ideas and people and opportunities from potentially every corner of the globe. If your ideas resonate with people, people will discover you and bring you unexpected opportunities. Theyβll open doors you never knew existed.",
"English",
)
self.assertEqual(result, True) |
Python | Ciphey/tests/enciphey.py | import base64
import binascii
import random
import re
import string
import base58
import base62
import cipheycore
import cipheydists
import nltk
from nltk.tokenize.treebank import TreebankWordDetokenizer
class encipher:
"""Generates encrypted text. Used for the NN and test_generator"""
def __init__(self): # pragma: no cover
"""Inits the encipher object """
self.text = self.read_text()
self.MAX_SENTENCE_LENGTH = 5
self.crypto = encipher_crypto()
def read_text(self): # pragma: no cover
f = open("hansard.txt", encoding="ISO-8859-1")
x = f.read()
splits = nltk.tokenize.sent_tokenize(x)
return splits
def getRandomSentence(self, size): # pragma: no cover
return TreebankWordDetokenizer().detokenize(
random.sample(self.text, random.randint(1, size))
)
def getRandomEncryptedSentence(self, size): # pragma: no cover
sents = self.getRandomSentence(size)
sentsEncrypted = self.crypto.randomEncrypt(sents)
return {"PlainText Sentences": sents, "Encrypted Texts": sentsEncrypted}
class encipher_crypto: # pragma: no cover
"""Holds the encryption functions
can randomly select an encryption function use on text
returns:
{"text": t, "plaintext": c, "cipher": p, "succeeds": False}
where succeeds is whether or not the text is really encrypted or falsely decrypted
Uses Cyclic3's module generate pseudo random text"""
def __init__(self): # pragma: no cover
self.methods = [
self.Base64,
self.Ascii,
self.Base16,
self.Base32,
self.Binary,
self.Hex,
self.MorseCode,
self.Reverse,
self.Vigenere,
self.base58_bitcoin,
self.base58_ripple,
self.b62,
]
self.morse_dict = dict(cipheydists.get_translate("morse"))
self.letters = string.ascii_lowercase
self.group = cipheydists.get_charset("english")["lcase"]
# pragma: no cover
def random_key(self, text) -> str: # pragma: no cover
if len(text) < 8:
length = 3
else:
length = 8
return self.random_string(length)
def random_string(self, length) -> str: # pragma: no cover
return "".join(random.sample(self.letters, length))
def randomEncrypt(self, text: str) -> str: # pragma: no cover
"""Randomly encrypts string with an encryption"""
func__use = random.choice(self.methods)
encryptedText = func__use(text)
name = func__use.__name__
return {"PlainText": text, "EncryptedText": encryptedText, "CipherUsed": name}
def Base64(self, text: str) -> str: # pragma: no cover
"""Turns text into Base64 using Python library
args:
text -> text convert
returns:
text -> as Base64"""
return base64.b64encode(bytes(text, "utf-8")).decode("utf-8")
def Caesar(self, s, k): # pragma: no cover
"""Iterates through each letter and constructs the cipher text"""
new_message = ""
facr = k % 26
for c in s:
new_message += self.apply_rotation(c, facr)
return new_message
def apply_rotation(self, c, facr): # pragma: no cover
"""Applies a shift of facr the letter denoted by c"""
if c.isalpha():
lower = ord("A") if c.isupper() else ord("a")
c = chr(lower + ((ord(c) - lower + facr) % 26))
return c
def Base32(self, text: str) -> str: # pragma: no cover
"""Turns text in Base32 using Python library
args:
text -> text convert
returns:
text -> as Base32"""
return base64.b32encode(bytes(text, "utf-8")).decode("utf-8")
def Base16(self, text: str) -> str: # pragma: no cover
"""Turns text in Base16 using Python library
args:
text -> text convert
returns:
text -> as Base16"""
return base64.b16encode(bytes(text, "utf-8")).decode("utf-8")
def Binary(self, text: str) -> str: # pragma: no cover
return " ".join(format(ord(x), "b") for x in text)
# pragma: no cover
def Ascii(self, text: str) -> str: # pragma: no cover
res = [ord(c) for c in text]
return " ".join([str(x) for x in res])
def Hex(self, text: str) -> str: # pragma: no cover
return binascii.hexlify(text.encode()).decode("utf-8")
def MorseCode(self, text: str) -> str: # pragma: no cover
morse = []
for i in text:
m = self.morse_dict.get(i.upper())
if m is None:
m = ""
morse.append(m)
output = morse
# output = " ".join(MORSE_CODE_DICT.get(i.upper()) for i in text)
return " ".join(output)
def Reverse(self, text: str) -> str:
return text[::-1]
def Vigenere(self, plaintext):
key = self.vig_key(plaintext, self.random_key(plaintext))
return cipheycore.vigenere_encrypt(plaintext, key, self.group)
def vig_key(self, msg, key):
tab = dict()
for counter, i in enumerate(self.group):
tab[self.group[counter]] = counter
real_key = []
for i in key:
real_key.append(tab[i])
return real_key
# vigenere_encrypt(msg, real_key, group)
def base58_bitcoin(self, text: str):
return base58.b58encode(bytes(text, "utf-8")).decode("utf-8")
def base58_ripple(self, text: str):
return base58.b58encode(
bytes(text, "utf-8"), alphabet=base58.RIPPLE_ALPHABET
).decode("utf-8")
def b62(self, text: str):
# encode rather than decode, so this generator actually produces base62 "ciphertext"
# (assumes pybase62's encodebytes)
return base62.encodebytes(bytes(text, "utf-8"))
Python | Ciphey/tests/generate_tests.py | """
Create a class that can generate encryptions that ciphey can decrypt
This class takes a random string from a large corpus of data and returns it as :
{"Cipher": c, "Plaintext": p, "CipherUsed": cu, "Succeeds": true}
It would also be good if it could return randomly generate text / plaintext too, so we can get some failure test cases.
This class is used to create the class that contains the tests.
So it'll have a format like:
def test_description(self):
assert(t, equal)
where t is the decrypted text from Ciphey, and equal is the decrypted text.
So this function does like:
for i in range(1, 20000):
grabCipher = grabCipher()
# this returns a random cipher, encrypted text and plaintext combo
toAppend ='''
def test_{cipher}_{succeeds}_{plaintext[0:10]}(textToTest):
cipheyObj = ciphey(text)
output = cipheyObj.decrypt()
assert(output, {plaintext})
'''
file.append()
"""
import random
import string
import enciphey
from rich.progress import track
class test_generator:
def __init__(self):
self.HOW_MANY_TESTS = 30
self.enCiphey_obj = enciphey.encipher()
def main(self):
with open("test_main_generated.py", "w") as f:
f.write("from ciphey.__main__ import main, make_default_config")
print("Opened fild")
for i in track(range(1, self.HOW_MANY_TESTS)):
print("In the for loop")
x = self.enCiphey_obj.getRandomEncryptedSentence(self.enCiphey_obj.MAX_SENTENCE_LENGTH)
print(x)
# if x["CipherUsed"] == "MorseCode":
# self.make_test_lc_true_template(cipher=x)
to_append = self.make_test_lc_true_template(cipher=x)
print(f"Adding {to_append}")
f.write(to_append)
def make_test_true_template(self, cipher):
id = self.randomString(8)
return f"""
def test_{cipher['Encrypted Texts']['CipherUsed']}_{id}():
# {cipher}
res = ciphey.main('''{cipher['Encrypted Texts']['EncryptedText']}''', config={"offline": True})
assert(res == {cipher['Encrypted Texts']['PlainText']})
"""
def make_test_lc_true_template(self, cipher):
id = self.randomString(8)
return f"""
def test_{cipher['Encrypted Texts']['CipherUsed']}_{id}():
# {cipher}
cfg = make_default_config('''{cipher['Encrypted Texts']['EncryptedText']}''')
cfg["debug"] = "TRACE"
result = main(cfg)
assert result["IsPlaintext?"] == True
"""
def randomString(self, stringLength):
letters = string.ascii_letters
return "".join(random.choice(letters) for i in range(stringLength))
t = test_generator()
t.main()
# return {"PlainText": text, "EncryptedText": encryptedText, "CipherUsed": name} |
Python | Ciphey/tests/lukas.py | import random
import cipheydists
class galactic_encode:
"""
(Attempts to) encode an input string with the Standard Galactic Alphabet.
"""
def __init__(self, text: str):
self.text = text.lower()
self.ctext = ""
imported = dict(cipheydists.get_translate("galactic"))
self.galactic_dict = {value: key for (key, value) in imported.items()}
def encode(self):
for char in self.text:
if char in self.galactic_dict.keys():
self.ctext += self.galactic_dict[char]
else:
self.ctext += char
return self.ctext
class atbash_encode:
"""
Encodes an input string with the Atbash cipher.
"""
def __init__(self, text: str):
self.text = text.lower()
self.letters = list("abcdefghijklmnopqrstuvwxyz")
self.atbash_dict = {self.letters[::-1][i]: self.letters[i] for i in range(26)}
self.ctext = ""
def encode(self):
for letter in self.text:
if letter in self.atbash_dict.keys():
# Match every letter of the input to its atbash counterpart
self.ctext += self.atbash_dict[letter]
else:
# If the current character is not in the defined alphabet,
# just accept it as-is (useful for numbers, punctuation,...)
self.ctext += letter
return self.ctext
class XY_encrypt:
"""
Encrypts an input string using binary substitution (called XandY in Ciphey) in which
first, the input string is converted to its binary representation and then the 0s and 1s
of the binary string are replaced with any two characters.
- flip: Which of the two possible rotations of the substitute characters is used?
- randomize: If True, random spaces are inserted into the cstring, which Ciphey can handle.
- key: Which two characters are used to represent the 0s and 1s?
"""
def __init__(
self,
text: str,
flip: bool = bool(random.randint(0, 1)),
randomize: bool = True,
key: list = None,
):
self.ASCII = cipheydists.get_charset("asciiTable")
self.text = text.lower()
self.ctext = ""
self.flip = flip
self.randomize = randomize
self.key = key
def randomizer(self):
s = list(self.ctext)
for i in range(len(s) - 1):
while random.randrange(2):
s[i] = s[i] + " "
return "".join(s)
def to_binary(self):
return " ".join(f"{ord(i):08b}" for i in self.text)
def encrypt(self):
self.ctext = self.to_binary().replace(" ", "")
if self.key:
one, two = self.key[0], self.key[1]
else:
one, two = random.choice(self.ASCII), random.choice(self.ASCII)
self.ctext = self.ctext.replace(str(int(self.flip)), one).replace(
str(int(not self.flip)), two
)
self.ctext = self.randomizer() if self.randomize is True else self.ctext
return self.ctext |
Ciphey/tests/speed_test.archive | """
TL;DR
Tested over 20,000 times
Maximum sentence size is 15 sentences
1/2 chance of getting 'gibberish' (encrypted text)
1/2 chance of getting English text
Each test is timed using Time module.
The accuracy is calculated as to how many true positives we get over the entire run
"""
import random
import time
from statistics import mean
import ciphey
import enciphey
from alive_progress import alive_bar
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
import cipheydists
import cipheycore
import pprint
from math import ceil
class tester:
def __init__(self):
self.nlp = spacy.load("en_core_web_sm")
self.f = open("hansard.txt", encoding="ISO-8859-1").read()
self.f = self.f.split(".")
# self.analysis = cipheycore.start_analysis()
# for word in self.f:
# cipheycore.continue_analysis(self.analysis, word)
# cipheycore.finish_analysis(self.analysis)
self.enciph = enciphey.encipher()
# all stopwords
self.all_stopwords = set(self.nlp.Defaults.stop_words)
self.top1000Words = cipheydists.get_list("english1000")
self.wordlist = cipheydists.get_list("english")
self.endings = set(
[
"al",
"y",
"sion",
"tion",
"ize",
"ic",
"ious",
"ness",
"ment",
"ed",
"ify",
"ence",
"fy",
"less",
"ance",
"ship",
"ate",
"dom",
"ist",
"ish",
"ive",
"en",
"ical",
"ful",
"ible",
"ise",
"ing",
"ity",
"ism",
"able",
"ty",
"er",
"or",
"esque",
"acy",
"ous",
]
)
self.endings_3_letters = list(filter(lambda x: len(x) > 3, self.endings))
self.best_thresholds = {
"word endings": {
1: {"Threshold": 0, "Accuracy": 0},
2: {"Threshold": 0, "Accuracy": 0},
3: {"Threshold": 0, "Accuracy": 0},
4: {"Threshold": 0, "Accuracy": 0},
5: {"Threshold": 0, "Accuracy": 0},
},
"word endngs with just 3 chars": {
1: {"Threshold": 0, "Accuracy": 0},
2: {"Threshold": 0, "Accuracy": 0},
3: {"Threshold": 0, "Accuracy": 0},
4: {"Threshold": 0, "Accuracy": 0},
5: {"Threshold": 0, "Accuracy": 0},
},
"stop words": {
1: {"Threshold": 0, "Accuracy": 0},
2: {"Threshold": 0, "Accuracy": 0},
3: {"Threshold": 0, "Accuracy": 0},
4: {"Threshold": 0, "Accuracy": 0},
5: {"Threshold": 0, "Accuracy": 0},
},
"check 1000 words": {
1: {"Threshold": 0, "Accuracy": 0},
2: {"Threshold": 0, "Accuracy": 0},
3: {"Threshold": 0, "Accuracy": 0},
4: {"Threshold": 0, "Accuracy": 0},
5: {"Threshold": 0, "Accuracy": 0},
},
"checker": {
1: {"Threshold": 0, "Accuracy": 0},
2: {"Threshold": 0, "Accuracy": 0},
3: {"Threshold": 0, "Accuracy": 0},
4: {"Threshold": 0, "Accuracy": 0},
5: {"Threshold": 0, "Accuracy": 0},
},
}
# text = "hello my name is Bee and I really like flowers"
# def checker(self, text: str, threshold: float, text_length: int) -> bool:
# x = self.checker(text=text, threshold=0.55, text_length=len(text))
def lem(self, text, threshold):
sentences = self.nlp(text)
return set([word.lemma_ for word in sentences])
def stop(self, text, threshold):
return any(word in self.all_stopwords for word in text)
# x = [word for word in text if not word in self.all_stopwords]
# return True if len(x) < len(text) else False
def check1000Words(self, text, threshold):
"""Checks to see if word is in the list of 1000 words
the 1000words is a dict, so lookup is O(1)
Args:
text -> The text we use to text (a word)
Returns:
bool -> whether it's in the dict or not.
"""
# If we have no wordlist, then we can't reject the candidate on this basis
if text is None:
return False
# If any of the top 1000 words in the text appear
# return true
for word in text:
# I was debating using any() here, but I think they're the
# same speed so it doesn't really matter too much
if word in self.top1000Words:
return True
return False
def get_random_sentence(self, size):
# if random.randint(0, 1) == 0:
# x = None
# while x is None:
# x = (True, " ".join(random.sample(self.f, k=random.randint(1, size))))
# return x
# else:
# x = None
# while x is None:
# x = self.enciph.getRandomEncryptedSentence(size)
# x = x["Encrypted Texts"]["EncryptedText"]
# return (False, x)
x = (True, " ".join(random.sample(self.f, k=random.randint(1, size))))
return x
def get_words(self, text):
doc = self.nlp(text)
toReturn = []
for token in doc:
toReturn.append((token.text).lower())
return toReturn
def word_endings(self, text, thresold):
total = len(text)
if total == 0:
return False
positive = 0
# as soon as we hit 25%, we exit and return True
for word in text:
for word2 in self.endings:
if word.endswith(word2):
positive += 1
# if total / positive >= 0.25:
# return True
# return False
if positive == 0:
return False
return True if positive / total > thresold else False
def word_endings_3(self, text, threshold):
"""Word endings that only end in 3 chars, may be faster to compute"""
positive = 0
total = len(text)
if total == 0:
return False
for word in text:
if word[::-3] in self.endings_3_letters:
positive += 1
if positive != 0:
return True if total / positive > threshold else False
else:
return False
# Now to time it and take measurements
def perform(self, function, sent_size, threshold):
threshold = threshold / 100
# calculate accuracy
total = 0
true_positive_returns = 0
true_negative_returns = 0
false_positive_returns = 0
false_negatives_returns = 0
# calculate average time
time_list = []
# average sentence size
sent_size_list = []
test_range = 200
for i in range(0, test_range):
sent = self.get_random_sentence(sent_size)
text = sent[1]
truthy = sent[0]
sent_size_list.append(len(text))
# should be length of chars
text = self.get_words(text)
old = len(text)
# timing the function
# def checker(self, text: str, threshold: float, text_length: int, var: set) -> bool:
tic = time.perf_counter()
result = function(text=text, threshold=threshold, text_length=old)
tok = time.perf_counter()
# new = len(result)
# print(
# f"The old text is \n {''.join(text)}\n and the new text is \n {''.join(result)} \n\n"
# )
# result = new < old
# checking for accuracy
# new = len(new)
# the and here means we only count True Positives
# result = new < old
if result and truthy:
true_positive_returns += 1
elif result:
false_positive_returns += 1
elif not result and truthy:
false_negatives_returns += 1
elif not result:
true_negative_returns += 1
else:
print("ERROR")
total += 1
# appending the time
t = tok - tic
time_list.append(t)
print(
f"The accuracy is {str((true_positive_returns+true_negative_returns) / total)} \n and the time it took is {str(mean(time_list))}. \n The average string size was {str(mean(sent_size_list))}"
)
print(
f"""
Positive Negative
Positive {true_positive_returns} {false_positive_returns}
Negative {false_negatives_returns} {true_negative_returns}
"""
)
return {
"Name": function,
"Threshold": threshold,
"Accuracy": (true_positive_returns + true_negative_returns) / total,
"Average_time": mean(time_list),
"Average_string_len": mean(sent_size_list),
"Sentence length": sent_size,
"confusion_matrix": [
[true_positive_returns, false_positive_returns],
[false_negatives_returns, true_negative_returns],
],
}
def perform_3_sent_sizes(self, threshold):
"""
Gives us the average accuracy and time etc
"""
# funcs = [obj.checker, obj.stop, obj.check1000Words]
funcs = [obj.checker]
# funcs = [obj.word_endings]
names = [
"checker",
# "stop words",
# "check 1000 words",
]
# names = ["checker"]
sent_sizes = [1, 2, 3, 4, 5]
x = {
# "stop words": {1: None, 2: None, 3: None, 4: None, 5: None, 20: None},
# "check 1000 words": {1: None, 2: None, 3: None, 4: None, 5: None, 20: None},
"checker": {1: None, 2: None, 3: None, 4: None, 5: None, 20: None},
}
for i in range(0, len(funcs)):
func = funcs[i]
for y in sent_sizes:
# print("Hello this runsss")
x[names[i]][y] = self.perform(func, y, threshold)
return x
def perform_best_percentages(self):
"""
Tells us the optimal percentage thresholds
"""
"""
TODO I need to record thresholds for each length of text
"""
# "word endings with just 3 chars": {
# "Sentence Size": {"Threshold": 0, "Accuracy": 0}
# },
# "stop words": {"Sentence Size": {"Threshold": 0, "Accuracy": 0}},
# "check 1000 words": {"Sentence Size": {"Threshold": 0, "Accuracy": 0}},
# }
items = range(100)
with alive_bar(len(items)) as bar:
for i in range(1, 101):
x = self.perform_3_sent_sizes(threshold=i)
pprint.pprint(x)
for key, value in x.items():
# getting max keyLs
for y in [1, 2, 3, 4, 5]:
pprint.pprint(x[key])
# size = x[key][y]
size = y
# print(f"**** Size is {size}")
temp1 = x[key][y]["Accuracy"]
# print(f"Accuracy is {temp1}")
temp2 = self.best_thresholds[key][size]["Accuracy"]
if temp1 > temp2:
temp2 = temp1
# print(f"Self best is {self.best_thresholds[key][size]}")
self.best_thresholds[key][size]["Threshold"] = i
self.best_thresholds[key][size]["Accuracy"] = temp1
pprint.pprint(x)
bar()
pprint.pprint(self.best_thresholds)
def calculate_average_sentence_size(self):
sent_sizes = [1, 2, 3, 4, 5]
lengths = []
for x in sent_sizes:
for i in range(0, 2000):
y = self.get_random_sentence(x)
lengths.append(len(y[1]))
print(f"{x} : The mean is {mean(lengths)}")
def checker(self, text: str, threshold: float, text_length: int) -> bool:
"""Given text determine if it passes checker
The checker uses the variable passed to it. I.E. Stopwords list, 1k words, dictionary
Args:
text -> The text to check
threshold -> at what point do we return True? The percentage of text that is in var before we return True
text_length -> the length of the text
var -> the variable we are checking against. Stopwords list, 1k words list, dictionary list.
Returns:
boolean -> True for it passes the test, False for it fails the test."""
percent = ceil(text_length * threshold)
meet_threshold = 0
location = 0
end = percent
while location <= text_length:
# chunks the text, so only gets THRESHOLD chunks of text at a time
to_analyse = text[location:end]
for word in to_analyse:
# if len(word) <= 1:
# continue
# if word is a stopword, + 1 to the counter
if word in self.wordlist:
meet_threshold += 1
if meet_threshold / text_length >= threshold:
# if we meet the threshold, return True
# otherwise, go over again until we do
# We do this in the for loop because if we're at 24% and THRESHOLD is 25
# we don't want to wait THRESHOLD to return true, we want to return True ASAP
return True
location = end
end += percent
return False
obj = tester()
# X = obj.perform_3_sent_sizes(50)
# x = obj.perform_best_percentages()
x = obj.calculate_average_sentence_size() |
|
Python | Ciphey/tests/test_advanced_ciphers.py | import pytest
from click.testing import CliRunner
import mock
import re
from ciphey import decrypt
from ciphey.iface import Config
from ciphey.ciphey import main
from ciphey.basemods.Checkers import human
def test_xor():
res = decrypt(Config().library_default().complete_config(),"Uihr!hr!`!udru!gns!YNS-!hu!hr!sd`mmx!mnof!un!l`jd!rtsd!ui`u!YNSunnm!b`o!fdu!hu/!Bhqidx!*!YNSunnm!hr!bnnm/")
assert re.findall("This is a test for XOR", res)
@pytest.mark.skip("Skipping because it matches on Discover card, this is a PyWhat bug that's being fixed.")
@mock.patch("ciphey.basemods.Checkers.human.HumanChecker.check", return_value = "")
def test_xor_tui_multi_byte(mock_click):
# https://github.com/Ciphey/Ciphey/issues/655
runner = CliRunner()
mock_click.return_value = "y"
result = runner.invoke(main, ['-vvv', '-t', '360w0x11450x114504421611100x0y0545000x06171y1511070145150x110z45081709110y45071y1100423w2z3045120z0x060z450x1145080w170042060z0u1509071w45160w040x45160y0y020v0045001x1107453w2w374y422x0y1111000301450w03450w0y091y4510110x0y05450442160x0x02090745071y110042030z104504420v001y45120z0x060z450x11450003161x42110z42071717110042030z10060042041642110w071700420x16420z0y0v1x4550505342150z11160x000x090y11001149450u1009160x45001x1107450v071x1642060z170901420w04140045160w0z170416030y0111450z0445150w16160y070x0v0x110716450u040v0y0y02420x11420w04100145160z450017101600030w1706074y453z2z37160z0z0v450x1145041500160w08004207000104101100450y114501040y42061703060v42070z160w45110x0y05090042071x160045030y014208100v110x42071x1600453z2z3742000y01171x121100064511071w114x452z0x060042260x120w001y450w0316453z2z37160z0z0v450x0x1100051704160001420x0y160z450y1149420x1142120x0v0945000045111015071745030804180x0y0545040x0145150x090v451012021703010042260x120w001y45110w450707450400090042110z42061703060v42060z0u1509071w453z2z3742000y01171x121100064511071w114x45320z1x450y1645160w0x114511071w1142160z42090z0x025z4227000104101100453z2z37160z0z0v45060w1009060y4216450610040609450x1645120z000y422x450u040107450x1645160z0z171600174x455u4z'])
assert result.exit_code == 0
assert re.findall("This is a string encrypted with multi", str(result.output))
@mock.patch("ciphey.basemods.Checkers.human.HumanChecker.check", return_value = "")
def test_xor_tui(mock_click):
# https://github.com/Ciphey/Ciphey/issues/655
runner = CliRunner()
mock_click.return_value = "y"
result = runner.invoke(main, ['-t', 'Uihr!hr!`!udru!gns!YNS-!hu!hr!sd`mmx!mnof!un!l`jd!rtsd!ui`u!YNSunnm!b`o!fdu!hu/!Bhqidx!*!YNSunnm!hr!bnnm/'])
assert result.exit_code == 0
assert re.findall("This is a test for XOR", str(result.output))
@mock.patch("ciphey.basemods.Checkers.human.HumanChecker.check", return_value = "")
def test_xor_tui_verbose_mode_doesnt_break(mock_click):
# We had a bug where verbose mode broke xor
# https://discord.com/channels/754001738184392704/814565556027654214/853183178104373310
runner = CliRunner()
mock_click.return_value = "y"
result = runner.invoke(main, ['-v', '-t', 'Uihr!hr!`!udru!gns!YNS-!hu!hr!sd`mmx!mnof!un!l`jd!rtsd!ui`u!YNSunnm!b`o!fdu!hu/!Bhqidx!*!YNSunnm!hr!bnnm/'])
assert result.exit_code == 0
assert re.findall("This is a test for XOR", str(result.output))
def test_xor_atbash():
# Frsi!si!{!fwif!tmh!BMH-!sf!si!hw{nnc!nmlu!fm!o{qw!ighw!fr{f!BMHfmmn!y{l!uwf!sf/!Ysjrwc!*!BMHfmmn.si!ymmn/
# This is a test for XOR, it is really long to make sure that XORtool can get it. Ciphey + XORtool/is cool.
# Previously xor only worked on level 1, this test ensures it always works on levels > 1
res = decrypt(Config().library_default().complete_config(),"Frsi!si!{!fwif!tmh!BMH-!sf!si!hw{nnc!nmlu!fm!o{qw!ighw!fr{f!BMHfmmn!y{l!uwf!sf/!Ysjrwc!*!BMHfmmn.si!ymmn/")
assert re.findall("This is a test for XOR", res) |
Python | Ciphey/tests/test_click.py | from click.testing import CliRunner
from ciphey.ciphey import main
from ciphey.basemods.Checkers import human
import mock
def test_hello_world():
runner = CliRunner()
result = runner.invoke(main, ["-g", "-t", "hello"])
assert result.exit_code == 0
assert result.output == "hello\n"
def test_ip_address():
runner = CliRunner()
result = runner.invoke(main, ["-g", "-t", "MTkyLjE2OC4wLjE="])
assert result.exit_code == 0
assert result.output == "192.168.0.1\n"
@mock.patch("ciphey.basemods.Checkers.human.HumanChecker.check", return_value="")
def test_quick_visual_output(mock_click):
# https://github.com/Ciphey/Ciphey/issues/655
runner = CliRunner()
mock_click.return_value = "y"
result = runner.invoke(main, ["-t", "NB2HI4DTHIXS6Z3PN5TWYZJOMNXW2==="])
assert result.exit_code == 0
assert "base32" in result.output |
Python | Ciphey/tests/test_click_printing.py | from click.testing import CliRunner
from ciphey.ciphey import main
from ciphey.basemods.Checkers import human
import mock
@mock.patch("ciphey.basemods.Checkers.human.HumanChecker.check", return_value="")
def test_fix_for_655(mock_click):
# https://github.com/Ciphey/Ciphey/issues/655
runner = CliRunner()
mock_click.return_value = "y"
result = runner.invoke(main, ["-t", "NB2HI4DTHIXS6Z3PN5TWYZJOMNXW2==="])
assert result.exit_code == 0
assert "base32" in result.output
"""
TODO Mock
360d0c11450c114504421611100c0b0545000c06171b1511070145150c110a45081709110b45071b1100423d2a3045120a0c060a450c1145080d170042060a0f1509071d45160d040c45160b0b020e0045001c1107453d2d374b422c0b1111000301450d03450d0b091b4510110c0b05450442160c0c02090745071b110042030a104504420e001b45120a0c060a450c11450003161c42110a42071717110042030a10060042041642110d071700420c16420a0b0e1c4550505342150a11160c000c090b11001149450f1009160c45001c1107450e071c1642060a170901420d04140045160d0a170416030b0111450a0445150d16160b070c0e0c110716450f040e0b0b02420c11420d04100145160a450017101600030d1706074b453a2a37160a0a0e450c1145041500160d08004207000104101100450b114501040b42061703060e42070a160d45110c0b05090042071c160045030b014208100e110c42071c1600453a2a3742000b01171c121100064511071d114c452a0c060042260c120d001b450d0316453a2a37160a0a0e450c0c1100051704160001420c0b160a450b1149420c1142120c0e0945000045111015071745030804180c0b0545040c0145150c090e451012021703010042260c120d001b45110d450707450400090042110a42061703060e42060a0f1509071d453a2a3742000b01171c121100064511071d114c45320a1c450b1645160d0c114511071d1142160a42090a0c025a4227000104101100453a2a37160a0a0e45060d1009060b4216450610040609450c1645120a000b422c450f040107450c1645160a0a171600174c455f4a
As it passes as a discover card
""" |
Python | Ciphey/tests/test_quick.py | import pytest
from ciphey import decrypt
from ciphey.iface import Config
from click.testing import CliRunner
from ciphey.ciphey import main
from ciphey.basemods.Checkers import human
import mock
answer_str = "Hello my name is bee and I like dog and apple and tree"
def test_quick_base32():
res = decrypt(
Config().library_default().complete_config(),
"JBSWY3DPEBWXSIDOMFWWKIDJOMQGEZLFEBQW4ZBAJEQGY2LLMUQGI33HEBQW4ZBAMFYHA3DFEBQW4ZBAORZGKZI=",
)
assert res.lower() == answer_str.lower()
def test_quick_base58_ripple():
res = decrypt(
Config().library_default().complete_config(),
"aqY64A1PhaM8hgyagyw4C1Mmp5cwxGEwag8EjVm9F6YHebyfPZmsvt65XxS7ffteQgTEGbHNT8",
)
assert res.lower() == answer_str.lower()
def test_quick_greppable_works_with_ip_address():
runner = CliRunner()
result = runner.invoke(main, ["-g", "-t", "MTkyLjE2OC4wLjE="])
assert result.exit_code == 0
assert result.output == "192.168.0.1\n"
@mock.patch("ciphey.basemods.Checkers.human.HumanChecker.check", return_value="")
def test_quick_visual_output(mock_click):
# https://github.com/Ciphey/Ciphey/issues/655
runner = CliRunner()
mock_click.return_value = "y"
result = runner.invoke(main, ["-t", "NB2HI4DTHIXS6Z3PN5TWYZJOMNXW2==="])
assert result.exit_code == 0
assert "base32" in result.output |
Python | Ciphey/tests/test_regex.py | import pytest
from ciphey import decrypt
from ciphey.iface import Config
def test_regex_ip():
res = decrypt(
Config().library_default().complete_config(),
"MTkyLjE2MC4wLjE=",
)
assert res == "192.160.0.1"
def test_regex_domain():
res = decrypt(
Config().library_default().complete_config(),
"aHR0cHM6Ly9nb29nbGUuY29t",
)
assert res == "https://google.com"
def test_regex_bitcoin():
res = decrypt(
Config().library_default().complete_config(),
"M0ZaYmdpMjljcGpxMkdqZHdWOGV5SHVKSm5rTHRrdFpjNQ==",
)
assert res == "3FZbgi29cpjq2GjdwV8eyHuJJnkLtktZc5" |