Shaleen123 committed
Commit a164e13
Parent(s): e7f5447

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
- .github/workflows/pre-commit.yml +39 -0
- .gitignore +160 -0
- .pre-commit-config.yaml +20 -0
- LICENSE +165 -0
- README.md +187 -0
- config.yaml +13 -0
- docs/moe.md +38 -0
- examples/gradient-slerp.yml +20 -0
- examples/linear.yml +12 -0
- examples/mega.yml +37 -0
- examples/orcamini-platy-44layer.yml +9 -0
- examples/ties.yml +22 -0
- merge/README.md +47 -0
- merge/added_tokens.json +40 -0
- merge/config.json +48 -0
- merge/mergekit_config.yml +13 -0
- merge/merges.txt +0 -0
- merge/model-00001-of-00002.safetensors +3 -0
- merge/model-00002-of-00002.safetensors +3 -0
- merge/model.safetensors.index.json +1 -0
- merge/special_tokens_map.json +23 -0
- merge/tokenizer.json +0 -0
- merge/tokenizer_config.json +323 -0
- merge/vocab.json +0 -0
- mergekit/__init__.py +0 -0
- mergekit/_data/__init__.py +0 -0
- mergekit/_data/architectures/__init__.py +0 -0
- mergekit/_data/architectures/baichuan.json +47 -0
- mergekit/_data/architectures/chatglm.json +50 -0
- mergekit/_data/architectures/cohere.json +53 -0
- mergekit/_data/architectures/falcon.json +53 -0
- mergekit/_data/architectures/gemma.json +85 -0
- mergekit/_data/architectures/gpt-neox.json +74 -0
- mergekit/_data/architectures/gpt2-sequence-classification.json +66 -0
- mergekit/_data/architectures/gpt2.json +64 -0
- mergekit/_data/architectures/gptbigcode.json +70 -0
- mergekit/_data/architectures/jais.json +70 -0
- mergekit/_data/architectures/llama.json +91 -0
- mergekit/_data/architectures/mamba.json +57 -0
- mergekit/_data/architectures/mistral.json +90 -0
- mergekit/_data/architectures/phi-1.json +66 -0
- mergekit/_data/architectures/phi2-old.json +62 -0
- mergekit/_data/architectures/phi2.json +74 -0
- mergekit/_data/architectures/qwen.json +50 -0
- mergekit/_data/architectures/qwen2.json +62 -0
- mergekit/_data/architectures/stablelm.json +98 -0
- mergekit/_data/architectures/stablelm2.json +74 -0
- mergekit/_data/architectures/starcoder2.json +78 -0
- mergekit/architecture.py +374 -0
- mergekit/card.py +177 -0
.github/workflows/pre-commit.yml
ADDED
@@ -0,0 +1,39 @@
name: pre-commit

on:
  pull_request:
  push:

jobs:
  pre-commit:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: "3.11"
          cache: "pip"
      - uses: pre-commit/action@v3.0.0

  pytest:
    if: github.ref == 'refs/heads/main' || github.event_name == 'pull_request'
    name: PyTest
    needs: [pre-commit]
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        python_version: ["3.9", "3.10", "3.11"]
    timeout-minutes: 5

    steps:
      - uses: actions/checkout@v3
      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python_version }}
          cache: "pip"
      - name: Install dependencies
        run: pip3 install -U -e .[test]
      - name: Run tests
        run: pytest .
.gitignore
ADDED
@@ -0,0 +1,160 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
.pre-commit-config.yaml
ADDED
@@ -0,0 +1,20 @@
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v3.2.0
    hooks:
      - id: check-added-large-files
      - id: check-yaml
        args: ["--allow-multiple-documents"]
  - repo: https://github.com/PyCQA/isort
    rev: 5.12.0
    hooks:
      - id: isort
  - repo: https://github.com/psf/black
    rev: 23.11.0
    hooks:
      - id: black
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v3.2.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
LICENSE
ADDED
@@ -0,0 +1,165 @@
                   GNU LESSER GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.


  This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.

  0. Additional Definitions.

  As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.

  "The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.

  An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.

  A "Combined Work" is a work produced by combining or linking an
Application with the Library.  The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".

  The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.

  The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.

  1. Exception to Section 3 of the GNU GPL.

  You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.

  2. Conveying Modified Versions.

  If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:

   a) under this License, provided that you make a good faith effort to
   ensure that, in the event an Application does not supply the
   function or data, the facility still operates, and performs
   whatever part of its purpose remains meaningful, or

   b) under the GNU GPL, with none of the additional permissions of
   this License applicable to that copy.

  3. Object Code Incorporating Material from Library Header Files.

  The object code form of an Application may incorporate material from
a header file that is part of the Library.  You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:

   a) Give prominent notice with each copy of the object code that the
   Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the object code with a copy of the GNU GPL and this license
   document.

  4. Combined Works.

  You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:

   a) Give prominent notice with each copy of the Combined Work that
   the Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the Combined Work with a copy of the GNU GPL and this license
   document.

   c) For a Combined Work that displays copyright notices during
   execution, include the copyright notice for the Library among
   these notices, as well as a reference directing the user to the
   copies of the GNU GPL and this license document.

   d) Do one of the following:

       0) Convey the Minimal Corresponding Source under the terms of this
       License, and the Corresponding Application Code in a form
       suitable for, and under terms that permit, the user to
       recombine or relink the Application with a modified version of
       the Linked Version to produce a modified Combined Work, in the
       manner specified by section 6 of the GNU GPL for conveying
       Corresponding Source.

       1) Use a suitable shared library mechanism for linking with the
       Library.  A suitable mechanism is one that (a) uses at run time
       a copy of the Library already present on the user's computer
       system, and (b) will operate properly with a modified version
       of the Library that is interface-compatible with the Linked
       Version.

   e) Provide Installation Information, but only if you would otherwise
   be required to provide such information under section 6 of the
   GNU GPL, and only to the extent that such information is
   necessary to install and execute a modified version of the
   Combined Work produced by recombining or relinking the
   Application with a modified version of the Linked Version.  (If
   you use option 4d0, the Installation Information must accompany
   the Minimal Corresponding Source and Corresponding Application
   Code.  If you use option 4d1, you must provide the Installation
   Information in the manner specified by section 6 of the GNU GPL
   for conveying Corresponding Source.)

  5. Combined Libraries.

  You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:

   a) Accompany the combined library with a copy of the same work based
   on the Library, uncombined with any other library facilities,
   conveyed under the terms of this License.

   b) Give prominent notice with the combined library that part of it
   is a work based on the Library, and explaining where to find the
   accompanying uncombined form of the same work.

  6. Revised Versions of the GNU Lesser General Public License.

  The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.

  Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.

  If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
README.md
ADDED
@@ -0,0 +1,187 @@
# mergekit

`mergekit` is a toolkit for merging pre-trained language models. `mergekit` uses an out-of-core approach to perform unreasonably elaborate merges in resource-constrained situations. Merges can be run entirely on CPU or accelerated with as little as 8 GB of VRAM. Many merging algorithms are supported, with more coming as they catch my attention.

Features:

- Supports Llama, Mistral, GPT-NeoX, StableLM, and more
- Many [merge methods](#merge-methods)
- GPU or CPU execution
- Lazy loading of tensors for low memory use
- Interpolated gradients for parameter values (inspired by Gryphe's [BlockMerge_Gradient](https://github.com/Gryphe/BlockMerge_Gradient) script)
- Piecewise assembly of language models from layers ("Frankenmerging")

🔊 Call to Evolve - to solve evolutionary merge methods as a community - please see https://github.com/arcee-ai/mergekit/issues/207.

## Installation

```sh
git clone https://github.com/cg123/mergekit.git
cd mergekit

pip install -e . # install the package and make scripts available
```

If the above fails with the error:

```
ERROR: File "setup.py" or "setup.cfg" not found. Directory cannot be installed in editable mode:
(A "pyproject.toml" file was found, but editable mode currently requires a setuptools-based build.)
```

you may need to upgrade pip to > 21.3 with the command `python3 -m pip install --upgrade pip`.

## Usage

The script `mergekit-yaml` is the main entry point for `mergekit`. It takes a YAML configuration file and an output path, like so:

```sh
mergekit-yaml path/to/your/config.yml ./output-model-directory [--cuda] [--lazy-unpickle] [--allow-crimes] [... other options]
```

This will run the merge and write your merged model to `./output-model-directory`.

For more information on the arguments accepted by `mergekit-yaml`, run the command `mergekit-yaml --help`.

### Uploading to Huggingface

When you have a merged model you're happy with, you may want to share it on the Hugging Face Hub. `mergekit` generates a `README.md` for your merge with some basic information for a model card. You can edit it to include more details about your merge, like giving it a good name or explaining what it's good at; rewrite it entirely; or use the generated `README.md` as-is. It is also possible to edit your `README.md` online once it has been uploaded to the Hub.

Once you're happy with your model card and merged model, you can upload it to the Hugging Face Hub using the [huggingface_hub](https://huggingface.co/docs/huggingface_hub/index) Python library.

```sh
# log in to huggingface with an access token (must have write permission)
huggingface-cli login
# upload your model
huggingface-cli upload your_hf_username/my-cool-model ./output-model-directory .
```

The [documentation](https://huggingface.co/docs/huggingface_hub/guides/cli#huggingface-cli-upload) for `huggingface_hub` goes into more detail about other options for uploading.

## Merge Configuration

Merge configurations are YAML documents specifying the operations to perform in order to produce your merged model.
Below are the primary elements of a configuration file:

- `merge_method`: Specifies the method to use for merging models. See [Merge Methods](#merge-methods) for a list.
- `slices`: Defines slices of layers from different models to be used. This field is mutually exclusive with `models`.
- `models`: Defines entire models to be used for merging. This field is mutually exclusive with `slices`.
- `base_model`: Specifies the base model used in some merging methods.
- `parameters`: Holds various parameters such as weights and densities, which can also be specified at different levels of the configuration.
- `dtype`: Specifies the data type used for the merging operation.
- `tokenizer_source`: Determines how to construct a tokenizer for the merged model.

### Parameter Specification

Parameters are flexible and can be set with varying precedence. They can be specified conditionally using tensor name filters, which allows finer control such as differentiating between attention heads and fully connected layers.

Parameters can be specified as:

- **Scalars**: Single floating-point values.
- **Gradients**: Lists of floating-point values, specifying an interpolated gradient.

The parameters can be set at different levels, with decreasing precedence as follows (a short sketch follows the list):

1. `slices.*.sources.parameters` - applying to a specific input slice
2. `slices.*.parameters` - applying to a specific output slice
3. `models.*.parameters` or `input_model_parameters` - applying to any tensors coming from specific input models
4. `parameters` - catchall
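
As a minimal sketch of how these levels interact (model names are reused from the examples below; the weight values themselves are arbitrary), a source-level `weight` overrides the top-level catchall:

```yaml
slices:
  - sources:
      - model: psmathur/orca_mini_v3_13b
        layer_range: [0, 40]
        parameters:
          weight: 0.8 # level 1: applies only to tensors from this input slice
      - model: garage-bAInd/Platypus2-13B
        layer_range: [0, 40]
merge_method: linear
parameters:
  weight: 0.5 # level 4: catchall for anything not matched above
dtype: float16
```
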
### Tokenizer Source

The `tokenizer_source` field of a configuration file determines what tokenizer is used by the merged model. This also affects how embeddings and language model heads are merged.

This functionality is still experimental and may break. Please file an issue if you encounter any problems with it.

Valid values:

- `base`: use the tokenizer from the base model
- `union`: construct a tokenizer with all tokens from all models
- `model:<model_path>`: use the tokenizer from a specific model

If set, mergekit will find a mapping between each model's vocabulary and the output tokenizer. This allows models with different vocabularies or added tokens to be meaningfully merged.

`tokenizer_source` is compatible with all merge methods, but when used, `lm_head`/`embed_tokens` will be merged linearly. For two-model merges, the `embed_slerp` parameter can be set to `true` to use SLERP instead.

If the `tokenizer_source` field is not set, mergekit will fall back to its legacy default behavior. The tokenizer for the base model (or the first model in the merge, if no base model is specified) will be copied to the output directory. The parameter matrices for `lm_head`/`embed_tokens` will be truncated to the smallest size present in the merge. In _most_ cases this corresponds to using the tokenizer for the base model.
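
As a minimal sketch (model names are reused from the examples below; the weights are arbitrary), a union-tokenizer merge could look like:

```yaml
models:
  - model: psmathur/orca_mini_v3_13b
    parameters:
      weight: 0.5
  - model: garage-bAInd/Platypus2-13B
    parameters:
      weight: 0.5
merge_method: linear
tokenizer_source: union # merged vocabulary; embeddings/lm_head merged linearly
dtype: float16
```
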
### Examples

Several examples of merge configurations are available in [`examples/`](examples/).

## Merge Methods

A quick overview of the currently supported merge methods:

| Method                                                                                        | `merge_method` value | Multi-Model | Uses base model |
| --------------------------------------------------------------------------------------------- | -------------------- | ----------- | --------------- |
| Linear ([Model Soups](https://arxiv.org/abs/2203.05482))                                      | `linear`             | ✅          | ❌              |
| SLERP                                                                                         | `slerp`              | ❌          | ✅              |
| [Task Arithmetic](https://arxiv.org/abs/2212.04089)                                           | `task_arithmetic`    | ✅          | ✅              |
| [TIES](https://arxiv.org/abs/2306.01708)                                                      | `ties`               | ✅          | ✅              |
| [DARE](https://arxiv.org/abs/2311.03099) [TIES](https://arxiv.org/abs/2306.01708)             | `dare_ties`          | ✅          | ✅              |
| [DARE](https://arxiv.org/abs/2311.03099) [Task Arithmetic](https://arxiv.org/abs/2212.04089)  | `dare_linear`        | ✅          | ✅              |
| Passthrough                                                                                   | `passthrough`        | ❌          | ❌              |
| [Model Stock](https://arxiv.org/abs/2403.19522)                                               | `model_stock`        | ✅          | ✅              |

### Linear

The classic merge method - a simple weighted average.

Parameters:

- `weight` - relative (or absolute if `normalize=False`) weighting of a given tensor
- `normalize` - if true, the weights of all models contributing to a tensor will be normalized. Default behavior.

### SLERP

Spherically interpolate the parameters of two models. One must be set as `base_model`.

Parameters:

- `t` - interpolation factor. At `t=0` will return `base_model`, at `t=1` will return the other one.
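
For reference, this is the standard spherical interpolation formula (conceptually applied per tensor), where $\Omega$ is the angle between the parameter vectors $v_0$ (from `base_model`) and $v_1$:

```math
\mathrm{slerp}(t; v_0, v_1) = \frac{\sin\big((1-t)\,\Omega\big)}{\sin \Omega}\, v_0 + \frac{\sin(t\,\Omega)}{\sin \Omega}\, v_1,
\qquad
\Omega = \arccos\!\left(\frac{v_0 \cdot v_1}{\lVert v_0 \rVert\,\lVert v_1 \rVert}\right)
```
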
### [Task Arithmetic](https://arxiv.org/abs/2212.04089)

Computes "task vectors" for each model by subtracting a base model. Merges the task vectors linearly and adds back the base. Works great for models that were fine tuned from a common ancestor. Also a super useful mental framework for several of the more involved merge methods.
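
In symbols, with base parameters $\theta_{\text{base}}$, fine-tuned models $\theta_i$, and per-model weights $w_i$ (as in [Linear](#linear)):

```math
\theta_{\text{merged}} = \theta_{\text{base}} + \sum_i w_i \,\big(\theta_i - \theta_{\text{base}}\big)
```
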
Parameters: same as [Linear](#linear)

### [TIES](https://arxiv.org/abs/2306.01708)

Builds on the task arithmetic framework. Resolves interference between models by sparsifying the task vectors and applying a sign consensus algorithm. Allows you to merge a larger number of models and retain more of their strengths.

Parameters: same as [Linear](#linear), plus:

- `density` - fraction of weights in differences from the base model to retain

### [DARE](https://arxiv.org/abs/2311.03099)

In the same vein as TIES, sparsifies task vectors to reduce interference. Differs in that DARE uses random pruning with a novel rescaling to better match performance of the original models. DARE can be used either with the sign consensus algorithm of TIES (`dare_ties`) or without (`dare_linear`).

Parameters: same as [TIES](#ties) for `dare_ties`, or [Linear](#linear) for `dare_linear`

### Passthrough

`passthrough` is a no-op that simply passes input tensors through unmodified. It is meant to be used for layer-stacking type merges where you have only one input model. Useful for frankenmerging.

### [Model Stock](https://arxiv.org/abs/2403.19522)

Uses some neat geometric properties of fine tuned models to compute good weights for linear interpolation. Requires at least three models, including a base model.

Parameters:

- `filter_wise`: if true, weight calculation will be per-row rather than per-tensor. Not recommended.
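
As a minimal sketch (model choices are placeholders drawn from the examples above; two fine-tuned models plus a base satisfy the three-model minimum):

```yaml
models:
  - model: psmathur/orca_mini_v3_13b
  - model: garage-bAInd/Platypus2-13B
merge_method: model_stock
base_model: TheBloke/Llama-2-13B-fp16
dtype: float16
```
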
## Citation

We now have a [paper](https://arxiv.org/abs/2403.13257) you can cite for the MergeKit library:

```bibtex
@article{goddard2024arcee,
  title={Arcee's MergeKit: A Toolkit for Merging Large Language Models},
  author={Goddard, Charles and Siriwardhana, Shamane and Ehghaghi, Malikeh and Meyers, Luke and Karpukhin, Vlad and Benedict, Brian and McQuade, Mark and Solawetz, Jacob},
  journal={arXiv preprint arXiv:2403.13257},
  year={2024}
}
```
config.yaml
ADDED
@@ -0,0 +1,13 @@
models:
  - model: Shaleen123/phi-2-code
    parameters:
      weight: 0.5
  - model: Shaleen123/phi-2-maths
    parameters:
      weight: 0.3
  - model: Shaleen123/phi-2-4bits
    parameters:
      weight: 1.0
merge_method: linear
dtype: float16
docs/moe.md
ADDED
@@ -0,0 +1,38 @@
# mergekit-moe

`mergekit-moe` is a script for combining Mistral or Llama models of the same size into Mixtral Mixture of Experts models. The script will combine the self-attention and layer normalization parameters from a "base" model with the MLP parameters from a set of "expert" models. `mergekit-moe` uses its own YML configuration syntax, which looks like so:

```yml
base_model: path/to/self_attn_donor
gate_mode: hidden # one of "hidden", "cheap_embed", or "random"
dtype: bfloat16 # output dtype (float32, float16, or bfloat16)
## (optional)
# experts_per_token: 2
experts:
  - source_model: expert_model_1
    positive_prompts:
      - "This is a prompt that is demonstrative of what expert_model_1 excels at"
    ## (optional)
    # negative_prompts:
    #   - "This is a prompt expert_model_1 should not be used for"
  - source_model: expert_model_2
  # ... and so on
```

The script takes two arguments, an input config and an output path: `mergekit-moe ./config.yml ./my-clowncar-moe-12x180B`

## Gate Modes

There are three methods for populating the MoE gates implemented.

### "hidden"

Uses the hidden state representations of the positive/negative prompts for MoE gate parameters. Best quality and most effective option; the default. Requires evaluating each prompt using the base model, so you might not be able to use this on constrained hardware (depending on the model). You can use `--load-in-8bit` or `--load-in-4bit` to reduce VRAM usage.
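
For example, on VRAM-constrained hardware you might run something like `mergekit-moe ./config.yml ./output-moe --load-in-4bit` (the output path here is just a placeholder).
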
### "cheap_embed"

Uses only the raw token embedding of the prompts, using the same gate parameters for every layer. Distinctly less effective than "hidden". Can be run on much, much lower-end hardware.

### "random"

Randomly initializes the MoE gates. Good if you are going to fine-tune the model afterwards, or maybe if you want something a little unhinged? I won't judge.
examples/gradient-slerp.yml
ADDED
@@ -0,0 +1,20 @@
slices:
  - sources:
      - model: psmathur/orca_mini_v3_13b
        layer_range: [0, 40]
      - model: garage-bAInd/Platypus2-13B
        layer_range: [0, 40]
# or, the equivalent models: syntax:
# models:
#   - model: psmathur/orca_mini_v3_13b
#   - model: garage-bAInd/Platypus2-13B
merge_method: slerp
base_model: psmathur/orca_mini_v3_13b
parameters:
  t:
    - filter: self_attn
      value: [0, 0.5, 0.3, 0.7, 1]
    - filter: mlp
      value: [1, 0.5, 0.7, 0.3, 0]
    - value: 0.5 # fallback for rest of tensors
dtype: float16
examples/linear.yml
ADDED
@@ -0,0 +1,12 @@
models:
  - model: psmathur/orca_mini_v3_13b
    parameters:
      weight: 1.0
  - model: WizardLM/WizardLM-13B-V1.2
    parameters:
      weight: 0.3
  - model: garage-bAInd/Platypus2-13B
    parameters:
      weight: 0.5
merge_method: linear
dtype: float16
examples/mega.yml
ADDED
@@ -0,0 +1,37 @@
slices:
  - sources:
      - model: psmathur/orca_mini_v3_13b
        layer_range: [0, 40]
      - model: garage-bAInd/Platypus2-13B
        layer_range: [0, 40]
merge_method: slerp
base_model: psmathur/orca_mini_v3_13b
parameters:
  t:
    - filter: self_attn
      value: [0, 0.5, 0.3, 0.7, 1]
    - filter: mlp
      value: [1, 0.5, 0.7, 0.3, 0]
    - value: 0.5 # fallback for rest of tensors
dtype: float16
name: gradient-slerp
---
models:
  - model: gradient-slerp
    parameters:
      density: [1, 0.7, 0.1] # density gradient
      weight: 1.0
  - model: WizardLM/WizardMath-13B-V1.0
    parameters:
      density: 0.33
      weight:
        - filter: mlp
          value: 0.5
        - value: 0
merge_method: ties
base_model: TheBloke/Llama-2-13B-fp16
parameters:
  normalize: true
  int8_mask: true
dtype: float16
name: gradient-slerp-ties
examples/orcamini-platy-44layer.yml
ADDED
@@ -0,0 +1,9 @@
slices:
  - sources:
      - model: psmathur/orca_mini_v3_13b
        layer_range: [0, 24]
  - sources:
      - model: garage-bAInd/Platypus2-13B
        layer_range: [20, 40]
merge_method: passthrough
dtype: float16
examples/ties.yml
ADDED
@@ -0,0 +1,22 @@
models:
  - model: psmathur/orca_mini_v3_13b
    parameters:
      density: [1, 0.7, 0.1] # density gradient
      weight: 1.0
  - model: garage-bAInd/Platypus2-13B
    parameters:
      density: 0.5
      weight: [0, 0.3, 0.7, 1] # weight gradient
  - model: WizardLM/WizardMath-13B-V1.0
    parameters:
      density: 0.33
      weight:
        - filter: mlp
          value: 0.5
        - value: 0
merge_method: ties
base_model: TheBloke/Llama-2-13B-fp16
parameters:
  normalize: true
  int8_mask: true
dtype: float16
merge/README.md
ADDED
@@ -0,0 +1,47 @@
---
base_model:
- Shaleen123/phi-2-maths
- Shaleen123/phi-2-code
- Shaleen123/phi-2-4bits
library_name: transformers
tags:
- mergekit
- merge

---
# merge

This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).

## Merge Details
### Merge Method

This model was merged using the [linear](https://arxiv.org/abs/2203.05482) merge method.

### Models Merged

The following models were included in the merge:
* [Shaleen123/phi-2-maths](https://huggingface.co/Shaleen123/phi-2-maths)
* [Shaleen123/phi-2-code](https://huggingface.co/Shaleen123/phi-2-code)
* [Shaleen123/phi-2-4bits](https://huggingface.co/Shaleen123/phi-2-4bits)

### Configuration

The following YAML configuration was used to produce this model:

```yaml
models:
  - model: Shaleen123/phi-2-code
    parameters:
      weight: 0.5
  - model: Shaleen123/phi-2-maths
    parameters:
      weight: 0.3
  - model: Shaleen123/phi-2-4bits
    parameters:
      weight: 1.0
merge_method: linear
dtype: float16
```
merge/added_tokens.json
ADDED
@@ -0,0 +1,40 @@
{
  "\t\t": 50294,
  "\t\t\t": 50293,
  "\t\t\t\t": 50292,
  "\t\t\t\t\t": 50291,
  "\t\t\t\t\t\t": 50290,
  "\t\t\t\t\t\t\t": 50289,
  "\t\t\t\t\t\t\t\t": 50288,
  "\t\t\t\t\t\t\t\t\t": 50287,
  "  ": 50286,
  "   ": 50285,
  "    ": 50284,
  "     ": 50283,
  "      ": 50282,
  "       ": 50281,
  "        ": 50280,
  "         ": 50279,
  "          ": 50278,
  "           ": 50277,
  "            ": 50276,
  "             ": 50275,
  "              ": 50274,
  "               ": 50273,
  "                ": 50272,
  "                 ": 50271,
  "                  ": 50270,
  "                   ": 50269,
  "                    ": 50268,
  "                     ": 50267,
  "                      ": 50266,
  "                       ": 50265,
  "                        ": 50264,
  "                         ": 50263,
  "                          ": 50262,
  "                           ": 50261,
  "                            ": 50260,
  "                             ": 50259,
  "                              ": 50258,
  "                               ": 50257
}
merge/config.json
ADDED
@@ -0,0 +1,48 @@
{
  "_name_or_path": "Shaleen123/phi-2-maths",
  "architectures": [
    "PhiForCausalLM"
  ],
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "microsoft/phi-2--configuration_phi.PhiConfig",
    "AutoModelForCausalLM": "microsoft/phi-2--modeling_phi.PhiForCausalLM"
  },
  "bos_token_id": 50256,
  "embd_pdrop": 0.0,
  "eos_token_id": 50256,
  "hidden_act": "gelu_new",
  "hidden_size": 2560,
  "initializer_range": 0.02,
  "intermediate_size": 10240,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 2048,
  "model_type": "phi",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 32,
  "partial_rotary_factor": 0.4,
  "qk_layernorm": false,
  "quantization_config": {
    "_load_in_4bit": true,
    "_load_in_8bit": false,
    "bnb_4bit_compute_dtype": "float32",
    "bnb_4bit_quant_type": "fp4",
    "bnb_4bit_use_double_quant": false,
    "llm_int8_enable_fp32_cpu_offload": false,
    "llm_int8_has_fp16_weight": false,
    "llm_int8_skip_modules": null,
    "llm_int8_threshold": 6.0,
    "load_in_4bit": true,
    "load_in_8bit": false,
    "quant_method": "bitsandbytes"
  },
  "resid_pdrop": 0.1,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.38.2",
  "use_cache": true,
  "vocab_size": 51200
}
merge/mergekit_config.yml
ADDED
@@ -0,0 +1,13 @@
models:
  - model: Shaleen123/phi-2-code
    parameters:
      weight: 0.5
  - model: Shaleen123/phi-2-maths
    parameters:
      weight: 0.3
  - model: Shaleen123/phi-2-4bits
    parameters:
      weight: 1.0
merge_method: linear
dtype: float16
merge/merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
merge/model-00001-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e6a858e3c69e1ba3c09c6d49a7dee2460ccd4c13b599704e608c88df6fa46c64
size 1993680248
merge/model-00002-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6a42859d88e8973ad35321d724fc93498b3b74ce153db25b5e975d0c5eeb1395
size 1049154408
merge/model.safetensors.index.json
ADDED
@@ -0,0 +1 @@
{"metadata": {"mergekit_version": "0.0.4.2", "total_size": 3042785280}, "weight_map": {"model.final_layernorm.weight": "model-00001-of-00002.safetensors", "model.final_layernorm.bias": "model-00001-of-00002.safetensors", "lm_head.weight": "model-00001-of-00002.safetensors", "lm_head.bias": "model-00001-of-00002.safetensors", "model.layers.31.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.31.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.31.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.31.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.31.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.31.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.31.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.31.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.31.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.31.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.31.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.31.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.31.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.31.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.30.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.30.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.30.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.30.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.30.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.30.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.30.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.30.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.30.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.30.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.30.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.30.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.30.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.30.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.29.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.29.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.29.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.29.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.29.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.29.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.29.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.29.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.29.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.29.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.29.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.29.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.29.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.29.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.28.mlp.fc2.weight": "model-00001-of-00002.safetensors", 
"model.layers.28.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.28.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.28.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.28.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.28.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.28.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.28.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.28.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.28.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.28.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.28.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.28.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.28.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.27.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.27.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.27.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.27.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.27.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.27.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.27.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.27.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.27.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.27.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.27.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.27.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.27.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.27.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.26.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.26.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.26.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.26.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.26.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.26.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.26.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.26.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.26.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.26.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.26.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.26.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.26.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.26.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.25.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.25.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.25.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.25.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.25.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.25.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", 
"model.layers.25.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.25.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.25.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.25.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.25.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.25.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.25.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.25.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.24.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.24.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.24.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.24.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.24.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.24.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.24.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.24.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.24.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.24.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.24.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.24.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.24.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.24.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.23.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.23.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.23.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.23.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.23.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.23.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.23.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.23.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.23.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.23.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.23.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.23.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.23.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.23.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.22.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.22.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.22.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.22.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.dense.weight": "model-00001-of-00002.safetensors", 
"model.layers.22.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.22.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.21.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.21.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.21.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.21.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.21.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.20.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.20.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.20.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.20.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.20.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.19.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.19.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.19.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.19.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.19.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.18.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.18.mlp.fc2.bias": "model-00001-of-00002.safetensors", 
"model.layers.18.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.18.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.18.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.17.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.17.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.17.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.17.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.17.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.16.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.16.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.16.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.16.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.16.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.15.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.15.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.15.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.15.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", 
"model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.15.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.14.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.14.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.14.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.14.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.14.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.13.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.13.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.13.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.13.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.13.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.12.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.12.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.12.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.12.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.dense.bias": "model-00001-of-00002.safetensors", 
"model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.12.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.11.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.11.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.11.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.11.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.11.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.10.mlp.fc2.weight": "model-00001-of-00002.safetensors", "model.layers.10.mlp.fc2.bias": "model-00001-of-00002.safetensors", "model.layers.10.mlp.fc1.weight": "model-00001-of-00002.safetensors", "model.layers.10.mlp.fc1.bias": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.dense.weight": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.dense.bias": "model-00001-of-00002.safetensors", "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.10.input_layernorm.bias": "model-00001-of-00002.safetensors", "model.layers.9.mlp.fc2.weight": "model-00002-of-00002.safetensors", "model.layers.9.mlp.fc2.bias": "model-00002-of-00002.safetensors", "model.layers.9.mlp.fc1.weight": "model-00002-of-00002.safetensors", "model.layers.9.mlp.fc1.bias": "model-00002-of-00002.safetensors", "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", "model.layers.9.self_attn.dense.weight": "model-00002-of-00002.safetensors", "model.layers.9.self_attn.dense.bias": "model-00002-of-00002.safetensors", "model.layers.9.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.9.input_layernorm.bias": "model-00002-of-00002.safetensors", "model.layers.8.mlp.fc2.weight": "model-00002-of-00002.safetensors", "model.layers.8.mlp.fc2.bias": "model-00002-of-00002.safetensors", "model.layers.8.mlp.fc1.weight": "model-00002-of-00002.safetensors", "model.layers.8.mlp.fc1.bias": 
"model-00002-of-00002.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", "model.layers.8.self_attn.dense.weight": "model-00002-of-00002.safetensors", "model.layers.8.self_attn.dense.bias": "model-00002-of-00002.safetensors", "model.layers.8.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.8.input_layernorm.bias": "model-00002-of-00002.safetensors", "model.layers.7.mlp.fc2.weight": "model-00002-of-00002.safetensors", "model.layers.7.mlp.fc2.bias": "model-00002-of-00002.safetensors", "model.layers.7.mlp.fc1.weight": "model-00002-of-00002.safetensors", "model.layers.7.mlp.fc1.bias": "model-00002-of-00002.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", "model.layers.7.self_attn.dense.weight": "model-00002-of-00002.safetensors", "model.layers.7.self_attn.dense.bias": "model-00002-of-00002.safetensors", "model.layers.7.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.7.input_layernorm.bias": "model-00002-of-00002.safetensors", "model.layers.6.mlp.fc2.weight": "model-00002-of-00002.safetensors", "model.layers.6.mlp.fc2.bias": "model-00002-of-00002.safetensors", "model.layers.6.mlp.fc1.weight": "model-00002-of-00002.safetensors", "model.layers.6.mlp.fc1.bias": "model-00002-of-00002.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", "model.layers.6.self_attn.dense.weight": "model-00002-of-00002.safetensors", "model.layers.6.self_attn.dense.bias": "model-00002-of-00002.safetensors", "model.layers.6.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.6.input_layernorm.bias": "model-00002-of-00002.safetensors", "model.layers.5.mlp.fc2.weight": "model-00002-of-00002.safetensors", "model.layers.5.mlp.fc2.bias": "model-00002-of-00002.safetensors", "model.layers.5.mlp.fc1.weight": "model-00002-of-00002.safetensors", "model.layers.5.mlp.fc1.bias": "model-00002-of-00002.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", 
"model.layers.5.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", "model.layers.5.self_attn.dense.weight": "model-00002-of-00002.safetensors", "model.layers.5.self_attn.dense.bias": "model-00002-of-00002.safetensors", "model.layers.5.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.5.input_layernorm.bias": "model-00002-of-00002.safetensors", "model.layers.4.mlp.fc2.weight": "model-00002-of-00002.safetensors", "model.layers.4.mlp.fc2.bias": "model-00002-of-00002.safetensors", "model.layers.4.mlp.fc1.weight": "model-00002-of-00002.safetensors", "model.layers.4.mlp.fc1.bias": "model-00002-of-00002.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", "model.layers.4.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", "model.layers.4.self_attn.dense.weight": "model-00002-of-00002.safetensors", "model.layers.4.self_attn.dense.bias": "model-00002-of-00002.safetensors", "model.layers.4.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.4.input_layernorm.bias": "model-00002-of-00002.safetensors", "model.layers.3.mlp.fc2.weight": "model-00002-of-00002.safetensors", "model.layers.3.mlp.fc2.bias": "model-00002-of-00002.safetensors", "model.layers.3.mlp.fc1.weight": "model-00002-of-00002.safetensors", "model.layers.3.mlp.fc1.bias": "model-00002-of-00002.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", "model.layers.3.self_attn.dense.weight": "model-00002-of-00002.safetensors", "model.layers.3.self_attn.dense.bias": "model-00002-of-00002.safetensors", "model.layers.3.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.3.input_layernorm.bias": "model-00002-of-00002.safetensors", "model.layers.2.mlp.fc2.weight": "model-00002-of-00002.safetensors", "model.layers.2.mlp.fc2.bias": "model-00002-of-00002.safetensors", "model.layers.2.mlp.fc1.weight": "model-00002-of-00002.safetensors", "model.layers.2.mlp.fc1.bias": "model-00002-of-00002.safetensors", "model.layers.2.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.2.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", "model.layers.2.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.2.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.2.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", "model.layers.2.self_attn.dense.weight": "model-00002-of-00002.safetensors", "model.layers.2.self_attn.dense.bias": "model-00002-of-00002.safetensors", "model.layers.2.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.2.input_layernorm.bias": "model-00002-of-00002.safetensors", "model.layers.1.mlp.fc2.weight": 
"model-00002-of-00002.safetensors", "model.layers.1.mlp.fc2.bias": "model-00002-of-00002.safetensors", "model.layers.1.mlp.fc1.weight": "model-00002-of-00002.safetensors", "model.layers.1.mlp.fc1.bias": "model-00002-of-00002.safetensors", "model.layers.1.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.1.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.1.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.1.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", "model.layers.1.self_attn.dense.weight": "model-00002-of-00002.safetensors", "model.layers.1.self_attn.dense.bias": "model-00002-of-00002.safetensors", "model.layers.1.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.1.input_layernorm.bias": "model-00002-of-00002.safetensors", "model.layers.0.mlp.fc2.weight": "model-00002-of-00002.safetensors", "model.layers.0.mlp.fc2.bias": "model-00002-of-00002.safetensors", "model.layers.0.mlp.fc1.weight": "model-00002-of-00002.safetensors", "model.layers.0.mlp.fc1.bias": "model-00002-of-00002.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.0.self_attn.v_proj.bias": "model-00002-of-00002.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.0.self_attn.k_proj.bias": "model-00002-of-00002.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.0.self_attn.q_proj.bias": "model-00002-of-00002.safetensors", "model.layers.0.self_attn.dense.weight": "model-00002-of-00002.safetensors", "model.layers.0.self_attn.dense.bias": "model-00002-of-00002.safetensors", "model.layers.0.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.0.input_layernorm.bias": "model-00002-of-00002.safetensors", "model.embed_tokens.weight": "model-00002-of-00002.safetensors"}}
merge/special_tokens_map.json
ADDED
@@ -0,0 +1,23 @@
{
  "bos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}

merge/tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
merge/tokenizer_config.json
ADDED
@@ -0,0 +1,323 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "50256": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "50257": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50258": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50259": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50260": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50261": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50262": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50263": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50264": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50265": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50266": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50267": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50268": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50269": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50270": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50271": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50272": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50273": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50274": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50275": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50276": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50277": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50278": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50279": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50280": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50281": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50282": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50283": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50284": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50285": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50286": {
      "content": " ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50287": {
      "content": "\t\t\t\t\t\t\t\t\t",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50288": {
      "content": "\t\t\t\t\t\t\t\t",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50289": {
      "content": "\t\t\t\t\t\t\t",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50290": {
      "content": "\t\t\t\t\t\t",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50291": {
      "content": "\t\t\t\t\t",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50292": {
      "content": "\t\t\t\t",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50293": {
      "content": "\t\t\t",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "50294": {
      "content": "\t\t",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    }
  },
  "bos_token": "<|endoftext|>",
  "clean_up_tokenization_spaces": true,
  "eos_token": "<|endoftext|>",
  "model_max_length": 2048,
  "tokenizer_class": "CodeGenTokenizer",
  "unk_token": "<|endoftext|>"
}

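The merged tokenizer above is a CodeGen-style GPT-2 BPE tokenizer. A minimal sketch of loading it, assuming the merge output was written to a local ./merge directory (the path is illustrative) and that transformers is installed:

```python
# Minimal sketch: load the merged tokenizer from the output directory.
# "./merge" is an assumed path for illustration; "tokenizer_class" in the
# config above resolves to transformers' CodeGenTokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./merge")
print(tok.bos_token, tok.eos_token, tok.model_max_length)
# expected per the config above: <|endoftext|> <|endoftext|> 2048
```
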
merge/vocab.json
ADDED
The diff for this file is too large to render.
See raw diff
mergekit/__init__.py
ADDED
File without changes
mergekit/_data/__init__.py
ADDED
File without changes
mergekit/_data/architectures/__init__.py
ADDED
File without changes
mergekit/_data/architectures/baichuan.json
ADDED
@@ -0,0 +1,47 @@
{
  "model_type": "baichuan",
  "architectures": [
    "BaichuanForCausalLM"
  ],
  "pre_weights": [
    {
      "name": "model.embed_tokens.weight",
      "is_embed": true
    }
  ],
  "post_weights": [
    {
      "name": "model.norm.weight"
    },
    {
      "name": "lm_head.weight",
      "is_embed": true
    }
  ],
  "num_layers_config_key": "num_hidden_layers",
  "layer_templates": {
    "weights": [
      {
        "name": "model.layers.${layer_index}.input_layernorm.weight"
      },
      {
        "name": "model.layers.${layer_index}.self_attn.W_pack.weight"
      },
      {
        "name": "model.layers.${layer_index}.self_attn.o_proj.weight"
      },
      {
        "name": "model.layers.${layer_index}.post_attention_layernorm.weight"
      },
      {
        "name": "model.layers.${layer_index}.mlp.gate_proj.weight"
      },
      {
        "name": "model.layers.${layer_index}.mlp.down_proj.weight"
      },
      {
        "name": "model.layers.${layer_index}.mlp.up_proj.weight"
      }
    ]
  }
}

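Each architecture file follows the same schema: fixed pre_weights and post_weights, plus per-layer name templates in which ${layer_index} is substituted once per layer, with the layer count read from the config key named by num_layers_config_key. A hypothetical sketch of that expansion (mergekit's actual resolver lives in mergekit/architecture.py and may differ):

```python
# Hypothetical illustration of ${layer_index} expansion; not mergekit's
# actual implementation (see mergekit/architecture.py for the real one).
from string import Template

layer_templates = [
    "model.layers.${layer_index}.input_layernorm.weight",
    "model.layers.${layer_index}.self_attn.W_pack.weight",
]
num_hidden_layers = 2  # would normally come from the model's config.json

names = [
    Template(t).substitute(layer_index=i)
    for i in range(num_hidden_layers)
    for t in layer_templates
]
print(names[0])  # model.layers.0.input_layernorm.weight
```
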
mergekit/_data/architectures/chatglm.json
ADDED
@@ -0,0 +1,50 @@
{
  "model_type": "chatglm",
  "architectures": [
    "ChatGLMModel"
  ],
  "pre_weights": [
    {
      "name": "transformer.embedding.word_embeddings.weight",
      "is_embed": true
    },
    {
      "name": "transformer.rotary_pos_emb.inv_freq"
    }
  ],
  "post_weights": [
    {
      "name": "transformer.encoder.final_layernorm.weight"
    },
    {
      "name": "transformer.output_layer.weight",
      "is_embed": true
    }
  ],
  "num_layers_config_key": "num_hidden_layers",
  "layer_templates": {
    "weights": [
      {
        "name": "transformer.encoder.layers.${layer_index}.input_layernorm.weight"
      },
      {
        "name": "transformer.encoder.layers.${layer_index}.mlp.dense_4h_to_h.weight"
      },
      {
        "name": "transformer.encoder.layers.${layer_index}.mlp.dense_h_to_4h.weight"
      },
      {
        "name": "transformer.encoder.layers.${layer_index}.post_attention_layernorm.weight"
      },
      {
        "name": "transformer.encoder.layers.${layer_index}.self_attention.dense.weight"
      },
      {
        "name": "transformer.encoder.layers.${layer_index}.self_attention.query_key_value.bias"
      },
      {
        "name": "transformer.encoder.layers.${layer_index}.self_attention.query_key_value.weight"
      }
    ]
  }
}

mergekit/_data/architectures/cohere.json
ADDED
@@ -0,0 +1,53 @@
{
  "model_type": "cohere",
  "architectures": [
    "CohereForCausalLM"
  ],
  "pre_weights": [
    {
      "name": "model.embed_tokens.weight",
      "is_embed": true
    }
  ],
  "post_weights": [
    {
      "name": "model.norm.weight"
    },
    {
      "name": "lm_head.weight",
      "is_embed": true,
      "aliases": [
        "model.embed_tokens.weight"
      ]
    }
  ],
  "num_layers_config_key": "num_hidden_layers",
  "layer_templates": {
    "weights": [
      {
        "name": "model.layers.${layer_index}.input_layernorm.weight"
      },
      {
        "name": "model.layers.${layer_index}.mlp.down_proj.weight"
      },
      {
        "name": "model.layers.${layer_index}.mlp.gate_proj.weight"
      },
      {
        "name": "model.layers.${layer_index}.mlp.up_proj.weight"
      },
      {
        "name": "model.layers.${layer_index}.self_attn.q_proj.weight"
      },
      {
        "name": "model.layers.${layer_index}.self_attn.k_proj.weight"
      },
      {
        "name": "model.layers.${layer_index}.self_attn.v_proj.weight"
      },
      {
        "name": "model.layers.${layer_index}.self_attn.o_proj.weight"
      }
    ]
  }
}

mergekit/_data/architectures/falcon.json
ADDED
@@ -0,0 +1,53 @@
{
  "model_type": "falcon",
  "architectures": [
    "FalconForCausalLM"
  ],
  "pre_weights": [
    {
      "name": "transformer.word_embeddings.weight",
      "is_embed": true
    }
  ],
  "post_weights": [
    {
      "name": "transformer.ln_f.weight"
    },
    {
      "name": "transformer.ln_f.bias"
    },
    {
      "name": "lm_head.weight",
      "is_embed": true
    }
  ],
  "num_layers_config_key": "num_hidden_layers",
  "layer_templates": {
    "weights": [
      {
        "name": "transformer.h.${layer_index}.ln_attn.bias"
      },
      {
        "name": "transformer.h.${layer_index}.ln_attn.weight"
      },
      {
        "name": "transformer.h.${layer_index}.ln_mlp.bias"
      },
      {
        "name": "transformer.h.${layer_index}.ln_mlp.weight"
      },
      {
        "name": "transformer.h.${layer_index}.mlp.dense_4h_to_h.weight"
      },
      {
        "name": "transformer.h.${layer_index}.mlp.dense_h_to_4h.weight"
      },
      {
        "name": "transformer.h.${layer_index}.self_attention.dense.weight"
      },
      {
        "name": "transformer.h.${layer_index}.self_attention.query_key_value.weight"
      }
    ]
  }
}

mergekit/_data/architectures/gemma.json
ADDED
@@ -0,0 +1,85 @@
{
  "model_type": "gemma",
  "architectures": [
    "GemmaForCausalLM"
  ],
  "pre_weights": [
    {
      "name": "model.embed_tokens.weight",
      "is_embed": true,
      "output_space": "h_0"
    }
  ],
  "num_layers_config_key": "num_hidden_layers",
  "layer_templates": {
    "weights": [
      {
        "name": "model.layers.${layer_index}.input_layernorm.weight",
        "input_space": "h_${layer_index}"
      },
      {
        "name": "model.layers.${layer_index}.self_attn.q_proj.weight",
        "input_space": "h_${layer_index}",
        "output_space": "attn_qk_${layer_index}"
      },
      {
        "name": "model.layers.${layer_index}.self_attn.k_proj.weight",
        "input_space": "h_${layer_index}",
        "output_space": "attn_qk_${layer_index}"
      },
      {
        "name": "model.layers.${layer_index}.self_attn.v_proj.weight",
        "input_space": "h_${layer_index}",
        "output_space": "attn_v_${layer_index}"
      },
      {
        "name": "model.layers.${layer_index}.self_attn.o_proj.weight",
        "input_space": "attn_v_${layer_index}",
        "output_space": "post_attn_${layer_index}"
      },
      {
        "name": "model.layers.${layer_index}.post_attention_layernorm.weight",
        "input_space": "h_a_${layer_index}"
      },
      {
        "name": "model.layers.${layer_index}.mlp.up_proj.weight",
        "input_space": "h_a_${layer_index}",
        "output_space": "up_${layer_index}"
      },
      {
        "name": "model.layers.${layer_index}.mlp.gate_proj.weight",
        "input_space": "h_a_${layer_index}",
        "output_space": "up_${layer_index}"
      },
      {
        "name": "model.layers.${layer_index}.mlp.down_proj.weight",
        "input_space": "up_${layer_index}",
        "output_space": "post_mlp_${layer_index}"
      }
    ],
    "procedural_spaces": [
      {
        "name": "h_a_${layer_index}",
        "type": "residual",
        "inputs": [
          "h_${layer_index}",
          "post_attn_${layer_index}"
        ]
      },
      {
        "name": "h_${layer_index+1}",
        "type": "residual",
        "inputs": [
          "h_a_${layer_index}",
          "post_mlp_${layer_index}"
        ]
      }
    ]
  },
  "post_weights": [
    {
      "name": "model.norm.weight",
      "input_space": "h_${num_layers}"
    }
  ]
}

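Unlike the simpler definitions above, the gemma file also annotates each weight with input/output activation spaces and declares procedural "residual" spaces describing how the residual stream is rebuilt after the attention and MLP blocks. An illustrative sketch of what those declarations enumerate for a two-layer model (this is an interpretation of the schema, not mergekit's own code):

```python
# Illustrative only: enumerate the residual spaces the gemma definition above
# declares for a 2-layer model. h_a_i is the stream after the attention
# residual add; h_{i+1} is the stream entering the next layer.
num_layers = 2
spaces = []
for i in range(num_layers):
    spaces.append({"name": f"h_a_{i}", "type": "residual",
                   "inputs": [f"h_{i}", f"post_attn_{i}"]})
    spaces.append({"name": f"h_{i + 1}", "type": "residual",
                   "inputs": [f"h_a_{i}", f"post_mlp_{i}"]})
for s in spaces:
    print(s["name"], "<-", " + ".join(s["inputs"]))
```
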
mergekit/_data/architectures/gpt-neox.json
ADDED
@@ -0,0 +1,74 @@
{
  "model_type": "gpt_neox",
  "architectures": [
    "GPTNeoXForCausalLM"
  ],
  "pre_weights": [
    {
      "name": "gpt_neox.embed_in.weight",
      "is_embed": true
    }
  ],
  "post_weights": [
    {
      "name": "gpt_neox.final_layer_norm.bias"
    },
    {
      "name": "gpt_neox.final_layer_norm.weight"
    },
    {
      "name": "embed_out.weight",
      "is_embed": true
    }
  ],
  "num_layers_config_key": "num_hidden_layers",
  "layer_templates": {
    "weights": [
      {
        "name": "gpt_neox.layers.${layer_index}.attention.dense.weight"
      },
      {
        "name": "gpt_neox.layers.${layer_index}.attention.dense.bias"
      },
      {
        "name": "gpt_neox.layers.${layer_index}.attention.query_key_value.weight"
      },
      {
        "name": "gpt_neox.layers.${layer_index}.attention.query_key_value.bias"
      },
      {
        "name": "gpt_neox.layers.${layer_index}.input_layernorm.weight"
      },
      {
        "name": "gpt_neox.layers.${layer_index}.input_layernorm.bias"
      },
      {
        "name": "gpt_neox.layers.${layer_index}.mlp.dense_4h_to_h.weight"
      },
      {
        "name": "gpt_neox.layers.${layer_index}.mlp.dense_4h_to_h.bias"
      },
      {
        "name": "gpt_neox.layers.${layer_index}.mlp.dense_h_to_4h.weight"
      },
      {
        "name": "gpt_neox.layers.${layer_index}.mlp.dense_h_to_4h.bias"
      },
      {
        "name": "gpt_neox.layers.${layer_index}.post_attention_layernorm.weight"
      },
      {
        "name": "gpt_neox.layers.${layer_index}.post_attention_layernorm.bias"
      },
      {
        "name": "gpt_neox.layers.${layer_index}.attention.bias"
      },
      {
        "name": "gpt_neox.layers.${layer_index}.attention.masked_bias"
      },
      {
        "name": "gpt_neox.layers.${layer_index}.attention.rotary_emb.inv_freq"
      }
    ]
  }
}

mergekit/_data/architectures/gpt2-sequence-classification.json
ADDED
@@ -0,0 +1,66 @@
{
  "model_type": "gpt2",
  "architectures": [
    "GPT2ForSequenceClassification"
  ],
  "pre_weights": [
    {
      "name": "transformer.wte.weight"
    },
    {
      "name": "transformer.wpe.weight"
    }
  ],
  "post_weights": [
    {
      "name": "transformer.ln_f.weight"
    },
    {
      "name": "transformer.ln_f.bias"
    },
    {
      "name": "score.weight"
    }
  ],
  "num_layers_config_key": "n_layer",
  "layer_templates": {
    "weights": [
      {
        "name": "transformer.h.${layer_index}.attn.c_attn.weight"
      },
      {
        "name": "transformer.h.${layer_index}.attn.c_attn.bias"
      },
      {
        "name": "transformer.h.${layer_index}.attn.c_proj.weight"
      },
      {
        "name": "transformer.h.${layer_index}.attn.c_proj.bias"
      },
      {
        "name": "transformer.h.${layer_index}.ln_1.weight"
      },
      {
        "name": "transformer.h.${layer_index}.ln_1.bias"
      },
      {
        "name": "transformer.h.${layer_index}.ln_2.weight"
      },
      {
        "name": "transformer.h.${layer_index}.ln_2.bias"
      },
      {
        "name": "transformer.h.${layer_index}.mlp.c_proj.weight"
      },
      {
        "name": "transformer.h.${layer_index}.mlp.c_proj.bias"
      },
      {
        "name": "transformer.h.${layer_index}.mlp.c_fc.weight"
      },
      {
        "name": "transformer.h.${layer_index}.mlp.c_fc.bias"
      }
    ]
  }
}

mergekit/_data/architectures/gpt2.json
ADDED
@@ -0,0 +1,64 @@
{
  "model_type": "gpt2",
  "architectures": [
    "GPT2LMHeadModel"
  ],
  "pre_weights": [
    {
      "name": "wte.weight",
      "is_embed": true
    },
    {
      "name": "wpe.weight"
    }
  ],
  "post_weights": [
    {
      "name": "ln_f.weight"
    },
    {
      "name": "ln_f.bias"
    }
  ],
  "num_layers_config_key": "n_layer",
  "layer_templates": {
    "weights": [
      {
        "name": "h.${layer_index}.attn.c_attn.weight"
      },
      {
        "name": "h.${layer_index}.attn.c_attn.bias"
      },
      {
        "name": "h.${layer_index}.attn.c_proj.weight"
      },
      {
        "name": "h.${layer_index}.attn.c_proj.bias"
      },
      {
        "name": "h.${layer_index}.ln_1.weight"
      },
      {
        "name": "h.${layer_index}.ln_1.bias"
      },
      {
        "name": "h.${layer_index}.ln_2.weight"
      },
      {
        "name": "h.${layer_index}.ln_2.bias"
      },
      {
        "name": "h.${layer_index}.mlp.c_proj.weight"
      },
      {
        "name": "h.${layer_index}.mlp.c_proj.bias"
      },
      {
        "name": "h.${layer_index}.mlp.c_fc.weight"
      },
      {
        "name": "h.${layer_index}.mlp.c_fc.bias"
      }
    ]
  }
}

mergekit/_data/architectures/gptbigcode.json
ADDED
@@ -0,0 +1,70 @@
{
  "model_type": "gpt_bigcode",
  "architectures": [
    "GPTBigCodeForCausalLM"
  ],
  "pre_weights": [
    {
      "name": "transformer.wte.weight",
      "is_embed": true
    },
    {
      "name": "transformer.wpe.weight"
    }
  ],
  "post_weights": [
    {
      "name": "transformer.ln_f.weight"
    },
    {
      "name": "transformer.ln_f.bias"
    },
    {
      "name": "lm_head.weight",
      "aliases": [
        "transformer.wte.weight"
      ]
    }
  ],
  "num_layers_config_key": "n_layer",
  "layer_templates": {
    "weights": [
      {
        "name": "transformer.h.${layer_index}.attn.c_attn.weight"
      },
      {
        "name": "transformer.h.${layer_index}.attn.c_attn.bias"
      },
      {
        "name": "transformer.h.${layer_index}.attn.c_proj.weight"
      },
      {
        "name": "transformer.h.${layer_index}.attn.c_proj.bias"
      },
      {
        "name": "transformer.h.${layer_index}.ln_1.weight"
      },
      {
        "name": "transformer.h.${layer_index}.ln_1.bias"
      },
      {
        "name": "transformer.h.${layer_index}.ln_2.weight"
      },
      {
        "name": "transformer.h.${layer_index}.ln_2.bias"
      },
      {
        "name": "transformer.h.${layer_index}.mlp.c_proj.weight"
      },
      {
        "name": "transformer.h.${layer_index}.mlp.c_proj.bias"
      },
      {
        "name": "transformer.h.${layer_index}.mlp.c_fc.weight"
      },
      {
        "name": "transformer.h.${layer_index}.mlp.c_fc.bias"
      }
    ]
  }
}

mergekit/_data/architectures/jais.json
ADDED
@@ -0,0 +1,70 @@
{
  "model_type": "jais",
  "architectures": [
    "JAISLMHeadModel"
  ],
  "pre_weights": [
    {
      "name": "transformer.wte.weight",
      "is_embed": true
    },
    {
      "name": "transformer.relative_pe.slopes"
    }
  ],
  "post_weights": [
    {
      "name": "transformer.ln_f.weight"
    },
    {
      "name": "transformer.ln_f.bias"
    }
  ],
  "num_layers_config_key": "n_layer",
  "layer_templates": {
    "weights": [
      {
        "name": "transformer.h.${layer_index}.attn.c_attn.weight"
      },
      {
        "name": "transformer.h.${layer_index}.attn.c_attn.bias"
      },
      {
        "name": "transformer.h.${layer_index}.attn.c_proj.weight"
      },
      {
        "name": "transformer.h.${layer_index}.attn.c_proj.bias"
      },
      {
        "name": "transformer.h.${layer_index}.ln_1.weight"
      },
      {
        "name": "transformer.h.${layer_index}.ln_1.bias"
      },
      {
        "name": "transformer.h.${layer_index}.ln_2.weight"
      },
      {
        "name": "transformer.h.${layer_index}.ln_2.bias"
      },
      {
        "name": "transformer.h.${layer_index}.mlp.c_fc.weight"
      },
      {
        "name": "transformer.h.${layer_index}.mlp.c_fc.bias"
      },
      {
        "name": "transformer.h.${layer_index}.mlp.c_fc2.weight"
      },
      {
        "name": "transformer.h.${layer_index}.mlp.c_fc2.bias"
      },
      {
        "name": "transformer.h.${layer_index}.mlp.c_proj.weight"
      },
      {
        "name": "transformer.h.${layer_index}.mlp.c_proj.bias"
      }
    ]
  }
}

mergekit/_data/architectures/llama.json
ADDED
@@ -0,0 +1,91 @@
{
  "model_type": "llama",
  "architectures": [
    "LlamaForCausalLM",
    "LLaMaForCausalLM"
  ],
  "pre_weights": [
    {
      "name": "model.embed_tokens.weight",
      "is_embed": true,
      "output_space": "h_0"
    }
  ],
  "num_layers_config_key": "num_hidden_layers",
  "layer_templates": {
    "weights": [
      {
        "name": "model.layers.${layer_index}.input_layernorm.weight",
        "input_space": "h_${layer_index}"
      },
      {
        "name": "model.layers.${layer_index}.self_attn.q_proj.weight",
        "input_space": "h_${layer_index}",
        "output_space": "attn_qk_${layer_index}"
      },
      {
        "name": "model.layers.${layer_index}.self_attn.k_proj.weight",
        "input_space": "h_${layer_index}",
        "output_space": "attn_qk_${layer_index}"
      },
      {
        "name": "model.layers.${layer_index}.self_attn.v_proj.weight",
        "input_space": "h_${layer_index}",
        "output_space": "attn_v_${layer_index}"
      },
      {
        "name": "model.layers.${layer_index}.self_attn.o_proj.weight",
        "input_space": "attn_v_${layer_index}",
        "output_space": "post_attn_${layer_index}"
      },
      {
        "name": "model.layers.${layer_index}.post_attention_layernorm.weight",
        "input_space": "h_a_${layer_index}"
      },
      {
        "name": "model.layers.${layer_index}.mlp.up_proj.weight",
        "input_space": "h_a_${layer_index}",
        "output_space": "up_${layer_index}"
      },
      {
        "name": "model.layers.${layer_index}.mlp.gate_proj.weight",
        "input_space": "h_a_${layer_index}",
        "output_space": "up_${layer_index}"
      },
      {
        "name": "model.layers.${layer_index}.mlp.down_proj.weight",
        "input_space": "up_${layer_index}",
        "output_space": "post_mlp_${layer_index}"
      }
    ],
    "procedural_spaces": [
      {
        "name": "h_a_${layer_index}",
        "type": "residual",
        "inputs": [
          "h_${layer_index}",
          "post_attn_${layer_index}"
        ]
      },
      {
        "name": "h_${layer_index+1}",
        "type": "residual",
        "inputs": [
          "h_a_${layer_index}",
          "post_mlp_${layer_index}"
        ]
      }
    ]
  },
  "post_weights": [
    {
      "name": "model.norm.weight",
      "input_space": "h_${num_layers}"
    },
    {
      "name": "lm_head.weight",
      "input_space": "h_${num_layers}",
      "is_embed": true
    }
  ]
}

mergekit/_data/architectures/mamba.json
ADDED
@@ -0,0 +1,57 @@
{
  "model_type": "mamba",
  "architectures": [
    "MambaForCausalLM"
  ],
  "pre_weights": [
    {
      "name": "backbone.embeddings.weight",
      "is_embed": true
    }
  ],
  "post_weights": [
    {
      "name": "backbone.norm_f.weight"
    },
    {
      "name": "lm_head.weight",
      "is_embed": true,
      "aliases": ["backbone.embeddings.weight"]
    }
  ],
  "num_layers_config_key": "num_hidden_layers",
  "layer_templates": {
    "weights": [
      {
        "name": "backbone.layers.${layer_index}.mixer.A_log"
      },
      {
        "name": "backbone.layers.${layer_index}.mixer.conv1d.bias"
      },
      {
        "name": "backbone.layers.${layer_index}.mixer.conv1d.weight"
      },
      {
        "name": "backbone.layers.${layer_index}.mixer.D"
      },
      {
        "name": "backbone.layers.${layer_index}.mixer.dt_proj.bias"
      },
      {
        "name": "backbone.layers.${layer_index}.mixer.dt_proj.weight"
      },
      {
        "name": "backbone.layers.${layer_index}.mixer.in_proj.weight"
      },
      {
        "name": "backbone.layers.${layer_index}.mixer.out_proj.weight"
      },
      {
        "name": "backbone.layers.${layer_index}.mixer.x_proj.weight"
      },
      {
        "name": "backbone.layers.${layer_index}.norm.weight"
      }
    ]
  }
}

mergekit/_data/architectures/mistral.json
ADDED
@@ -0,0 +1,90 @@
{
    "model_type": "mistral",
    "architectures": [
        "MistralForCausalLM"
    ],
    "pre_weights": [
        {
            "name": "model.embed_tokens.weight",
            "is_embed": true,
            "output_space": "h_0"
        }
    ],
    "num_layers_config_key": "num_hidden_layers",
    "layer_templates": {
        "weights": [
            {
                "name": "model.layers.${layer_index}.input_layernorm.weight",
                "input_space": "h_${layer_index}"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.q_proj.weight",
                "input_space": "h_${layer_index}",
                "output_space": "attn_qk_${layer_index}"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.k_proj.weight",
                "input_space": "h_${layer_index}",
                "output_space": "attn_qk_${layer_index}"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.v_proj.weight",
                "input_space": "h_${layer_index}",
                "output_space": "attn_v_${layer_index}"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.o_proj.weight",
                "input_space": "attn_v_${layer_index}",
                "output_space": "post_attn_${layer_index}"
            },
            {
                "name": "model.layers.${layer_index}.post_attention_layernorm.weight",
                "input_space": "h_a_${layer_index}"
            },
            {
                "name": "model.layers.${layer_index}.mlp.up_proj.weight",
                "input_space": "h_a_${layer_index}",
                "output_space": "up_${layer_index}"
            },
            {
                "name": "model.layers.${layer_index}.mlp.gate_proj.weight",
                "input_space": "h_a_${layer_index}",
                "output_space": "up_${layer_index}"
            },
            {
                "name": "model.layers.${layer_index}.mlp.down_proj.weight",
                "input_space": "up_${layer_index}",
                "output_space": "post_mlp_${layer_index}"
            }
        ],
        "procedural_spaces": [
            {
                "name": "h_a_${layer_index}",
                "type": "residual",
                "inputs": [
                    "h_${layer_index}",
                    "post_attn_${layer_index}"
                ]
            },
            {
                "name": "h_${layer_index+1}",
                "type": "residual",
                "inputs": [
                    "h_a_${layer_index}",
                    "post_mlp_${layer_index}"
                ]
            }
        ]
    },
    "post_weights": [
        {
            "name": "model.norm.weight",
            "input_space": "h_${num_layers}"
        },
        {
            "name": "lm_head.weight",
            "input_space": "h_${num_layers}",
            "is_embed": true
        }
    ]
}
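Note: the `procedural_spaces` entries above encode the two residual additions of a pre-norm transformer block, which is what lets space-matching merge methods track activations across the skip connections. In toy form (NumPy stand-ins, not mergekit code):

```python
import numpy as np

h = np.zeros(8)          # hidden state entering layer l: space "h_${layer_index}"
post_attn = np.ones(8)   # output of o_proj: space "post_attn_${layer_index}"
post_mlp = np.ones(8)    # output of down_proj: space "post_mlp_${layer_index}"

h_a = h + post_attn      # residual space "h_a_${layer_index}"
h_next = h_a + post_mlp  # residual space "h_${layer_index+1}"
```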
mergekit/_data/architectures/phi-1.json
ADDED
@@ -0,0 +1,66 @@
{
    "model_type": "mixformer-sequential",
    "architectures": [
        "MixFormerSequentialForCausalLM"
    ],
    "pre_weights": [
        {
            "name": "layers.0.wte.weight",
            "is_embed": true
        }
    ],
    "num_layers_config_key": "n_layer",
    "layer_templates": {
        "weights": [
            {
                "name": "layers.${layer_index}.ln.bias"
            },
            {
                "name": "layers.${layer_index}.ln.weight"
            },
            {
                "name": "layers.${layer_index}.mixer.Wqkv.bias"
            },
            {
                "name": "layers.${layer_index}.mixer.Wqkv.weight"
            },
            {
                "name": "layers.${layer_index}.mixer.out_proj.bias"
            },
            {
                "name": "layers.${layer_index}.mixer.out_proj.weight"
            },
            {
                "name": "layers.${layer_index}.mixer.rotary_emb.inv_freq"
            },
            {
                "name": "layers.${layer_index}.mlp.fc1.bias"
            },
            {
                "name": "layers.${layer_index}.mlp.fc1.weight"
            },
            {
                "name": "layers.${layer_index}.mlp.fc2.bias"
            },
            {
                "name": "layers.${layer_index}.mlp.fc2.weight"
            }
        ]
    },
    "post_weights": [
        {
            "name": "layers.${num_layers}.linear.bias",
            "is_embed": true
        },
        {
            "name": "layers.${num_layers}.linear.weight",
            "is_embed": true
        },
        {
            "name": "layers.${num_layers}.ln.bias"
        },
        {
            "name": "layers.${num_layers}.ln.weight"
        }
    ]
}
mergekit/_data/architectures/phi2-old.json
ADDED
@@ -0,0 +1,62 @@
{
    "model_type": "phi-msft",
    "architectures": [
        "PhiForCausalLM"
    ],
    "pre_weights": [
        {
            "name": "transformer.embd.wte.weight",
            "is_embed": true
        }
    ],
    "post_weights": [
        {
            "name": "lm_head.linear.bias"
        },
        {
            "name": "lm_head.linear.weight",
            "is_embed": true
        },
        {
            "name": "lm_head.ln.bias"
        },
        {
            "name": "lm_head.ln.weight"
        }
    ],
    "num_layers_config_key": "n_layer",
    "layer_templates": {
        "weights": [
            {
                "name": "transformer.h.${layer_index}.ln.bias"
            },
            {
                "name": "transformer.h.${layer_index}.ln.weight"
            },
            {
                "name": "transformer.h.${layer_index}.mixer.out_proj.bias"
            },
            {
                "name": "transformer.h.${layer_index}.mixer.out_proj.weight"
            },
            {
                "name": "transformer.h.${layer_index}.mixer.Wqkv.bias"
            },
            {
                "name": "transformer.h.${layer_index}.mixer.Wqkv.weight"
            },
            {
                "name": "transformer.h.${layer_index}.mlp.fc1.bias"
            },
            {
                "name": "transformer.h.${layer_index}.mlp.fc1.weight"
            },
            {
                "name": "transformer.h.${layer_index}.mlp.fc2.bias"
            },
            {
                "name": "transformer.h.${layer_index}.mlp.fc2.weight"
            }
        ]
    }
}
mergekit/_data/architectures/phi2.json
ADDED
@@ -0,0 +1,74 @@
{
    "model_type": "phi",
    "architectures": [
        "PhiForCausalLM"
    ],
    "pre_weights": [
        {
            "name": "model.embed_tokens.weight",
            "is_embed": true
        }
    ],
    "post_weights": [
        {
            "name": "lm_head.bias"
        },
        {
            "name": "lm_head.weight",
            "is_embed": true
        },
        {
            "name": "model.final_layernorm.bias"
        },
        {
            "name": "model.final_layernorm.weight"
        }
    ],
    "num_layers_config_key": "num_hidden_layers",
    "layer_templates": {
        "weights": [
            {
                "name": "model.layers.${layer_index}.input_layernorm.bias"
            },
            {
                "name": "model.layers.${layer_index}.input_layernorm.weight"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.dense.bias"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.dense.weight"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.q_proj.bias"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.q_proj.weight"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.k_proj.bias"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.k_proj.weight"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.v_proj.bias"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.v_proj.weight"
            },
            {
                "name": "model.layers.${layer_index}.mlp.fc1.bias"
            },
            {
                "name": "model.layers.${layer_index}.mlp.fc1.weight"
            },
            {
                "name": "model.layers.${layer_index}.mlp.fc2.bias"
            },
            {
                "name": "model.layers.${layer_index}.mlp.fc2.weight"
            }
        ]
    }
}
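Note: `phi2-old.json` (`model_type: "phi-msft"`) and `phi2.json` (`model_type: "phi"`) both declare the architecture `PhiForCausalLM`; `get_architecture_info` in `mergekit/architecture.py` below disambiguates between them by the config's `model_type`. A short sketch of that lookup (the model id is an assumed example):

```python
from transformers import AutoConfig
from mergekit.architecture import NAME_TO_ARCH

cfg = AutoConfig.from_pretrained("microsoft/phi-2")  # assumed example checkpoint
candidates = NAME_TO_ARCH["PhiForCausalLM"]
match = next(c for c in candidates
             if c.definition.expected_model_type == cfg.model_type)
print(match.name())  # "phi" for current configs, "phi-msft" for the old format
```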
mergekit/_data/architectures/qwen.json
ADDED
@@ -0,0 +1,50 @@
{
    "model_type": "qwen",
    "architectures": [
        "QWenLMHeadModel"
    ],
    "pre_weights": [
        {
            "name": "transformer.wte.weight",
            "is_embed": true
        }
    ],
    "post_weights": [
        {
            "name": "transformer.ln_f.weight"
        },
        {
            "name": "lm_head.weight",
            "is_embed": true
        }
    ],
    "num_layers_config_key": "num_hidden_layers",
    "layer_templates": {
        "weights": [
            {
                "name": "transformer.h.${layer_index}.attn.c_attn.bias"
            },
            {
                "name": "transformer.h.${layer_index}.attn.c_attn.weight"
            },
            {
                "name": "transformer.h.${layer_index}.attn.c_proj.weight"
            },
            {
                "name": "transformer.h.${layer_index}.ln_1.weight"
            },
            {
                "name": "transformer.h.${layer_index}.ln_2.weight"
            },
            {
                "name": "transformer.h.${layer_index}.mlp.c_proj.weight"
            },
            {
                "name": "transformer.h.${layer_index}.mlp.w1.weight"
            },
            {
                "name": "transformer.h.${layer_index}.mlp.w2.weight"
            }
        ]
    }
}
mergekit/_data/architectures/qwen2.json
ADDED
@@ -0,0 +1,62 @@
{
    "model_type": "qwen2",
    "architectures": [
        "Qwen2ForCausalLM"
    ],
    "pre_weights": [
        {
            "name": "model.embed_tokens.weight",
            "is_embed": true
        }
    ],
    "post_weights": [
        {
            "name": "model.norm.weight"
        },
        {
            "name": "lm_head.weight",
            "is_embed": true
        }
    ],
    "num_layers_config_key": "num_hidden_layers",
    "layer_templates": {
        "weights": [
            {
                "name": "model.layers.${layer_index}.input_layernorm.weight"
            },
            {
                "name": "model.layers.${layer_index}.mlp.down_proj.weight"
            },
            {
                "name": "model.layers.${layer_index}.mlp.gate_proj.weight"
            },
            {
                "name": "model.layers.${layer_index}.mlp.up_proj.weight"
            },
            {
                "name": "model.layers.${layer_index}.post_attention_layernorm.weight"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.k_proj.bias"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.k_proj.weight"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.o_proj.weight"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.q_proj.bias"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.q_proj.weight"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.v_proj.bias"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.v_proj.weight"
            }
        ]
    }
}
mergekit/_data/architectures/stablelm.json
ADDED
@@ -0,0 +1,98 @@
{
    "model_type": "stablelm_epoch",
    "architectures": [
        "StableLMEpochForCausalLM"
    ],
    "pre_weights": [
        {
            "name": "model.embed_tokens.weight",
            "is_embed": true,
            "output_space": "h_0"
        }
    ],
    "num_layers_config_key": "num_hidden_layers",
    "layer_templates": {
        "weights": [
            {
                "name": "model.layers.${layer_index}.input_layernorm.weight",
                "input_space": "h_${layer_index}"
            },
            {
                "name": "model.layers.${layer_index}.input_layernorm.bias",
                "input_space": "h_${layer_index}"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.q_proj.weight",
                "input_space": "h_${layer_index}",
                "output_space": "attn_qk_${layer_index}"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.k_proj.weight",
                "input_space": "h_${layer_index}",
                "output_space": "attn_qk_${layer_index}"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.v_proj.weight",
                "input_space": "h_${layer_index}",
                "output_space": "attn_v_${layer_index}"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.o_proj.weight",
                "input_space": "attn_v_${layer_index}",
                "output_space": "post_attn_${layer_index}"
            },
            {
                "name": "model.layers.${layer_index}.post_attention_layernorm.weight",
                "input_space": "h_a_${layer_index}"
            },
            {
                "name": "model.layers.${layer_index}.post_attention_layernorm.bias",
                "input_space": "h_a_${layer_index}"
            },
            {
                "name": "model.layers.${layer_index}.mlp.up_proj.weight",
                "input_space": "h_a_${layer_index}",
                "output_space": "up_${layer_index}"
            },
            {
                "name": "model.layers.${layer_index}.mlp.gate_proj.weight",
                "input_space": "h_a_${layer_index}",
                "output_space": "up_${layer_index}"
            },
            {
                "name": "model.layers.${layer_index}.mlp.down_proj.weight",
                "input_space": "up_${layer_index}",
                "output_space": "post_mlp_${layer_index}"
            }
        ],
        "procedural_spaces": [
            {
                "name": "h_a_${layer_index}",
                "type": "residual",
                "inputs": [
                    "h_${layer_index}",
                    "post_attn_${layer_index}"
                ]
            },
            {
                "name": "h_${layer_index+1}",
                "type": "residual",
                "inputs": [
                    "h_a_${layer_index}",
                    "post_mlp_${layer_index}"
                ]
            }
        ]
    },
    "post_weights": [
        {
            "name": "model.norm.weight",
            "input_space": "h_${num_layers}"
        },
        {
            "name": "lm_head.weight",
            "input_space": "h_${num_layers}",
            "is_embed": true
        }
    ]
}
mergekit/_data/architectures/stablelm2.json
ADDED
@@ -0,0 +1,74 @@
{
    "model_type": "stablelm",
    "architectures": [
        "StableLmForCausalLM"
    ],
    "pre_weights": [
        {
            "name": "model.embed_tokens.weight",
            "is_embed": true
        }
    ],
    "post_weights": [
        {
            "name": "model.norm.weight"
        },
        {
            "name": "model.norm.bias"
        },
        {
            "name": "lm_head.weight",
            "is_embed": true
        }
    ],
    "num_layers_config_key": "num_hidden_layers",
    "layer_templates": {
        "weights": [
            {
                "name": "model.layers.${layer_index}.input_layernorm.weight"
            },
            {
                "name": "model.layers.${layer_index}.input_layernorm.bias"
            },
            {
                "name": "model.layers.${layer_index}.mlp.down_proj.weight"
            },
            {
                "name": "model.layers.${layer_index}.mlp.gate_proj.weight"
            },
            {
                "name": "model.layers.${layer_index}.mlp.up_proj.weight"
            },
            {
                "name": "model.layers.${layer_index}.post_attention_layernorm.weight"
            },
            {
                "name": "model.layers.${layer_index}.post_attention_layernorm.bias"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.q_proj.weight"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.q_proj.bias",
                "optional": true
            },
            {
                "name": "model.layers.${layer_index}.self_attn.k_proj.weight"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.k_proj.bias",
                "optional": true
            },
            {
                "name": "model.layers.${layer_index}.self_attn.v_proj.weight"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.v_proj.bias",
                "optional": true
            },
            {
                "name": "model.layers.${layer_index}.self_attn.o_proj.weight"
            }
        ]
    }
}
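Note: the `"optional": true` entries above mark the q/k/v projection biases, which StableLM 2 checkpoints may or may not include depending on how they were configured. A hedged sketch of how a consumer of these definitions can honor the flag (`gather` is a hypothetical helper, not mergekit's own planner):

```python
# Hypothetical sketch: collect the tensors a layer definition asks for,
# skipping ones that are marked optional and absent from the checkpoint.
def gather(state_dict: dict, weight_infos) -> dict:
    out = {}
    for wi in weight_infos:
        if wi.name not in state_dict:
            if wi.optional:
                continue  # fine to omit
            raise KeyError(wi.name)
        out[wi.name] = state_dict[wi.name]
    return out
```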
mergekit/_data/architectures/starcoder2.json
ADDED
@@ -0,0 +1,78 @@
{
    "model_type": "starcoder2",
    "architectures": [
        "Starcoder2ForCausalLM"
    ],
    "pre_weights": [
        {
            "name": "model.embed_tokens.weight",
            "is_embed": true
        }
    ],
    "post_weights": [
        {
            "name": "lm_head.weight",
            "is_embed": true,
            "aliases": ["model.embed_tokens.weight"]
        },
        {
            "name": "model.norm.bias"
        },
        {
            "name": "model.norm.weight"
        }
    ],
    "num_layers_config_key": "num_hidden_layers",
    "layer_templates": {
        "weights": [
            {
                "name": "model.layers.${layer_index}.input_layernorm.bias"
            },
            {
                "name": "model.layers.${layer_index}.input_layernorm.weight"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.q_proj.bias"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.q_proj.weight"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.k_proj.bias"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.k_proj.weight"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.v_proj.bias"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.v_proj.weight"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.o_proj.bias"
            },
            {
                "name": "model.layers.${layer_index}.self_attn.o_proj.weight"
            },
            {
                "name": "model.layers.${layer_index}.post_attention_layernorm.bias"
            },
            {
                "name": "model.layers.${layer_index}.post_attention_layernorm.weight"
            },
            {
                "name": "model.layers.${layer_index}.mlp.c_fc.bias"
            },
            {
                "name": "model.layers.${layer_index}.mlp.c_fc.weight"
            },
            {
                "name": "model.layers.${layer_index}.mlp.c_proj.bias"
            },
            {
                "name": "model.layers.${layer_index}.mlp.c_proj.weight"
            }
        ]
    }
}
mergekit/architecture.py
ADDED
@@ -0,0 +1,374 @@
# Copyright (C) 2024 Charles O. Goddard
#
# This software is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.

import importlib.resources
import string
from abc import ABC, abstractmethod
from typing import ClassVar, Dict, List, Optional, Tuple, Union

from pydantic import BaseModel, Field
from transformers import PretrainedConfig
from typing_extensions import Literal

import mergekit._data.architectures


class WeightInfo(BaseModel, frozen=True):
    """Information about an individual weight tensor in a model.

    Attributes:
        name (str):
            The name of the tensor representing the weight.
        is_embed (bool):
            Indicates whether the weight is for an embedding or language model head.
        input_space (Optional[str]):
            The name of the input space associated with the weight, if applicable.
        output_space (Optional[str]):
            The name of the output space associated with the weight, if applicable.
        optional (bool):
            Indicates whether the weight can be omitted from a model.
        aliases (Optional[List[str]]):
            List of alternative names for the weight, if applicable.
    """

    name: str
    is_embed: bool = False
    input_space: Optional[str] = None
    output_space: Optional[str] = None
    optional: bool = False
    aliases: Optional[List[str]] = None


class ProceduralSpaceInfo(BaseModel, frozen=True):
    """Defines a procedural space computed from one or more other spaces.

    Currently only supports residual connections.

    Attributes:
        name (str): The name of the space defined.
        type (str): The type of procedural space.
        inputs (List[str]): List of names of spaces used to define this space."""

    name: str
    type: Literal["residual"]
    inputs: List[str]


class ArchitectureInfo(ABC):
    @abstractmethod
    def name(self) -> str:
        """Return the name of the architecture."""
        ...

    @abstractmethod
    def pre_weights(self, config: PretrainedConfig) -> List[WeightInfo]:
        """Return a list of all weights preceding the first layer."""
        ...

    @abstractmethod
    def post_weights(self, config: PretrainedConfig) -> List[WeightInfo]:
        """Return a list of all weights following the final layer."""
        ...

    @abstractmethod
    def layer_weights(
        self, index: int, config: PretrainedConfig
    ) -> Optional[List[WeightInfo]]:
        """Return a list of all weights associated with a given layer."""
        ...

    @abstractmethod
    def sliceable(self) -> bool:
        """
        Return True if the layers of this architecture can be meaningfully sliced.
        """
        ...

    def num_layers_config_key(self) -> str:
        """Key in config that represents number of layers"""
        return "num_hidden_layers"

    def num_layers(self, config: PretrainedConfig) -> int:
        """Return the number of layers in a model."""
        return getattr(config, self.num_layers_config_key())

    def all_weights(self, config: PretrainedConfig) -> List[WeightInfo]:
        """Return all weights associated with a model."""
        num_layers = self.num_layers(config)
        res = list(self.pre_weights(config))
        for layer_idx in range(num_layers):
            res.extend(self.layer_weights(layer_idx, config))
        res.extend(self.post_weights(config))
        return res

    def procedural_spaces(self, config: PretrainedConfig) -> List[ProceduralSpaceInfo]:
        """Return a list of all procedurally defined spaces in a model."""
        return []

    def has_defined_spaces(self) -> bool:
        """
        Return True if this architecture defines space information needed for
        matching-based merge methods.
        """
        return False


class ConfiguredArchitectureInfo(BaseModel, frozen=True, arbitrary_types_allowed=True):
    info: ArchitectureInfo
    config: PretrainedConfig

    def name(self) -> str:
        return self.info.name()

    def num_layers(self) -> int:
        return self.info.num_layers(self.config)

    def pre_weights(self) -> List[WeightInfo]:
        return self.info.pre_weights(self.config)

    def post_weights(self) -> List[WeightInfo]:
        return self.info.post_weights(self.config)

    def layer_weights(self, index: int) -> List[WeightInfo]:
        return self.info.layer_weights(index, self.config)

    def procedural_spaces(self) -> List[ProceduralSpaceInfo]:
        return self.info.procedural_spaces(self.config)

    def all_weights(self) -> List[WeightInfo]:
        return self.info.all_weights(self.config)


class JSONLayerTemplates(BaseModel, frozen=True):
    weights: List[WeightInfo]
    procedural_spaces: Optional[List[ProceduralSpaceInfo]] = None


class JSONArchitectureDefinition(BaseModel, frozen=True):
    expected_model_type: str = Field(alias="model_type")
    architectures: List[str]
    pre_weights: List[WeightInfo]
    layer_templates: JSONLayerTemplates
    post_weights: List[WeightInfo]
    procedural_spaces: Optional[List[ProceduralSpaceInfo]] = None
    num_layers_config_key: Optional[str] = None


class TemplateWithArithmetic(string.Template):
    idpattern = r"(?a:[_a-z][_a-z0-9]*([+-]1)?)"


def _template_substitution(
    template: str, num_layers: int, layer_idx: Optional[int] = None
) -> str:
    if "{" not in template:
        return template

    substitutions = {
        "num_layers": num_layers,
        "num_layers+1": num_layers + 1,
        "num_layers-1": num_layers - 1,
    }

    if layer_idx is not None:
        substitutions.update(
            {
                "layer_index": layer_idx,
                "layer_index+1": layer_idx + 1,
                "layer_index-1": layer_idx - 1,
            }
        )

    return TemplateWithArithmetic(template).substitute(substitutions)


class JsonArchitectureInfo(ArchitectureInfo, BaseModel, frozen=True):
    definition: JSONArchitectureDefinition

    def _substitute(
        self,
        item: Union[WeightInfo, ProceduralSpaceInfo],
        config: PretrainedConfig,
        layer_idx: Optional[int] = None,
    ) -> Union[WeightInfo, ProceduralSpaceInfo]:
        num_layers = self.num_layers(config)

        obj_dict = item.model_dump(mode="json", exclude_unset=True)
        for key in obj_dict:
            if isinstance(obj_dict[key], str):
                obj_dict[key] = _template_substitution(
                    obj_dict[key], num_layers, layer_idx
                )
            elif isinstance(obj_dict[key], list):
                obj_dict[key] = [
                    (
                        _template_substitution(s, num_layers, layer_idx)
                        if isinstance(s, str)
                        else s
                    )
                    for s in obj_dict[key]
                ]
        return type(item).model_validate(obj_dict)

    def name(self) -> str:
        return self.definition.expected_model_type

    def pre_weights(self, config: PretrainedConfig) -> List[WeightInfo]:
        return [
            self._substitute(wi, config=config) for wi in self.definition.pre_weights
        ]

    def layer_weights(
        self, index: int, config: PretrainedConfig
    ) -> Optional[List[WeightInfo]]:
        return [
            self._substitute(wi, config=config, layer_idx=index)
            for wi in self.definition.layer_templates.weights
        ]

    def post_weights(self, config: PretrainedConfig) -> List[WeightInfo]:
        return [
            self._substitute(wi, config=config) for wi in self.definition.post_weights
        ]

    def sliceable(self) -> bool:
        return True

    def procedural_spaces(self, config: PretrainedConfig) -> List[ProceduralSpaceInfo]:
        res = []
        for s in self.definition.procedural_spaces or []:
            res.append(self._substitute(s, config=config))
        for idx in range(self.num_layers(config)):
            for s in self.definition.layer_templates.procedural_spaces or []:
                res.append(self._substitute(s, config=config, layer_idx=idx))
        return res

    def has_defined_spaces(self) -> bool:
        if (
            self.definition.procedural_spaces
            or self.definition.layer_templates.procedural_spaces
        ):
            return True
        for wi in (
            self.definition.layer_templates.weights
            + self.definition.pre_weights
            + self.definition.post_weights
        ):
            if wi.input_space or wi.output_space:
                return True
        return False

    def num_layers_config_key(self) -> str:
        return self.definition.num_layers_config_key


class MixtralTensorNames(ArchitectureInfo, BaseModel):
    ARCHITECTURE_NAME: ClassVar[str] = "MixtralForCausalLM"
    num_local_experts: int

    def name(self) -> str:
        return "mixtral"

    @classmethod
    def from_config(cls, config: PretrainedConfig):
        return MixtralTensorNames(num_local_experts=config.num_local_experts)

    def pre_weights(self, config: PretrainedConfig) -> List[WeightInfo]:
        return MISTRAL_INFO.pre_weights(config)

    def post_weights(self, config: PretrainedConfig) -> List[WeightInfo]:
        return MISTRAL_INFO.post_weights(config)

    def num_layers_config_key(self) -> str:
        return MISTRAL_INFO.num_layers_config_key()

    def layer_weights(
        self, index: int, config: PretrainedConfig
    ) -> Optional[List[WeightInfo]]:
        num_experts = self.num_local_experts
        prefix = f"model.layers.{index}"
        tensor_names = []
        for expert_idx in range(num_experts):
            for param in ("w1", "w2", "w3"):
                tensor_names.append(
                    prefix + f".block_sparse_moe.experts.{expert_idx}.{param}.weight"
                )
        tensor_names.append(prefix + ".block_sparse_moe.gate.weight")
        res = []
        for name in tensor_names:
            res.append(WeightInfo(name=name))
        for weight_info in MISTRAL_INFO.layer_weights(index, config):
            if ".mlp." in weight_info.name:
                continue
            res.append(weight_info)
        return res

    def sliceable(self) -> bool:
        return True

    def has_defined_spaces(self) -> bool:
        return False


def _load_json_arch(name: str) -> JsonArchitectureInfo:
    text = importlib.resources.read_text(mergekit._data.architectures, name)
    return JsonArchitectureInfo(
        definition=JSONArchitectureDefinition.model_validate_json(text)
    )


def _load_all_architectures() -> (
    Tuple[List[JsonArchitectureInfo], Dict[str, List[JsonArchitectureInfo]]]
):
    architectures: List[JsonArchitectureInfo] = []
    for f in importlib.resources.contents(mergekit._data.architectures):
        if f.lower().endswith(".json"):
            architectures.append(_load_json_arch(f))

    name_to_arch: Dict[str, List[JsonArchitectureInfo]] = {}
    for arch_info in architectures:
        for name in arch_info.definition.architectures:
            name_to_arch[name] = name_to_arch.get(name, [])
            name_to_arch[name].append(arch_info)
    return architectures, name_to_arch


JSON_ARCHITECTURES, NAME_TO_ARCH = _load_all_architectures()
MISTRAL_INFO = _load_json_arch("mistral.json")


def get_architecture_info(config: PretrainedConfig) -> ArchitectureInfo:
    if len(config.architectures) != 1:
        raise RuntimeError("More than one architecture in config?")

    arch_name = config.architectures[0]

    if arch_name == MixtralTensorNames.ARCHITECTURE_NAME:
        return MixtralTensorNames.from_config(config)

    if arch_name not in NAME_TO_ARCH:
        raise RuntimeError(f"Unsupported architecture {arch_name}")

    candidates = list(NAME_TO_ARCH[arch_name])
    if len(candidates) == 1:
        return candidates[0]

    for c in candidates:
        if c.definition.expected_model_type == config.model_type:
            return c

    raise RuntimeError(
        f"Unsupported model_type {config.model_type} for architecture {arch_name}"
    )
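For reference, a minimal usage sketch of the module above (the model id is an assumed example; any checkpoint whose architecture has a JSON definition works the same way):

```python
from transformers import AutoConfig
from mergekit.architecture import get_architecture_info

cfg = AutoConfig.from_pretrained("mistralai/Mistral-7B-v0.1")  # assumed example
arch = get_architecture_info(cfg)
print(arch.name())                    # "mistral"
print(arch.num_layers(cfg))           # e.g. 32
print(arch.layer_weights(0, cfg)[0])  # first per-layer WeightInfo
```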
mergekit/card.py
ADDED
@@ -0,0 +1,177 @@
# Copyright (C) 2024 Charles O. Goddard
#
# This software is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.

import os
from typing import Generator, List, Optional

import huggingface_hub
import yaml
from huggingface_hub.utils import HFValidationError
from yaml.nodes import SequenceNode as SequenceNode

from mergekit.config import MergeConfiguration, ModelReference

CARD_TEMPLATE = """---
{metadata}
---
# {name}

This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).

## Merge Details
### Merge Method

This model was merged using the {merge_method} merge method{base_text}.

### Models Merged

The following models were included in the merge:
{model_list}

### Configuration

The following YAML configuration was used to produce this model:

```yaml
{config_yaml}
```
"""


def is_hf(path: str) -> bool:
    """
    Determines if the given path is a Hugging Face model repository.

    Args:
        path: A string path to check.
    """
    if path[0] in "/~" or path.count("/") > 1:
        return False  # definitely a local path
    if not os.path.exists(path):
        return True  # If path doesn't exist locally, it must be a HF repo
    try:
        return huggingface_hub.repo_exists(path, repo_type="model", token=False)
    except HFValidationError:
        return False


def extract_hf_paths(models: List[ModelReference]) -> Generator[str, None, None]:
    """
    Yields all valid Hugging Face paths from a list of ModelReference objects.

    Args:
        models: A list of ModelReference objects.
    """
    for model in models:
        if is_hf(model.model.path):
            yield model.model.path

        if model.lora and is_hf(model.lora.path):
            yield model.lora.path


def method_md(merge_method: str) -> str:
    """
    Returns a markdown string for the given merge method.

    Args:
        merge_method: A string indicating the merge method used.
    """
    methods = {
        "linear": "[linear](https://arxiv.org/abs/2203.05482)",
        "ties": "[TIES](https://arxiv.org/abs/2306.01708)",
        "slerp": "SLERP",
        "task_arithmetic": "[task arithmetic](https://arxiv.org/abs/2212.04089)",
        "dare_ties": "[DARE](https://arxiv.org/abs/2311.03099) [TIES](https://arxiv.org/abs/2306.01708)",
        "dare_linear": "linear [DARE](https://arxiv.org/abs/2311.03099)",
        "model_stock": "[Model Stock](https://arxiv.org/abs/2403.19522)",
    }
    return methods.get(merge_method, merge_method)


def maybe_link_hf(path: str) -> str:
    """
    Convert a path to a clickable link if it's a Hugging Face model path.

    Args:
        path: A string path to possibly convert to a link.
    """
    if is_hf(path):
        return f"[{path}](https://huggingface.co/{path})"
    return path


def modelref_md(model: ModelReference) -> str:
    """
    Generates markdown description for a ModelReference object.

    Args:
        model: A ModelReference object.

    Returns:
        A markdown formatted string describing the model reference.
    """
    text = maybe_link_hf(model.model.path)
    if model.lora:
        text += " + " + maybe_link_hf(model.lora.path)
    return text


def generate_card(
    config: MergeConfiguration,
    config_yaml: str,
    name: Optional[str] = None,
) -> str:
    """
    Generates a markdown card for a merged model configuration.

    Args:
        config: A MergeConfiguration object.
        config_yaml: YAML source text of the config.
        name: An optional name for the model.
    """
    if not name:
        name = "Untitled Model (1)"

    hf_bases = list(extract_hf_paths(config.referenced_models()))
    tags = ["mergekit", "merge"]

    actual_base = config.base_model
    if config.merge_method == "slerp":
        # curse my past self
        actual_base = None

    base_text = ""
    if actual_base:
        base_text = f" using {modelref_md(actual_base)} as a base"

    model_bullets = []
    for model in config.referenced_models():
        if model == actual_base:
            # actual_base is mentioned in base_text - don't include in list
            continue

        model_bullets.append("* " + modelref_md(model))

    return CARD_TEMPLATE.format(
        metadata=yaml.dump(
            {"base_model": hf_bases, "tags": tags, "library_name": "transformers"}
        ),
        model_list="\n".join(model_bullets),
        base_text=base_text,
        merge_method=method_md(config.merge_method),
        name=name,
    )
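And a usage sketch for the card generator (the file path is a placeholder; `config.yaml` here refers to a mergekit merge config like the one at the root of this repo):

```python
import yaml

from mergekit.card import generate_card
from mergekit.config import MergeConfiguration

with open("config.yaml") as fh:  # placeholder path to a merge config
    config_yaml = fh.read()
config = MergeConfiguration.model_validate(yaml.safe_load(config_yaml))
print(generate_card(config, config_yaml, name="my-merged-model"))
```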