pengdaqian committed cb34746 (initial commit, 0 parents): init scan
Files changed:
- .idea/.gitignore +8 -0
- .idea/inspectionProfiles/Project_Default.xml +14 -0
- .idea/inspectionProfiles/profiles_settings.xml +6 -0
- .idea/misc.xml +4 -0
- .idea/model_scan.iml +8 -0
- .idea/modules.xml +8 -0
- Dockerfile +15 -0
- model.py +48 -0
- pure_blake3.py +353 -0
- requirements.txt +6 -0
- scan.py +76 -0
- scan_convert.py +4 -0
- scan_hash.py +68 -0
- scan_import.py +0 -0
- scan_main.py +36 -0
- test_main.http +11 -0
.idea/.gitignore
ADDED
@@ -0,0 +1,8 @@
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml
.idea/inspectionProfiles/Project_Default.xml
ADDED
@@ -0,0 +1,14 @@
<component name="InspectionProjectProfileManager">
  <profile version="1.0">
    <option name="myName" value="Project Default" />
    <inspection_tool class="Eslint" enabled="true" level="WARNING" enabled_by_default="true" />
    <inspection_tool class="PyPep8NamingInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
      <option name="ignoredErrors">
        <list>
          <option value="N806" />
          <option value="N802" />
        </list>
      </option>
    </inspection_tool>
  </profile>
</component>
.idea/inspectionProfiles/profiles_settings.xml
ADDED
@@ -0,0 +1,6 @@
<component name="InspectionProjectProfileManager">
  <settings>
    <option name="USE_PROJECT_PROFILE" value="false" />
    <version value="1.0" />
  </settings>
</component>
.idea/misc.xml
ADDED
@@ -0,0 +1,4 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectRootManager" version="2" project-jdk-name="$USER_HOME$/miniconda3" project-jdk-type="Python SDK" />
</project>
.idea/model_scan.iml
ADDED
@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
</module>
.idea/modules.xml
ADDED
@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/model_scan.iml" filepath="$PROJECT_DIR$/.idea/model_scan.iml" />
    </modules>
  </component>
</project>
Dockerfile
ADDED
@@ -0,0 +1,15 @@
FROM clamav/clamav:stable

RUN apk update && apk add --no-cache \
    python3 \
    py3-pip \
    && rm -rf /var/cache/apk/*

WORKDIR /app

COPY requirements.txt requirements.txt
RUN pip3 install -r requirements.txt

COPY . .

CMD ["sh", "-c", "freshclam && python3 scan_main.py"]
model.py
ADDED
@@ -0,0 +1,48 @@
from typing import List, Optional

from pydantic import BaseModel


class ImageScanRequest(BaseModel):
    imageId: int
    url: str
    wait: bool
    scans: List[int]
    callbackUrl: str


class ImageScanTag(BaseModel):
    type: str
    name: str


class ImageScanResponse(BaseModel):
    ok: bool
    error: str
    deleted: bool
    blockedFor: List[str]
    tags: List[ImageScanTag]


class ImageTag(BaseModel):
    tag: str
    id: Optional[int]
    confidence: int


class ImageScanCallbackRequest(BaseModel):
    id: int
    isValid: bool
    tags: List[ImageTag]


class ModelScanRequest(BaseModel):
    callbackUrl: str
    fileUrl: str
    lowPriority: bool
    tasks: List[str]


class ModelScanResponse(BaseModel):
    ok: bool
    error: str
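A minimal request-construction sketch for the models above (illustrative only: the URLs are placeholders, and .json() assumes a Pydantic v1-style API; under Pydantic v2 the equivalent is model_dump_json()):

# Hypothetical payload for the /model-scan endpoint defined in scan_main.py.
req = ModelScanRequest(
    callbackUrl="http://localhost:9999/callback",  # placeholder callback receiver
    fileUrl="https://example.com/model.ckpt",      # placeholder model URL
    lowPriority=False,
    tasks=["Scan", "Hash"],
)
print(req.json())  # JSON body to POST (Pydantic v1-style serialization)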
pure_blake3.py
ADDED
@@ -0,0 +1,353 @@
#! /usr/bin/env python3

# This is a Python port of the Rust reference implementation of BLAKE3:
# https://github.com/BLAKE3-team/BLAKE3/blob/master/reference_impl/reference_impl.rs

from __future__ import annotations
from dataclasses import dataclass

OUT_LEN = 32
KEY_LEN = 32
BLOCK_LEN = 64
CHUNK_LEN = 1024

CHUNK_START = 1 << 0
CHUNK_END = 1 << 1
PARENT = 1 << 2
ROOT = 1 << 3
KEYED_HASH = 1 << 4
DERIVE_KEY_CONTEXT = 1 << 5
DERIVE_KEY_MATERIAL = 1 << 6

IV = [
    0x6A09E667,
    0xBB67AE85,
    0x3C6EF372,
    0xA54FF53A,
    0x510E527F,
    0x9B05688C,
    0x1F83D9AB,
    0x5BE0CD19,
]

MSG_PERMUTATION = [2, 6, 3, 10, 7, 0, 4, 13, 1, 11, 12, 5, 9, 14, 15, 8]


def mask32(x: int) -> int:
    return x & 0xFFFFFFFF


def add32(x: int, y: int) -> int:
    return mask32(x + y)


def rightrotate32(x: int, n: int) -> int:
    return mask32(x << (32 - n)) | (x >> n)


# The mixing function, G, which mixes either a column or a diagonal.
def g(state: list[int], a: int, b: int, c: int, d: int, mx: int, my: int) -> None:
    state[a] = add32(state[a], add32(state[b], mx))
    state[d] = rightrotate32(state[d] ^ state[a], 16)
    state[c] = add32(state[c], state[d])
    state[b] = rightrotate32(state[b] ^ state[c], 12)
    state[a] = add32(state[a], add32(state[b], my))
    state[d] = rightrotate32(state[d] ^ state[a], 8)
    state[c] = add32(state[c], state[d])
    state[b] = rightrotate32(state[b] ^ state[c], 7)


def round(state: list[int], m: list[int]) -> None:
    # Mix the columns.
    g(state, 0, 4, 8, 12, m[0], m[1])
    g(state, 1, 5, 9, 13, m[2], m[3])
    g(state, 2, 6, 10, 14, m[4], m[5])
    g(state, 3, 7, 11, 15, m[6], m[7])
    # Mix the diagonals.
    g(state, 0, 5, 10, 15, m[8], m[9])
    g(state, 1, 6, 11, 12, m[10], m[11])
    g(state, 2, 7, 8, 13, m[12], m[13])
    g(state, 3, 4, 9, 14, m[14], m[15])


def permute(m: list[int]) -> None:
    original = list(m)
    for i in range(16):
        m[i] = original[MSG_PERMUTATION[i]]


def compress(
    chaining_value: list[int],
    block_words: list[int],
    counter: int,
    block_len: int,
    flags: int,
) -> list[int]:
    state = [
        chaining_value[0],
        chaining_value[1],
        chaining_value[2],
        chaining_value[3],
        chaining_value[4],
        chaining_value[5],
        chaining_value[6],
        chaining_value[7],
        IV[0],
        IV[1],
        IV[2],
        IV[3],
        mask32(counter),
        mask32(counter >> 32),
        block_len,
        flags,
    ]

    assert len(block_words) == 16
    block = list(block_words)

    round(state, block)  # round 1
    permute(block)
    round(state, block)  # round 2
    permute(block)
    round(state, block)  # round 3
    permute(block)
    round(state, block)  # round 4
    permute(block)
    round(state, block)  # round 5
    permute(block)
    round(state, block)  # round 6
    permute(block)
    round(state, block)  # round 7

    for i in range(8):
        state[i] ^= state[i + 8]
        state[i + 8] ^= chaining_value[i]

    return state


def words_from_little_endian_bytes(b: bytes) -> list[int]:
    assert len(b) % 4 == 0
    return [int.from_bytes(b[i : i + 4], "little") for i in range(0, len(b), 4)]


# Each chunk or parent node can produce either an 8-word chaining value or, by
# setting the ROOT flag, any number of final output bytes. The Output struct
# captures the state just prior to choosing between those two possibilities.
@dataclass
class Output:
    input_chaining_value: list[int]
    block_words: list[int]
    counter: int
    block_len: int
    flags: int

    def chaining_value(self) -> list[int]:
        return compress(
            self.input_chaining_value,
            self.block_words,
            self.counter,
            self.block_len,
            self.flags,
        )[:8]

    def root_output_bytes(self, length: int) -> bytes:
        output_bytes = bytearray()
        i = 0
        while i < length:
            words = compress(
                self.input_chaining_value,
                self.block_words,
                i // 64,
                self.block_len,
                self.flags | ROOT,
            )
            # The output length might not be a multiple of 4.
            for word in words:
                word_bytes = word.to_bytes(4, "little")
                take = min(len(word_bytes), length - i)
                output_bytes.extend(word_bytes[:take])
                i += take
        return output_bytes


@dataclass
class ChunkState:
    chaining_value: list[int]
    chunk_counter: int
    block: bytearray
    block_len: int
    blocks_compressed: int
    flags: int

    def __init__(self, key_words: list[int], chunk_counter: int, flags: int) -> None:
        self.chaining_value = key_words
        self.chunk_counter = chunk_counter
        self.block = bytearray(BLOCK_LEN)
        self.block_len = 0
        self.blocks_compressed = 0
        self.flags = flags

    def len(self) -> int:
        return BLOCK_LEN * self.blocks_compressed + self.block_len

    def start_flag(self) -> int:
        if self.blocks_compressed == 0:
            return CHUNK_START
        else:
            return 0

    def update(self, input_bytes: bytes) -> None:
        while input_bytes:
            # If the block buffer is full, compress it and clear it. More
            # input_bytes is coming, so this compression is not CHUNK_END.
            if self.block_len == BLOCK_LEN:
                block_words = words_from_little_endian_bytes(self.block)
                self.chaining_value = compress(
                    self.chaining_value,
                    block_words,
                    self.chunk_counter,
                    BLOCK_LEN,
                    self.flags | self.start_flag(),
                )[:8]
                self.blocks_compressed += 1
                self.block = bytearray(BLOCK_LEN)
                self.block_len = 0

            # Copy input bytes into the block buffer.
            want = BLOCK_LEN - self.block_len
            take = min(want, len(input_bytes))
            self.block[self.block_len : self.block_len + take] = input_bytes[:take]
            self.block_len += take
            input_bytes = input_bytes[take:]

    def output(self) -> Output:
        block_words = words_from_little_endian_bytes(self.block)
        return Output(
            self.chaining_value,
            block_words,
            self.chunk_counter,
            self.block_len,
            self.flags | self.start_flag() | CHUNK_END,
        )


def parent_output(
    left_child_cv: list[int],
    right_child_cv: list[int],
    key_words: list[int],
    flags: int,
) -> Output:
    return Output(
        key_words, left_child_cv + right_child_cv, 0, BLOCK_LEN, PARENT | flags
    )


def parent_cv(
    left_child_cv: list[int],
    right_child_cv: list[int],
    key_words: list[int],
    flags: int,
) -> list[int]:
    return parent_output(
        left_child_cv, right_child_cv, key_words, flags
    ).chaining_value()


# An incremental hasher that can accept any number of writes.
@dataclass
class Hasher:
    chunk_state: ChunkState
    key_words: list[int]
    cv_stack: list[list[int]]
    flags: int

    def _init(self, key_words: list[int], flags: int) -> None:
        assert len(key_words) == 8
        self.chunk_state = ChunkState(key_words, 0, flags)
        self.key_words = key_words
        self.cv_stack = []
        self.flags = flags

    # Construct a new `Hasher` for the regular hash function.
    def __init__(self) -> None:
        self._init(IV, 0)

    # Construct a new `Hasher` for the keyed hash function.
    @classmethod
    def new_keyed(cls, key: bytes) -> Hasher:
        keyed_hasher = cls()
        key_words = words_from_little_endian_bytes(key)
        keyed_hasher._init(key_words, KEYED_HASH)
        return keyed_hasher

    # Construct a new `Hasher` for the key derivation function. The context
    # string should be hardcoded, globally unique, and application-specific.
    @classmethod
    def new_derive_key(cls, context: str) -> Hasher:
        context_hasher = cls()
        context_hasher._init(IV, DERIVE_KEY_CONTEXT)
        context_hasher.update(context.encode("utf8"))
        context_key = context_hasher.finalize(KEY_LEN)
        context_key_words = words_from_little_endian_bytes(context_key)
        derive_key_hasher = cls()
        derive_key_hasher._init(context_key_words, DERIVE_KEY_MATERIAL)
        return derive_key_hasher

    # Section 5.1.2 of the BLAKE3 spec explains this algorithm in more detail.
    def add_chunk_chaining_value(self, new_cv: list[int], total_chunks: int) -> None:
        # This chunk might complete some subtrees. For each completed subtree,
        # its left child will be the current top entry in the CV stack, and
        # its right child will be the current value of `new_cv`. Pop each left
        # child off the stack, merge it with `new_cv`, and overwrite `new_cv`
        # with the result. After all these merges, push the final value of
        # `new_cv` onto the stack. The number of completed subtrees is given
        # by the number of trailing 0-bits in the new total number of chunks.
        while total_chunks & 1 == 0:
            new_cv = parent_cv(self.cv_stack.pop(), new_cv, self.key_words, self.flags)
            total_chunks >>= 1
        self.cv_stack.append(new_cv)

    # Add input to the hash state. This can be called any number of times.
    def update(self, input_bytes: bytes) -> None:
        while input_bytes:
            # If the current chunk is complete, finalize it and reset the
            # chunk state. More input is coming, so this chunk is not ROOT.
            if self.chunk_state.len() == CHUNK_LEN:
                chunk_cv = self.chunk_state.output().chaining_value()
                total_chunks = self.chunk_state.chunk_counter + 1
                self.add_chunk_chaining_value(chunk_cv, total_chunks)
                self.chunk_state = ChunkState(self.key_words, total_chunks, self.flags)

            # Compress input bytes into the current chunk state.
            want = CHUNK_LEN - self.chunk_state.len()
            take = min(want, len(input_bytes))
            self.chunk_state.update(input_bytes[:take])
            input_bytes = input_bytes[take:]

    # Finalize the hash and write any number of output bytes.
    def finalize(self, length: int = OUT_LEN) -> bytes:
        # Starting with the Output from the current chunk, compute all the
        # parent chaining values along the right edge of the tree, until we
        # have the root Output.
        output = self.chunk_state.output()
        parent_nodes_remaining = len(self.cv_stack)
        while parent_nodes_remaining > 0:
            parent_nodes_remaining -= 1
            output = parent_output(
                self.cv_stack[parent_nodes_remaining],
                output.chaining_value(),
                self.key_words,
                self.flags,
            )
        return output.root_output_bytes(length)


# If this file is executed directly, hash standard input.
if __name__ == "__main__":
    import sys

    hasher = Hasher()
    while buf := sys.stdin.buffer.read(65536):
        hasher.update(buf)
    print(hasher.finalize().hex())
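A quick self-consistency sketch for this port (the test data is an arbitrary assumption, not a published test vector): feeding the same bytes incrementally or in one shot should yield the same digest, and finalize() can also produce extended output.

import pure_blake3

data = b"hello blake3" * 1000  # arbitrary test input

one_shot = pure_blake3.Hasher()
one_shot.update(data)

incremental = pure_blake3.Hasher()
for i in range(0, len(data), 123):  # deliberately uneven slice size
    incremental.update(data[i:i + 123])

assert one_shot.finalize() == incremental.finalize()
print(one_shot.finalize().hex())    # 32-byte default output
print(one_shot.finalize(64).hex())  # extended 64-byte output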
requirements.txt
ADDED
@@ -0,0 +1,6 @@
crcmod
pyclamd
uvicorn[standard]
fastapi
requests
picklescan
scan.py
ADDED
@@ -0,0 +1,76 @@
import pyclamd

from picklescan.scanner import (
    scan_url,
    ScanResult, SafetyLevel
)


# def scan_file(file_path: str):
#     ret = scan_pickle_bytes(io.BytesIO(pickle.dumps(file_path)), "file.pkl")
#     print(ret)


def scan_file(file_path: str):
    if file_path.startswith("http"):
        scan_result: ScanResult = scan_url(file_path)
    else:
        return None

    globalImports = list(map(lambda x: fmt_import(x.module, x.name), scan_result.globals))
    dangerousImports = list(map(lambda x: fmt_import(x.module, x.name),
                                filter(lambda x: x.safety == SafetyLevel.Dangerous, scan_result.globals)))
    if len(dangerousImports) > 0:
        picklescanExitCode = 1
    else:
        picklescanExitCode = 0
    return {
        'url': file_path,
        'fileExists': True,
        'picklescanExitCode': picklescanExitCode,
        'picklescanGlobalImports': globalImports,
        'picklescanDangerousImports': dangerousImports,
        # 'clamscanExitCode': ScanExitCode,
        # 'clamscanOutput': string,
        # hashes: Record < ModelHashType, string >;
        # conversions: Record < 'safetensors' | 'ckpt', ConversionResult >;
    }


def init_clamd():
    clamd = pyclamd.ClamdUnixSocket()
    return clamd


def clamd_file(file_path: str, clamd):
    if file_path.startswith("http"):
        import urllib.request
        tmp_path = f'/tmp/clamd_{file_path.split("/")[-1]}'
        urllib.request.urlretrieve(file_path, tmp_path)
        ret = clamd.scan_file(tmp_path)
        if ret is None:
            return {
                'clamscanExitCode': 0,
                'clamscanOutput': "No virus found",
            }
        elif tmp_path in ret:  # clamd keys its results by the path it scanned
            return {
                'clamscanExitCode': 1,
                'clamscanOutput': ' '.join(ret[tmp_path]),
            }
    return None


def fmt_import(module: str, name: str):
    return f"from {module} import {name}"


if __name__ == "__main__":
    clamd_exec = init_clamd()
    detail = scan_file("https://huggingface.co/yesyeahvh/bad-hands-5/resolve/main/bad-hands-5.pt")
    clamd_detail = clamd_file("https://huggingface.co/yesyeahvh/bad-hands-5/resolve/main/bad-hands-5.pt", clamd_exec)
    print(detail)
    print(clamd_detail)
    # ScanResult(
    #     globals=[Global(module='torch', name='FloatStorage', safety=<SafetyLevel.Innocuous: 'innocuous'>),
    #              Global(module='collections', name='OrderedDict', safety=<SafetyLevel.Innocuous: 'innocuous'>),
    #              Global(module='torch._utils', name='_rebuild_tensor_v2', safety=<SafetyLevel.Innocuous: 'innocuous'>)],
    #     scanned_files=1, issues_count=0, infected_files=0, scan_err=False)
scan_convert.py
ADDED
@@ -0,0 +1,4 @@
def convert_and_upload(from_type: str, target_type: str):
    pass
scan_hash.py
ADDED
@@ -0,0 +1,68 @@
import hashlib
import os.path
from typing import Dict
import binascii
import crcmod
import pure_blake3


def get_hash_string(hash_bytes: bytes) -> str:
    return binascii.hexlify(hash_bytes).decode()


# Short hash over 0x10000 bytes starting at offset 0x100000; returns None for
# files smaller than 2 MiB, where that window does not exist.
def compute_AutoV1Hash(file_stream) -> str:
    minFileSize = 0x100000 * 2
    if file_stream.seek(0, 2) < minFileSize:
        return None
    file_stream.seek(0x100000)
    buffer = file_stream.read(0x10000)
    hashBytes = hashlib.sha256(buffer).digest()
    hashString = get_hash_string(hashBytes)
    return hashString[:8]


def ComputeCRC32Hash(file_stream) -> str:
    crc32 = crcmod.predefined.Crc('crc-32c')
    file_stream.seek(0)
    for chunk in iter(lambda: file_stream.read(4096), b""):
        crc32.update(chunk)
    return get_hash_string(crc32.digest())


def generate_model_hashes(file_path: str) -> Dict[str, str]:
    if file_path.startswith("http"):
        import urllib.request
        tmp_path = f'/tmp/clamd_{file_path.split("/")[-1]}'
        if not os.path.exists(tmp_path):
            urllib.request.urlretrieve(file_path, tmp_path)
        file_path = tmp_path

    sha256 = hashlib.sha256()
    blake3Hasher = pure_blake3.Hasher()

    with open(file_path, "rb") as fileStream:
        for chunk in iter(lambda: fileStream.read(4096), b""):
            sha256.update(chunk)
            blake3Hasher.update(chunk)
    sha256HashString = get_hash_string(sha256.digest())
    autoV1HashString = compute_AutoV1Hash(open(file_path, "rb"))
    autoV2HashString = sha256HashString[:10]
    blake3HashString = blake3Hasher.finalize().hex()
    crc32HashString = ComputeCRC32Hash(open(file_path, "rb"))

    result = {
        "SHA256": sha256HashString,
        "AutoV1": autoV1HashString,
        "AutoV2": autoV2HashString,
        "BLAKE3": blake3HashString,
        "CRC32": crc32HashString,
    }

    return result


if __name__ == "__main__":
    print(generate_model_hashes(".gitignore"))
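A small self-check sketch for these helpers (the temporary file and its contents are illustrative assumptions): AutoV2 is defined above as the first 10 hex characters of the full SHA-256, so the two values must agree, and AutoV1 is None here because the input is smaller than the 2 MiB minimum.

import hashlib
import tempfile

from scan_hash import generate_model_hashes

with tempfile.NamedTemporaryFile(suffix=".bin", delete=False) as tmp:
    tmp.write(b"example model bytes")  # placeholder contents
    tmp_path = tmp.name

hashes = generate_model_hashes(tmp_path)
expected_sha256 = hashlib.sha256(b"example model bytes").hexdigest()

assert hashes["SHA256"] == expected_sha256
assert hashes["AutoV2"] == expected_sha256[:10]
assert hashes["AutoV1"] is None  # below the 2 MiB AutoV1 threshold
print(hashes)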
scan_import.py
ADDED
File without changes
scan_main.py
ADDED
@@ -0,0 +1,36 @@
import requests
import uvicorn
from fastapi import BackgroundTasks, FastAPI
import model
from scan import init_clamd, scan_file, clamd_file
from scan_hash import generate_model_hashes

app = FastAPI()

# Initialized in __main__; background tasks read this module-level handle.
clamd_exec = None


def write_scan_model_result(req: model.ModelScanRequest):
    ret = {}
    if 'Scan' in req.tasks:
        detail = scan_file(req.fileUrl)
        clamd_detail = clamd_file(req.fileUrl, clamd_exec)
        # The scan helpers return None for unsupported inputs, so guard the merges.
        if detail:
            ret |= detail
        if clamd_detail:
            ret |= clamd_detail
    if 'Hash' in req.tasks:
        ret['hashes'] = generate_model_hashes(req.fileUrl)

    try:
        requests.post(req.callbackUrl, json=ret)
    except Exception as ex:
        print(ex)


@app.post("/model-scan")
async def model_scan_handler(req: model.ModelScanRequest, background_tasks: BackgroundTasks):
    background_tasks.add_task(write_scan_model_result, req)
    return model.ModelScanResponse(ok=True, error="")


if __name__ == "__main__":
    clamd_exec = init_clamd()
    uvicorn.run(app, host="0.0.0.0", port=7860)
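A client-side sketch for exercising the endpoint (the host and port follow the uvicorn.run call above, and the field names come from model.ModelScanRequest; the URLs themselves are placeholders):

import requests

payload = {
    "callbackUrl": "http://localhost:9999/callback",  # placeholder callback receiver
    "fileUrl": "https://example.com/model.ckpt",      # placeholder model URL
    "lowPriority": False,
    "tasks": ["Scan", "Hash"],
}
resp = requests.post("http://127.0.0.1:7860/model-scan", json=payload)
print(resp.json())  # expected: {"ok": true, "error": ""} while the scan runs in the background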
test_main.http
ADDED
@@ -0,0 +1,11 @@
# Test your FastAPI endpoints

GET http://127.0.0.1:8000/
Accept: application/json

###

GET http://127.0.0.1:8000/hello/User
Accept: application/json

###