Update README.md #1
by avilum - opened

README.md CHANGED
@@ -1,3 +1,39 @@
---
license: apache-2.0
---

# Download the llamafile
- Go to https://huggingface.co/aviol/TinyLlama1.1-llamafile-bootstraped/blob/main/TinyLlama-1.1B.llamafile
- Download the file using the download button (or fetch it from the command line as sketched below).

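If you prefer the command line, here is a minimal sketch using `curl`. The direct-download path below is an assumption (the usual Hugging Face `resolve/main` form of the blob URL above); adjust it if the file is hosted elsewhere.

```shell
# Assumed direct-download URL, derived from the repo blob link above
curl -L -o TinyLlama-1.1B.llamafile \
  https://huggingface.co/aviol/TinyLlama1.1-llamafile-bootstraped/resolve/main/TinyLlama-1.1B.llamafile
```
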
# Run the server
```shell
chmod +x TinyLlama-1.1B.llamafile

./TinyLlama-1.1B.llamafile --server --host 0.0.0.0 --port 1234
```

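Once the server is running, you can sanity-check it before wiring up the SDK. A minimal sketch with `curl`, assuming the server exposes the same OpenAI-compatible `/v1/chat/completions` route that the Python example below relies on:

```shell
# Send one chat completion request to the local llamafile server
# (route and payload mirror the OpenAI-style API used in the Python example below)
curl http://127.0.0.1:1234/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "avi-llmsky", "messages": [{"role": "user", "content": "Say hello"}]}'
```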

# Use the LLM with the OpenAI SDK
```python
from openai import OpenAI

# Point the OpenAI client at the local llamafile server (dummy api_key)
client = OpenAI(base_url="http://127.0.0.1:1234/v1", api_key="test")

# Prompt
prompt = "Hi, tell me something new about AppSec"

# Send a streaming API request to the llamafile server
stream = client.chat.completions.create(
    model="avi-llmsky",
    messages=[{"role": "user", "content": prompt}],
    stream=True,
)

# Print the streamed response as it arrives
for chunk in stream:
    if chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="")
```
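
To run the example end-to-end, install the SDK and execute the script while the server from the previous step is still running (the script file name here is only illustrative):

```shell
pip install openai
# "tinyllama_client.py" is a placeholder name for the Python example above
python tinyllama_client.py
```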