Esmail-AGumaan committed
Commit • 33e1004
1 Parent(s): 4ea8388

Upload 6 files

- engine.py +36 -0
- get-pip.py +0 -0
- main.py +63 -0
- requirements.txt +8 -0
- setup.py +29 -0
- theme.js +31 -0
engine.py
ADDED
@@ -0,0 +1,36 @@
+from fastapi import FastAPI, HTTPException
+from fastapi.responses import FileResponse
+from fastapi.staticfiles import StaticFiles
+from pydantic import BaseModel
+import uvicorn
+
+from nano_engine import generate_image, apply_blueprint
+
+app = FastAPI()
+
+# Mount the 'static' directory to serve JavaScript, CSS, and HTML files
+app.mount("/static", StaticFiles(directory="static"), name="static")
+
+class ImageRequest(BaseModel):
+    blueprint_name: str
+
+@app.post("/generate-image/")
+async def generate_image_endpoint(request: ImageRequest):
+    # Resolve the blueprint and generate an image with the nano_engine helpers
+    sd_prompts, sd_cfg_scale, sd_num_inference_steps, sd_sampler, ollama_model, ollama_prompt = apply_blueprint(request.blueprint_name)
+    print(f"Sampler value: {sd_sampler}")  # debugging aid
+    image = generate_image(sd_prompts, sd_cfg_scale, sd_num_inference_steps, sd_sampler)
+
+    # Save the image and return its path
+    image_path = "static/generated_image.png"
+    image.save(image_path)
+
+    return {"image_url": f"/{image_path}"}
+
+
+@app.get("/")  # homepage; the /static mount above handles asset requests
+async def get_homepage():
+    return FileResponse("static/index.html")
+
+if __name__ == "__main__":
+    uvicorn.run(app, host="0.0.0.0", port=8000)
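As a quick sanity check of the new endpoint, the sketch below (not part of this commit) posts a blueprint name to the running server and downloads the generated image. It assumes the app is serving on localhost:8000, that the `requests` package is installed, and that a blueprint named "default" exists in nano_engine; adjust both to your setup.

import requests

# Hypothetical client-side check; "default" is a placeholder blueprint name.
resp = requests.post(
    "http://localhost:8000/generate-image/",
    json={"blueprint_name": "default"},
)
resp.raise_for_status()
image_url = resp.json()["image_url"]

# Fetch the PNG that engine.py saved under static/.
image = requests.get(f"http://localhost:8000{image_url}")
with open("generated_image.png", "wb") as f:
    f.write(image.content)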
get-pip.py
ADDED
The diff for this file is too large to render.
See raw diff
main.py
ADDED
@@ -0,0 +1,63 @@
+import nanograd
+
+from nanograd.RL import Cartpole, car  # reinforcement learning package
+# Cartpole.run()
+# car.run()
+
+###############################################################
+from nanograd.models.stable_diffusion import sd_inference
+sd_inference.run()
+
+##############################################################
+from nanograd.analysis_lab import sentiment_analysis
+# sentiment_analysis.run()
+
+############################################################
+from nanograd import generate_dataset
+
+# generate_dataset.tokenize()
+
+###########################################################
+
+from nanograd.models.llama import inference_llama
+from nanograd.models.GPT import inference_gpt
+from nanograd.models.GPT import tokenizer
+
+# inference_gpt.use_model()
+
+# inference_llama.use_model()
+
+# tokenizer.run_tokenizer()
+###########################################################
+from nanograd.models import ollama
+from nanograd.models import chat
+# ollama.run()  # test the model.
+# chat.chat_with_models()
+# chat.chat_models()
+###################################################
+
+
+# if __name__ == "__main__":
+#     from nanograd.nn.engine import Value
+
+#     a = Value(-4.0)
+#     b = Value(2.0)
+#     c = a + b
+#     d = a + b + b**3
+#     c += c + 1
+#     c += 1 + c + (-a)
+#     d += d * 2 + (b + a).relu()
+#     d += 3 * d + (b - a).relu()
+#     d += 3 * d + (b - a).sigmoid(5)
+#     e = c - d
+#     f = e**2
+#     g = f / 2.0
+#     g += 10.0 / f
+#     print(f'{g.data:.4f}')
+#     g.backward()
+#     print(f'{a.grad:.4f}')
+#     print(f'{b.grad:.4f}')
+#     print(f'{e.grad:.4f}')
+
+
+# import nanograd.nn.train_nn
requirements.txt
ADDED
@@ -0,0 +1,8 @@
+torch>=1.9.0
+torchvision>=0.10.0
+torchaudio>=0.9.0
+numpy
+pandas
+matplotlib
+transformers
+litgpt
setup.py
ADDED
@@ -0,0 +1,29 @@
+from setuptools import setup, find_packages
+
+setup(
+    name='nanograd',
+    version='0.1.0',
+    packages=find_packages(),
+    install_requires=[
+        'torch',
+        'argparse',
+        'tensorboard',
+        'wget',
+        'transformers',
+        'litgpt',
+        'tiktoken',
+        'sentencepiece',
+        'tqdm',
+        'regex',
+        'gradio',
+        'trl',
+        'datasets',
+        'gym',
+        'tinygrad',
+    ],
+    entry_points={
+        'console_scripts': [
+            'nanograd=nanograd.nanograd_CLI:main',
+        ],
+    },
+)
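The console_scripts entry point means that installing the package (for example with `pip install -e .`) exposes a `nanograd` command that dispatches to `nanograd.nanograd_CLI:main`. That module is not included in this commit; purely as an illustration of the contract the entry point expects, a minimal `main` could look like the hypothetical sketch below.

# Hypothetical nanograd/nanograd_CLI.py -- illustration only, not the module shipped here.
import argparse

def main():
    parser = argparse.ArgumentParser(prog="nanograd")
    parser.add_argument("command", choices=["version"], help="subcommand to run")
    args = parser.parse_args()
    if args.command == "version":
        print("nanograd 0.1.0")

if __name__ == "__main__":
    main()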
theme.js
ADDED
@@ -0,0 +1,31 @@
+function createGradioAnimation() {
+    var container = document.createElement('div');
+    container.id = 'gradio-animation';
+    container.style.fontSize = '2em';
+    container.style.fontWeight = 'bold';
+    container.style.textAlign = 'center';
+    container.style.marginBottom = '20px';
+
+    var text = 'Welcome to nanograd Engine!';
+    for (var i = 0; i < text.length; i++) {
+        (function(i){
+            setTimeout(function(){
+                var letter = document.createElement('span');
+                letter.style.opacity = '0';
+                letter.style.transition = 'opacity 0.5s';
+                letter.innerText = text[i];
+
+                container.appendChild(letter);
+
+                setTimeout(function() {
+                    letter.style.opacity = '1';
+                }, 50);
+            }, i * 250);
+        })(i);
+    }
+
+    var gradioContainer = document.querySelector('.gradio-container');
+    gradioContainer.insertBefore(container, gradioContainer.firstChild);
+
+    return 'Animation created';
+}
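theme.js only defines createGradioAnimation; for it to run, the function has to be handed to the Gradio app on the Python side. A minimal sketch of that wiring is below (not part of this commit); it assumes a Gradio version whose gr.Blocks accepts a `js` argument that executes the supplied function on page load, and the Markdown content is a placeholder.

# Hypothetical app wiring -- illustration only.
import gradio as gr

with open("theme.js") as f:
    animation_js = f.read()  # the createGradioAnimation function as a string

with gr.Blocks(js=animation_js) as demo:
    gr.Markdown("nanograd Engine demo")  # placeholder content

demo.launch()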