from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse

import subprocess
import urllib.request
import os
import json
import uuid

import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler, EulerDiscreteScheduler
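
# FastAPI service that generates Stable Diffusion images (or fetches Picsum photos),
# writes the generation details to an assertion JSON file, and signs each output with
# the truepic-sign CLI into ./static, which is mounted below as the static file root.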

app = FastAPI() 
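
# Example request (all values are placeholders; `model` can be any Hugging Face model
# ID or local path accepted by StableDiffusionPipeline.from_pretrained):
#   GET /generate?prompt=a+lighthouse+at+dusk&inference_steps=25&model=<model-id-or-path>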

@app.get("/generate")
def generate_image(prompt, inference_steps, model):
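    # Release cached GPU memory from any previous request and report CUDA availability.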
    torch.cuda.empty_cache()
    print(f"Is CUDA available: {torch.cuda.is_available()}")

    # Load the requested model as a half-precision Stable Diffusion pipeline.
    pipeline = StableDiffusionPipeline.from_pretrained(str(model), torch_dtype=torch.float16)

    # DPMSolverMultistepScheduler is an alternative scheduler; Euler discrete is used here.
    # pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
    pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)

    # Run inference on the GPU.
    pipeline = pipeline.to("cuda")
    # Generate a single 512x512 image for the prompt.
    image = pipeline(prompt, num_inference_steps=int(inference_steps), height=512, width=512).images[0]

    # Save the unsigned image under a unique filename before signing.
    filename = str(uuid.uuid4()) + ".jpg"
    image.save(filename)
    
    print(filename)
    
    # Build the assertion recording the model and prompt used to generate the image.
    assertion = {
        "assertions": [
            {
                "label": "com.truepic.custom.ai",
                "data": {
                    "model_name": model,
                    "model_version": "1.0",
                    "prompt": prompt,
                },
            }
        ]
    }

    json_object = json.dumps(assertion, indent=4)
    with open("assertion.json", "w") as outfile:
        outfile.write(json_object)
    
    # Sign the image with truepic-sign, attaching the assertion file, and write the signed copy into ./static.
    subprocess.check_output(['./truepic-sign', 'sign', filename, '--profile', 'demo', '--assertions', 'assertion.json', '--output', (os.getcwd() + '/static/' + filename)])

    return {"response": filename}
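
# Example request (placeholder photo ID; Picsum image IDs are numeric):
#   GET /generate-picsum?prompt=237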


@app.get("/generate-picsum")
def generate_picsum(prompt):
    # `prompt` is used here as a Picsum photo ID; fetch the 800x800 image to a temporary file.
    local_filename, headers = urllib.request.urlretrieve(('https://picsum.photos/id/' + prompt + '/800/800'))

    # Build the assertion; the Picsum photo ID is recorded as the prompt.
    assertion = {
        "assertions": [
            {
                "label": "com.truepic.custom.ai",
                "data": {
                    "model_name": "Picsum",
                    "model_version": "1.0",
                    "prompt": prompt,
                },
            }
        ]
    }
    
    json_object = json.dumps(assertion, indent=4)
    with open("assertion.json", "w") as outfile:
        outfile.write(json_object)
    
    # Sign the downloaded photo with truepic-sign and write the signed copy to ./static/output.jpg.
    subprocess.check_output(['./truepic-sign', 'sign', local_filename, '--profile', 'demo', '--assertions', 'assertion.json', '--output', (os.getcwd() + '/static/output.jpg')])
    return {"response": "success"}

# Serve ./static at the site root; html=True makes StaticFiles serve index.html for "/".
app.mount("/", StaticFiles(directory="static", html=True), name="static")

# Note: the mount above is registered first and matches "/", so this route is
# effectively shadowed by the static mount.
@app.get("/")
def index() -> FileResponse:
    return FileResponse(path="/app/static/index.html", media_type="text/html")