File size: 5,265 Bytes
e7028c3
5da5ea4
 
138b43d
 
601e777
4835551
a22a765
138b43d
dc7bf5b
 
1b2a8d8
 
 
7c4e0db
5da5ea4
1b2a8d8
5da5ea4
6a3f247
1b2a8d8
dc7bf5b
 
e7028c3
8a4f802
 
5da5ea4
 
 
 
f9a575b
5da5ea4
 
8a4f802
 
 
 
 
d800787
e645169
07e1ae9
3f20d54
 
07e1ae9
 
 
 
 
9de793a
4b5e761
3f20d54
536a523
 
 
 
 
8a4f802
be418d5
f9a575b
f836602
 
 
f9a575b
8a4f802
 
 
 
2dd9fd5
9de793a
8a4f802
 
5da5ea4
 
1b2a8d8
80b9eac
a892af4
 
3f20d54
df42ef1
a892af4
1b2a8d8
b0e790f
536a523
1b2a8d8
 
 
8a4f802
4b5e761
8a4f802
4b5e761
1746d78
 
8a4f802
dc7bf5b
 
c56b678
8a4f802
 
1e16f51
 
 
 
 
 
0eb49bb
1e16f51
0eb49bb
 
696e325
8a4f802
0eb49bb
8a4f802
 
0eb49bb
 
 
80b9eac
 
0eb49bb
 
 
8a4f802
 
0eb49bb
8a4f802
 
 
 
 
 
 
1e16f51
696e325
8a4f802
 
 
 
dc7bf5b
 
8a4f802
80b9eac
8a4f802
 
5cd22ee
a30c8bf
dc7bf5b
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
import gradio as gr

import torch
from diffusers import DDPMPipeline, DDIMPipeline, PNDMPipeline

# Model selection. The 128px checkpoint is kept here for reference; the 64px
# model is the one actually used (the first assignment was dead code — it was
# immediately overwritten — so it is preserved as a comment instead).
#MODEL_ID = "verkaDerkaDerk/tiki-based-128"
MODEL_ID = "verkaDerkaDerk/tiki-64"
# Loaded once at import time; downloads weights from the HuggingFace Hub on
# first run.
PIPELINE = DDPMPipeline.from_pretrained(MODEL_ID)

#############################################################################

def tiki(batch_size=1, seed=0):
    """Sample `batch_size` tiki images from the pretrained DDPM pipeline.

    Seeds the *global* torch RNG so a given seed reproduces the same images.
    Returns the pipeline output's "sample" entry (a list of PIL images).
    """
    rng = torch.manual_seed(seed)
    result = PIPELINE(generator=rng, batch_size=batch_size)
    return result["sample"]
    
def imagine4():
    """Convenience wrapper: generate a batch of four tiki images."""
    images = tiki(batch_size=4)
    return images
    
def imagine():
    """Generate one tiki image and return it directly (not wrapped in a list)."""
    images = tiki()
    return images[0]
    
#############################################################################    

def fancy():
    """Build and launch the Blocks-based UI.

    Layout: an intro HTML blurb, a "Generate image" button, a 4-wide image
    gallery, and two sliders (image count, seed) wired to `tiki`. Blocks
    queueing is enabled before launch because generation takes minutes.
    """
    # try some https://huggingface.co/spaces/stabilityai/stable-diffusion/blob/main/app.py trix
    # NOTE(review): "#.center" is not a valid CSS selector — presumably
    # ".center" was intended; the <center> tag below does the job anyway.
    css = '''
        .output_image   {height: 40rem !important; width: 100% !important;  } 
        .object-contain {height: 256px !important; width: 256px !important; }
        #gallery img    { height: 256px !important; width: 256px !important; }
        #.center        { text-align: center; }  
    '''
    block = gr.Blocks(css=css)
    with block:
        # Intro text + sample image shown above the controls.
        gr.HTML('''
            <pre>
            This is an unconditioned diffusion model trained on around 500 
            miscellaneous tiki images from around the web.
                
            It was trained for around 4k epochs with a loss around 1.5%.
            The 64x64 version (used here) took around 12s/epoch.

            Despite the loss staying about the same, the visual quality
            continue[sd] to improve. Occasionally, the tiki generated 
            require some suspension of disbelief. 
            
            More training in the near future, but the current model is 
            good enough to provide some limited play value.
                
            Image generation is slow, from 80s - 120s per image.
            When running 4 images concurrently it takes 4-5 minutes total.
            
            Despite the long wait time, it's more fun to generate 4 at a time.
            
            Different "tiki" values will give different tiki images if you can
            imagine.
            </pre>
             
            <p class="center">
                <center whatever="i know, i know...">
                    <img src="https://freeimghost.net/images/2022/08/23/tiki-600e.md.png" height="256">
                </center>
            </p>
        ''')
        with gr.Group():
            with gr.Box():
                with gr.Row():
                    btn = gr.Button("Generate image")
                       
            # Output gallery; grid of up to 4 images per row.
            gallery = gr.Gallery(
                label="Generated images", show_label=False, elem_id="gallery"
            ).style(grid=[4], height="256")
            
            #btn.click(imagine4, inputs=None, outputs=gallery)
            
            # Default seed for the slider: a random draw is computed but then
            # deliberately overridden by hard-coded favorites (last one wins).
            maximum = 4294967296  # 2**32, upper bound for the seed slider
            seed = torch.randint(maximum,[1])[0].item()
            seed = 2607725669 # lulz
            seed = 917832826
            
            with gr.Row(elem_id="tiki-options"):
                batch_size = gr.Slider(label="image count", minimum=1, maximum=4, value=1, step=1)
                # `seed` is rebound here from an int default to the Slider component.
                seed = gr.Slider(label="tiki", minimum=0, maximum=maximum, value=seed, step=1)

            # Wire the button to the generator: slider values become
            # tiki(batch_size, seed); the returned image list fills the gallery.
            btn.click(tiki, inputs=[batch_size,seed], outputs=gallery)
 
            gr.HTML('''
                <p>Trained with <a href="https://github.com/huggingface/diffusers">huggingface/diffusers</a>.</p>
            ''')

    #block.queue(max_size=40).launch()
    # Queueing avoids request timeouts during the multi-minute generation.
    block.queue().launch()

#############################################################################

def plain():
    """Launch the simpler gr.Interface version of the demo.

    Single no-input interface whose button calls `imagine` and renders one
    PIL image. Kept alongside `fancy()` as an alternative front end.
    """
    # trix from https://tmabraham.github.io/blog/gradio_hf_spaces_tutorial
    title = "Tiki Diffusion Model"
    description = '''
        Diffusion model trained on random tiki images. 
        FIXME:
            - runs very slow 120s - 240s
            - image is weirdly stretched
    '''
    article = gr.HTML('''
        <p>Trained with <a href="https://github.com/huggingface/diffusers">diffusers</a>.</p>
    ''')
    
    # ValueError: The parameter `examples` must either be a string directory or a list(if there is only 1 input component) or (more generally), a nested list, where each sublist represents a set of inputs.
    # (hence `examples` is left commented out in the Interface call below)
    examples = ['tiki-600e.png']
    
    # NOTE(review): `interpretation` and `enable_queue` are legacy
    # gr.Interface kwargs — confirm they exist in the pinned gradio version.
    interpretation = 'default' # no idea...
    enable_queue = True
    
    # https://github.com/gradio-app/gradio/issues/287
    css = '''
        .output_image   {height: 40rem !important; width: 100% !important;  } 
        .object-contain {height: 256px !important; width: 256px !important; }
    '''
    # css = ".output-image, .input-image, .image-preview {height: 600px !important}"
    
    # No input components; output is rendered as a PIL image.
    inputs = None
    outputs = "pil"
    
    gr.Interface(
        fn=imagine, 
        inputs=inputs,     
        outputs=outputs,
        title=title,
        description=description,
        article=article,
        css=css,
        #examples=examples,
        interpretation=interpretation,
        enable_queue=enable_queue
    ).launch()

#############################################################################

def main(use_fancy=True):
    """Entry point: launch one of the two UIs.

    Args:
        use_fancy: when True (the default, matching the old hard-coded
            ``if True`` toggle) launch the Blocks-based ``fancy()`` UI;
            otherwise launch the simpler ``plain()`` gr.Interface.
    """
    if use_fancy:
        return fancy()
    return plain()

        
# Only launch the app when run as a script, not when imported as a module.
if __name__ == "__main__":
    main()

# EOF
#############################################################################