Spaces:
Sleeping
Sleeping
RashiAgarwal
committed on
Commit
•
de107f0
1
Parent(s):
bf86a33
Update app.py
Browse files
app.py
CHANGED
@@ -82,8 +82,8 @@ def nanogpt(start:str , max_new_tokens = 500, num_samples =2):
|
|
82 |
output = decode(y[0].tolist())
|
83 |
return output
|
84 |
|
85 |
-
INTERFACE = gr.Interface(fn=nanogpt, inputs=[gr.Textbox(label= "Prompt", value= '
|
86 |
-
gr.Slider(minimum = 300, maximum = 500,
|
87 |
outputs=gr.Text(label= "Generated Text"), title="NanoGPT",
|
88 |
description="NanoGPT is a transformer-based language model with only 10.65 million parameters, trained on a small dataset of Shakespeare work (size: 1MB only). It is trained with character level tokenization with a simple objective: predict the next char, given all of the previous chars within a text.",
|
89 |
examples = [['We know what we are, but know not what we may be',300],
|
|
|
82 |
output = decode(y[0].tolist())
|
83 |
return output
|
84 |
|
85 |
+
INTERFACE = gr.Interface(fn=nanogpt, inputs=[gr.Textbox(label= "Prompt", value= 'All that glisters is not gold.'),
|
86 |
+
gr.Slider(minimum = 300, maximum = 500, value= 300, label= "Maximum number of tokens to be generated")] ,
|
87 |
outputs=gr.Text(label= "Generated Text"), title="NanoGPT",
|
88 |
description="NanoGPT is a transformer-based language model with only 10.65 million parameters, trained on a small dataset of Shakespeare work (size: 1MB only). It is trained with character level tokenization with a simple objective: predict the next char, given all of the previous chars within a text.",
|
89 |
examples = [['We know what we are, but know not what we may be',300],
|