RashiAgarwal committed
Commit ab8dd0d
Parent: 357686e

Update app.py

Files changed (1): app.py (+3 -1)
app.py CHANGED
@@ -82,7 +82,9 @@ def nanogpt(start:str , max_new_tokens = 500, num_samples =2):
     output = decode(y[0].tolist())
     return output
 
-INTERFACE = gr.Interface(fn=nanogpt, inputs=[gr.Textbox(label= "Prompt", value= 'My mind is tossing on the ocean.'),gr.Slider(300,500, "number",value= 250, label= "Maximum number of tokens to be generated")] , outputs=gr.Text(label= "Generated Text"), title="NanoGPT",
+INTERFACE = gr.Interface(fn=nanogpt, inputs=[gr.Textbox(label= "Prompt", value= 'My mind is tossing on the ocean.'),
+                         gr.Slider(minimum = 300, maximum = 500, "number", value= 300, label= "Maximum number of tokens to be generated")],
+                         outputs=gr.Text(label= "Generated Text"), title="NanoGPT",
     description="NanoGPT is a transformer-based language model with only 10.65 million parameters, trained on a small dataset of Shakespeare work (size: 1MB only). It is trained with character level tokenization with a simple objective: predict the next char, given all of the previous chars within a text.",
     examples = [['We know what we are, but know not what we may be',300],
                 ['Sweet are the uses of adversity which, like the toad, ugly and venomous, wears yet a precious jewel in his head',300],]
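Note: the commit fixes the slider default, raising it from 250 (which sat below the old slider's minimum of 300) to 300, but the added lines still pass the stray positional string "number" after the keyword arguments minimum and maximum, which Python rejects with "SyntaxError: positional argument follows keyword argument". Below is a minimal sketch of the same interface with that argument dropped and all gr.Slider parameters passed by keyword, assuming the Gradio 3.x Slider signature (minimum, maximum, value, label); the nanogpt function and the launch() call are assumptions based on the hunk header and typical Gradio apps, not part of this diff.

import gradio as gr

# nanogpt(start, max_new_tokens, num_samples) is defined earlier in app.py
# (the hunk starts inside it at line 82); it is assumed here, not redefined.
INTERFACE = gr.Interface(
    fn=nanogpt,
    inputs=[
        gr.Textbox(label="Prompt", value="My mind is tossing on the ocean."),
        # Keyword-only arguments avoid the positional-after-keyword SyntaxError
        # introduced by the stray "number" in the committed line.
        gr.Slider(minimum=300, maximum=500, value=300,
                  label="Maximum number of tokens to be generated"),
    ],
    outputs=gr.Text(label="Generated Text"),
    title="NanoGPT",
    description="NanoGPT is a transformer-based language model with only 10.65 million parameters, trained on a small (~1MB) Shakespeare dataset with character-level tokenization.",
    examples=[
        ["We know what we are, but know not what we may be", 300],
        ["Sweet are the uses of adversity which, like the toad, ugly and venomous, wears yet a precious jewel in his head", 300],
    ],
)

INTERFACE.launch()  # assumed entry point; not shown in this diff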