RashiAgarwal committed
Commit 357686e
1 Parent(s): 762d705

Update app.py

Files changed (1)
  1. app.py +5 -10
app.py CHANGED
@@ -11,10 +11,7 @@ def nanogpt(start:str , max_new_tokens = 500, num_samples =2):
 
  # -----------------------------------------------------------------------------
  init_from = 'resume' # either 'resume' (from an out_dir) or a gpt2 variant (e.g. 'gpt2-xl')
- out_dir = 'out-shakespeare-char' # ignored if init_from is not 'resume'
- #start = "God is great. I love Him." #"\n" or "<|endoftext|>" or etc. Can also specify a file, use as: "FILE:prompt.txt"
- #num_samples = 10 # number of samples to draw
- #max_new_tokens = 500 # number of tokens generated in each sample
+
  temperature = 0.8 # 1.0 = no change, < 1.0 = less random, > 1.0 = more random, in predictions
  top_k = 200 # retain only the top_k most likely tokens, clamp others to have 0 probability
  seed = 1337
@@ -85,10 +82,8 @@ def nanogpt(start:str , max_new_tokens = 500, num_samples =2):
  output = decode(y[0].tolist())
  return output
 
- INTERFACE = gr.Interface(fn=nanogpt, inputs=[gr.Textbox(label= "Prompt"),gr.Slider(300,500, "number", label= "Maximum number of tokens to be geenrated")] , outputs=gr.Text(label= "Generated Text"), title="NanoGPT",
- description="NanoGPT is a large transformer-based language model with 10.65 million parameters, trained on a small dataset of Shakespeare work (size: 1MB only). It is trained with character level tokeniation with a simple objective: predict the next char, given all of the previous chars within some text.",
- examples = [['We as the new generation AI enginners.',300,1],
- ['A forgotten era of humility and happiness',300,2],
-
- ]
+ INTERFACE = gr.Interface(fn=nanogpt, inputs=[gr.Textbox(label= "Prompt", value= 'My mind is tossing on the ocean.'),gr.Slider(300,500, "number",value= 250, label= "Maximum number of tokens to be generated")] , outputs=gr.Text(label= "Generated Text"), title="NanoGPT",
+ description="NanoGPT is a transformer-based language model with only 10.65 million parameters, trained on a small dataset of Shakespeare work (size: 1MB only). It is trained with character level tokenization with a simple objective: predict the next char, given all of the previous chars within a text.",
+ examples = [['We know what we are, but know not what we may be',300],
+ ['Sweet are the uses of adversity which, like the toad, ugly and venomous, wears yet a precious jewel in his head',300],]
  ).launch(debug=True)
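
The unchanged lines above call decode(y[0].tolist()) to turn sampled token ids back into text, and the new description stresses that the model uses character-level tokenization (predict the next char, given the previous chars). A minimal sketch of what character-level encode/decode amounts to; the stoi/itos names follow nanoGPT's shakespeare-char convention and the in-line vocabulary is only an illustration, the real app loads its lookup tables from the trained checkpoint's metadata:

# Character-level encode/decode, sketched with a toy vocabulary.
# stoi/itos follow nanoGPT's shakespeare-char naming; the real app
# loads these tables from the checkpoint's saved metadata.
text = "My mind is tossing on the ocean."
chars = sorted(set(text))                     # stand-in for the training corpus charset
stoi = {ch: i for i, ch in enumerate(chars)}  # char -> integer id
itos = {i: ch for i, ch in enumerate(chars)}  # integer id -> char

def encode(s):
    return [stoi[c] for c in s]

def decode(ids):
    return ''.join(itos[i] for i in ids)

assert decode(encode(text)) == text           # lossless round trip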
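
Read as a standalone script, the post-commit wiring looks roughly like the sketch below. The nanogpt body here is a placeholder for the real checkpoint-sampling code, and the slider is written with keyword arguments instead of the positional "number" string; everything else mirrors the added lines. Note that the committed default of 250 sits below the slider's minimum of 300.

import gradio as gr

def nanogpt(start: str, max_new_tokens=500, num_samples=2):
    # Placeholder body: the real app samples max_new_tokens characters
    # from a trained nanoGPT checkpoint and decodes them to text.
    return (start + " ...")[: int(max_new_tokens)]

INTERFACE = gr.Interface(
    fn=nanogpt,
    inputs=[
        gr.Textbox(label="Prompt", value="My mind is tossing on the ocean."),
        gr.Slider(300, 500, value=250, step=1,  # commit's default of 250 is below the 300 minimum
                  label="Maximum number of tokens to be generated"),
    ],
    outputs=gr.Text(label="Generated Text"),
    title="NanoGPT",
    description="NanoGPT is a transformer-based language model with only 10.65 million parameters, "
                "trained on a small (~1MB) Shakespeare dataset with character-level tokenization: "
                "predict the next char, given all of the previous chars within a text.",
    # Two input components, so each example row carries exactly two values.
    examples=[
        ["We know what we are, but know not what we may be", 300],
        ["Sweet are the uses of adversity which, like the toad, ugly and venomous, "
         "wears yet a precious jewel in his head", 300],
    ],
)

if __name__ == "__main__":
    INTERFACE.launch(debug=True)

The old examples carried three values each against only two input components; trimming them to [prompt, max_new_tokens] is what makes the example rows line up with the inputs list.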