Leonard Püttmann committed
Commit 122c55f · verified · 1 Parent(s): 663f046

Upload app.py

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -1,4 +1,4 @@
-import spaces
+#import spaces
 import torch
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
@@ -28,13 +28,13 @@ class ModelSingleton:
 
 model_singleton = ModelSingleton()
 
-@spaces.GPU(duration=30)
+#@spaces.GPU(duration=30)
 def generate_response_en_it(input_text):
     input_ids = model_singleton.tokenizer_en_it("translate English to Italian: " + input_text, return_tensors="pt").input_ids
     output = model_singleton.model_en_it.generate(input_ids, max_new_tokens=256)
     return model_singleton.tokenizer_en_it.decode(output[0], skip_special_tokens=True)
 
-@spaces.GPU(duration=30)
+#@spaces.GPU(duration=30)
 def generate_response_it_en(input_text):
     input_ids = model_singleton.tokenizer_it_en("translate Italian to English: " + input_text, return_tensors="pt").input_ids
     output = model_singleton.model_it_en.generate(input_ids, max_new_tokens=256)
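
The commit disables the ZeroGPU integration by commenting out the `spaces` import and both `@spaces.GPU(duration=30)` decorators. Below is a minimal sketch, not part of this commit, of an alternative that keeps the decorator when the `spaces` package is installed and falls back to a no-op otherwise; the `gpu_decorator` name and the try/except fallback are assumptions, while `model_singleton` and the function body come from app.py as shown in the diff above.

```python
# Sketch: optional ZeroGPU decorator (assumed pattern, not from this commit).
try:
    import spaces
    # spaces.GPU(duration=30) returns a decorator, as used in the original app.py.
    gpu_decorator = spaces.GPU(duration=30)
except ImportError:
    # Fallback when running outside Hugging Face Spaces: apply the function unchanged.
    def gpu_decorator(func):
        return func

@gpu_decorator
def generate_response_en_it(input_text):
    # model_singleton is the ModelSingleton instance defined earlier in app.py.
    input_ids = model_singleton.tokenizer_en_it(
        "translate English to Italian: " + input_text, return_tensors="pt"
    ).input_ids
    output = model_singleton.model_en_it.generate(input_ids, max_new_tokens=256)
    return model_singleton.tokenizer_en_it.decode(output[0], skip_special_tokens=True)
```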