ldhldh committed
Commit 8943af7
1 Parent(s): abbd7b9

Update app.py

Files changed (1)
1. app.py +4 -29
app.py CHANGED
@@ -2,10 +2,10 @@ from threading import Thread
 
 import torch
 import gradio as gr
-from transformers import pipeline,AutoTokenizer, AutoModelForCausalLM, BertTokenizer, BertForSequenceClassification, StoppingCriteria, StoppingCriteriaList
+from transformers import pipeline,AutoTokenizer, AutoModelForCausalLM
 from peft import PeftModel, PeftConfig
 import re
-from kobert_transformers import get_tokenizer
+
 
 torch_device = "cuda" if torch.cuda.is_available() else "cpu"
 print("Running on device:", torch_device)
@@ -33,29 +33,6 @@ model.eval()
 model.config.use_cache = True
 
 
-mbti_bert_model_name = "Lanvizu/fine-tuned-klue-bert-base_model_11"
-mbti_bert_model = BertForSequenceClassification.from_pretrained(mbti_bert_model_name)
-mbti_bert_model.eval()
-mbti_bert_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
-
-bert_model_name = "ldhldh/bert_YN_small"
-bert_model = BertForSequenceClassification.from_pretrained(bert_model_name)
-bert_model.eval()
-bert_tokenizer = get_tokenizer()
-
-
-def mbti_classify(x):
-    classifier = pipeline("text-classification", model=mbti_bert_model, tokenizer=mbti_bert_tokenizer, return_all_scores=True)
-    result = classifier([x])
-    return result[0]
-
-
-def classify(x):
-    input_list = bert_tokenizer.batch_encode_plus([x], truncation=True, padding=True, return_tensors='pt')
-    input_ids = input_list['input_ids'].to(bert_model.device)
-    attention_masks = input_list['attention_mask'].to(bert_model.device)
-    outputs = bert_model(input_ids, attention_mask=attention_masks, return_dict=True)
-    return outputs.logits.argmax(dim=1).cpu().tolist()[0]
 
 def gen(x, top_p, top_k, temperature, max_new_tokens, repetition_penalty):
     gened = model.generate(
@@ -101,8 +78,7 @@ with gr.Blocks() as demo:
     )
     model_output = gr.Textbox(label="Model output", lines=10, interactive=False)
    button_submit = gr.Button(value="Submit")
-    button_bert = gr.Button(value="bert_Sumit")
-    button_mbti_bert = gr.Button(value="mbti_bert_Sumit")
+
     with gr.Column(scale=1):
         max_new_tokens = gr.Slider(
             minimum=1, maximum=200, value=20, step=1, interactive=True, label="Max New Tokens",
@@ -121,6 +97,5 @@ with gr.Blocks() as demo:
         )
 
     button_submit.click(gen, [user_text, top_p, top_k, temperature, max_new_tokens, repetition_penalty], model_output)
-    button_bert.click(classify, [user_text], model_output)
-    button_mbti_bert.click(mbti_classify, [user_text], model_output)
+
 demo.queue(max_size=32).launch(enable_queue=True)
 
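Net effect of the change: app.py drops both auxiliary BERT classifiers (the KoBERT yes/no model and the KLUE-BERT MBTI model) and keeps a single generate-and-display path. Assembled from the diff's context lines, a minimal runnable sketch of the remaining skeleton is shown below; the base-model/PEFT loading is not visible in the diff, so gen() is stubbed here, and all slider arguments other than Max New Tokens are hypothetical placeholders rather than the app's real values.

# Minimal sketch of app.py after this commit, assembled from the diff's
# context lines. gen() is stubbed because the model setup is not shown in
# the diff; sliders marked "hypothetical" use placeholder ranges/defaults.
import torch
import gradio as gr

torch_device = "cuda" if torch.cuda.is_available() else "cpu"
print("Running on device:", torch_device)

def gen(x, top_p, top_k, temperature, max_new_tokens, repetition_penalty):
    # Stub: the real app calls model.generate(...) with these sampling knobs.
    return f"(generated continuation of: {x})"

with gr.Blocks() as demo:
    user_text = gr.Textbox(label="User input")  # hypothetical: not in the diff
    model_output = gr.Textbox(label="Model output", lines=10, interactive=False)
    button_submit = gr.Button(value="Submit")
    with gr.Column(scale=1):
        max_new_tokens = gr.Slider(
            minimum=1, maximum=200, value=20, step=1, interactive=True,
            label="Max New Tokens",
        )
        # Hypothetical ranges/defaults; the diff does not show these sliders.
        top_p = gr.Slider(0.05, 1.0, value=0.95, step=0.05, label="Top-p")
        top_k = gr.Slider(1, 100, value=50, step=1, label="Top-k")
        temperature = gr.Slider(0.1, 2.0, value=0.8, step=0.1, label="Temperature")
        repetition_penalty = gr.Slider(1.0, 2.0, value=1.2, step=0.1, label="Repetition Penalty")

    button_submit.click(
        gen,
        [user_text, top_p, top_k, temperature, max_new_tokens, repetition_penalty],
        model_output,
    )

demo.queue(max_size=32).launch()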