pablo-rf committed on
Commit
ad2ab25
1 Parent(s): 58dc49a

Initial commit

Files changed (3)
  1. README.md +3 -3
  2. app.py +132 -0
  3. requirements.txt +3 -0
README.md CHANGED
@@ -1,8 +1,8 @@
  ---
  title: FLOR 1.3B GL
- emoji: 📊
- colorFrom: green
- colorTo: pink
+ emoji: 💐
+ colorFrom: blue
+ colorTo: white
  sdk: gradio
  sdk_version: 4.19.2
  app_file: app.py
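
The front matter above is the Space configuration: `sdk: gradio` and `sdk_version: 4.19.2` tell Hugging Face Spaces which Gradio version to install, and `app_file: app.py` names the script that the Space launches. To reproduce the demo locally, one would presumably install `gradio==4.19.2` plus the packages from `requirements.txt` below and run `python app.py`.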
app.py ADDED
@@ -0,0 +1,132 @@
+ import gradio as gr
+ from gradio.components import Slider
+ import torch
+ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
+
+ # Model, information and examples ----------------------------------------------
+ model_id = "proxectonos/FLOR-1.3B-GL"
+ title = "Modelo de xeración de texto FLOR-1.3B-GL"
+ markdown_description = """
+ # FLOR-1.3B-GL
+
+ 🪷 **[FLOR-1.3B-GL](https://huggingface.co/proxectonos/FLOR-1.3B-GL)** is a 1.3B-parameter multilingual LLM for the Galician language.
+
+ 👀 **Learn more about FLOR-1.3B:** [HF official model card](https://huggingface.co/proxectonos/FLOR-1.3B-GL) and the [Proxecto Nós](https://nos.gal/en/proxecto-nos) website.
+ """
+
+ short_prompts_examples = [
+     ["A receita tradicional das filloas é"],
+     ["O neno vivía preto de"]
+ ]
+
+ few_shot_prompts_examples = [
+     ["Responde á seguinte pregunta. \nPregunta: \"Cal é a capital de Noruega? \"\nResposta: \"A capital de Noruega é Oslo.\"\n---- \nResponde á seguinte pregunta.\nPregunta: \"Cal é a moeda de Portugal\" \nResposta: \"A moeda de Portugal é o euro.\" \n---- \nResponde á seguinte pregunta. \nPregunta: \"Cal é a capital de Suecia?\"\nResposta:"],
+     ["Extrae as entidades nomeadas do seguinte texto: \nTexto: \"Chámome Wolfgang e vivo en Berlin\" \nEntidades: Wolfgang:PER, Berlin:LOC \n ---- \nExtrae as entidades nomeadas do seguinte texto: \nTexto: \"María e Miguel non teñen ningún problema\" \nEntidades: María:PER, Miguel:PER \n---- \nExtrae as entidades nomeadas do seguinte texto: \nTexto: \"O mellor de Barcelona é o bar do meu amigo Pablo\" \nEntidades: Pablo:PER, Barcelona:LOC \n---- \nExtrae as entidades nomeadas do seguinte texto: \nTexto: \"Carlos comparte cuarto con Marc\" \nEntidades:"]
+ ]
+ fronted_theme = 'Soft'
+
+ # Model loading ---------------------------------------------------------
+ generator_model = pipeline("text-generation", model=model_id)
+
+ # Generation functions ---------------------------------------------------------
+ def remove_empty_lines(text):
+     lines = text.strip().split("\n")
+     non_empty_lines = [line for line in lines if line.strip()]
+     return "\n".join(non_empty_lines)
+
+ def predict(prompt, max_length, repetition_penalty=1.3):
+     print("Dentro da xeración...")
+     prompt_length = len(generator_model.tokenizer.encode(prompt))
+     generated_text = generator_model(
+         prompt,
+         max_length=prompt_length + max_length,
+         pad_token_id=generator_model.tokenizer.eos_token_id,
+         repetition_penalty=repetition_penalty)
+
+     generated_sequence = generated_text[0]['generated_text']
+     if generated_sequence is None:
+         gr.Warning('Inference endpoint is not available right now. Please try again later.')
+         return
+
+     generated_sequence = remove_empty_lines(generated_sequence)
+     print("Xeración completada")
+     return generated_sequence
+
+ # Gradio app ---------------------------------------------------------
+ def clear():
+     return (
+         None,
+         None,
+         gr.update(value=20),
+         gr.update(value=1.3)
+     )
+
+ def pass_to_input(generated_gl):
+     return (
+         gr.update(value=generated_gl),
+         None,
+     )
+
+ def gradio_app():
+     with gr.Blocks(theme=fronted_theme) as demo:
+         with gr.Row():
+             with gr.Column(scale=0.1):
+                 gr.HTML('<img src="https://huggingface.co/spaces/proxectonos/README/resolve/main/title-card.png" width="100%" style="border-radius: 0.75rem;">')
+             with gr.Column():
+                 gr.Markdown(markdown_description)
+
+         with gr.Row(equal_height=True):
+             with gr.Column():
+                 text_gl = gr.Textbox(label="Input",
+                                      lines=6, placeholder="e.g. O neno vai a escola con ")
+                 with gr.Row(variant="panel"):
+                     with gr.Accordion("Model parameters", open=False):
+                         max_length = Slider(
+                             minimum=1,
+                             maximum=200,
+                             step=1,
+                             value=30,
+                             label="Max tokens"
+                         )
+                         repetition_penalty = Slider(
+                             minimum=0.1,
+                             maximum=4,
+                             step=0.1,
+                             value=1.3,
+                             label="Repetition penalty"
+                         )
+                 generator_btn = gr.Button(value="Generate", variant='primary')
+             with gr.Column():
+                 generated_gl = gr.Textbox(label="Output",
+                                           lines=6,
+                                           placeholder="Generated text will appear here",
+                                           interactive=False,
+                                           show_copy_button=True)
+                 pass_btn = gr.Button(value="Pass text to input")
+                 clean_btn = gr.Button(value="Clean")
+
+         generator_btn.click(predict, inputs=[text_gl, max_length, repetition_penalty], outputs=generated_gl, api_name="generate-flor-gl")
+         clean_btn.click(fn=clear, inputs=[], outputs=[text_gl, generated_gl, max_length, repetition_penalty], queue=False, api_name=False)
+         pass_btn.click(fn=pass_to_input, inputs=[generated_gl], outputs=[text_gl, generated_gl], queue=False, api_name=False)
+
+         with gr.Row():
+             with gr.Column(scale=0.5):
+                 gr.Examples(
+                     label="Short prompts",
+                     examples=short_prompts_examples,
+                     inputs=[text_gl, max_length, repetition_penalty],
+                     outputs=generated_gl,
+                     fn=predict
+                 )
+                 gr.Examples(
+                     label="Few-shot prompts",
+                     examples=few_shot_prompts_examples,
+                     inputs=[text_gl, max_length, repetition_penalty],
+                     outputs=generated_gl,
+                     fn=predict
+                 )
+
+     demo.launch()
+
+ if __name__ == "__main__":
+     gradio_app()
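
Because the generate button is registered with `api_name="generate-flor-gl"`, the deployed Space also exposes that route as a named API endpoint. A minimal sketch of calling it from Python with `gradio_client` (not part of this commit; the Space id below is a placeholder to be replaced with the real repo id of this demo):

```python
from gradio_client import Client

# Placeholder Space id -- substitute the actual "owner/space-name" of this demo.
client = Client("proxectonos/FLOR-1.3B-GL-demo")

# Positional arguments mirror the click handler's inputs:
# prompt text, max new tokens, repetition penalty.
result = client.predict(
    "A receita tradicional das filloas é",  # text_gl
    30,                                     # max_length
    1.3,                                    # repetition_penalty
    api_name="/generate-flor-gl",
)
print(result)
```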
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ transformers
+ torch
+ accelerate
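
Note that `gradio` itself is not listed here and no versions are pinned: on Spaces, Gradio comes from the `sdk_version` field in the README metadata, while the three packages above are installed at their latest versions. A quick local sanity check, assuming these dependencies plus `gradio` are installed:

```python
# Verify that the imports app.py depends on resolve, and report installed versions.
import gradio
import torch
import transformers

print("gradio       ", gradio.__version__)       # the Space metadata expects 4.19.2
print("torch        ", torch.__version__)
print("transformers ", transformers.__version__)
```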