import re

import torch
import gradio as gr
import pandas as pd
from transformers import AutoTokenizer, pipeline
# Define the device
device = "cuda" if torch.cuda.is_available() else "cpu"
editorial_model = "PleIAs/Estienne"
token_classifier = pipeline(
"token-classification", model=editorial_model, aggregation_strategy="simple", device=device
)
tokenizer = AutoTokenizer.from_pretrained(editorial_model, model_max_length=512)
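# The classifier's context window is capped at 512 tokens (model_max_length above), so
# longer inputs are split into chunks of at most ~500 tokens before classification.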
css = """
<style>
.manuscript {
display: flex;
margin-bottom: 20px;
}
.annotation {
width: 30%;
padding-right: 20px;
color: grey;
font-style: italic;
}
.content {
width: 70%;
}
h3 {
margin-top: 0;
}
</style>
"""
# Preprocess the 'word' column
def preprocess_text(text):
    # Remove HTML tags
    text = re.sub(r'<[^>]+>', '', text)
    # Replace newlines with spaces
    text = re.sub(r'\n', ' ', text)
    # Replace multiple spaces with a single space
    text = re.sub(r'\s+', ' ', text)
    # Strip leading and trailing whitespace
    return text.strip()
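# Illustrative example (hypothetical input):
#   preprocess_text("<p>Un  texte\nd'exemple</p>") -> "Un texte d'exemple"
# Tags are stripped, newlines become spaces, and runs of whitespace collapse to one space.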
def split_text(text, max_tokens=500):
    # Split the text by newline characters
    parts = text.split("\n")
    chunks = []
    current_chunk = ""

    for part in parts:
        # Add part to current chunk
        if current_chunk:
            temp_chunk = current_chunk + "\n" + part
        else:
            temp_chunk = part

        # Tokenize the temporary chunk
        num_tokens = len(tokenizer.tokenize(temp_chunk))

        if num_tokens <= max_tokens:
            current_chunk = temp_chunk
        else:
            if current_chunk:
                chunks.append(current_chunk)
            current_chunk = part

    if current_chunk:
        chunks.append(current_chunk)

    # If no newlines were found and still exceeding max_tokens, split further
    if len(chunks) == 1 and len(tokenizer.tokenize(chunks[0])) > max_tokens:
        long_text = chunks[0]
        chunks = []
        while len(tokenizer.tokenize(long_text)) > max_tokens:
            split_point = len(long_text) // 2
            while split_point < len(long_text) and not re.match(r'\s', long_text[split_point]):
                split_point += 1
            # Ensure split_point does not go out of range
            if split_point >= len(long_text):
                split_point = len(long_text) - 1
            chunks.append(long_text[:split_point].strip())
            long_text = long_text[split_point:].strip()
        if long_text:
            chunks.append(long_text)

    return chunks
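# Illustrative use (hypothetical input): split_text(texte_long, max_tokens=500) returns a
# list of newline-delimited chunks sized to stay under max_tokens; a text with no usable
# newlines is repeatedly bisected at whitespace until every piece fits.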
def transform_chunks(marianne_segmentation):
    marianne_segmentation = pd.DataFrame(marianne_segmentation)
    marianne_segmentation = marianne_segmentation[marianne_segmentation['entity_group'] != 'separator']
    marianne_segmentation['word'] = marianne_segmentation['word'].astype(str).str.replace('¶', '\n', regex=False)
    marianne_segmentation['word'] = marianne_segmentation['word'].astype(str).apply(preprocess_text)
    marianne_segmentation = marianne_segmentation[marianne_segmentation['word'].notna() & (marianne_segmentation['word'] != '') & (marianne_segmentation['word'] != ' ')]

    html_output = []
    for _, row in marianne_segmentation.iterrows():
        entity_group = row['entity_group']
        word = row['word']
        if entity_group == 'title':
            html_output.append(f'<div class="manuscript"><div class="annotation">{entity_group}</div><div class="content"><h3>{word}</h3></div></div>')
        else:
            html_output.append(f'<div class="manuscript"><div class="annotation">{entity_group}</div><div class="content">{word}</div></div>')

    final_html = '\n'.join(html_output)
    return final_html
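# Note: with aggregation_strategy="simple", the token-classification pipeline returns, for
# each input string, a list of dicts with 'entity_group', 'score' and 'word' keys;
# transform_chunks consumes one such list and renders it as annotated HTML rows.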
# Class to encapsulate the editorial segmentation bot
class MistralChatBot:
    def __init__(self, system_prompt="Le dialogue suivant est une conversation"):
        self.system_prompt = system_prompt

    def predict(self, user_message):
        # Encode line breaks as pilcrows so segment boundaries survive tokenization
        editorial_text = re.sub(r"\n", " ¶ ", user_message)
        num_tokens = len(tokenizer.tokenize(editorial_text))

        if num_tokens > 500:
            batch_prompts = split_text(editorial_text, max_tokens=500)
        else:
            batch_prompts = [editorial_text]

        out = token_classifier(batch_prompts)
        # Render every classified chunk, not only the first one
        classified_text = '\n'.join(transform_chunks(chunk) for chunk in out)
        generated_text = f'{css}<h2 style="text-align:center">Réponse</h2>\n<div class="generation">{classified_text}</div>'
        return generated_text
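# Minimal usage sketch (hypothetical input text):
#   bot = MistralChatBot()
#   html = bot.predict("PREMIÈRE PARTIE\nIl était une fois...")
#   # html holds the CSS block plus one .manuscript row per detected editorial segment.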
# Create the chatbot instance
mistral_bot = MistralChatBot()

# Define the Gradio interface
title = "Éditorialisation"
description = "Un outil expérimental d'identification de la structure du texte à partir d'un encoder (Deberta)"
examples = [
    [
        "Qui peut bénéficier de l'AIP?",  # user_message
        0.7  # temperature
    ]
]

with gr.Blocks(theme='JohnSmith9982/small_and_pretty') as demo:
    gr.HTML("""<h1 style="text-align:center">Correction d'OCR</h1>""")
    text_input = gr.Textbox(label="Votre texte.", type="text", lines=1)
    text_button = gr.Button("Identifier les structures éditoriales")
    text_output = gr.HTML(label="Le texte corrigé")
    text_button.click(mistral_bot.predict, inputs=text_input, outputs=[text_output])
if __name__ == "__main__":
    demo.queue().launch()