prithivMLmods committed on
Commit 310fc8c · verified
1 Parent(s): 246bfe9

Update app.py

Files changed (1)
  1. app.py +70 -92
app.py CHANGED
@@ -1,6 +1,5 @@
+from huggingface_hub import InferenceClient
 import gradio as gr
-from openai import OpenAI
-import os
 from fpdf import FPDF
 from docx import Document
 
@@ -12,106 +11,85 @@ footer {
 }
 '''
 
-ACCESS_TOKEN = os.getenv("HF_TOKEN")
-
-client = OpenAI(
-    base_url="https://api-inference.huggingface.co/v1/",
-    api_key=ACCESS_TOKEN,
-)
+client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
 
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat.completions.create(
-        model="meta-llama/Meta-Llama-3.1-8B-Instruct",
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-        messages=messages,
-    ):
-        token = message.choices[0].delta.content
-        response += token
-        yield response
+def format_prompt(message, history, system_prompt=None):
+    prompt = "<s>"
+    for user_prompt, bot_response in history:
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+    if system_prompt:
+        prompt += f"[SYS] {system_prompt} [/SYS]"
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
 
-def save_to_file(history, file_format):
-    if file_format == "PDF":
+def save_to_file(content, filename, format):
+    if format == "pdf":
         pdf = FPDF()
         pdf.add_page()
         pdf.set_auto_page_break(auto=True, margin=15)
         pdf.set_font("Arial", size=12)
-        for user_message, assistant_message in history:
-            pdf.multi_cell(0, 10, f"User: {user_message}")
-            pdf.multi_cell(0, 10, f"Assistant: {assistant_message}")
-        file_name = "chat_history.pdf"
-        pdf.output(file_name)
-
-    elif file_format == "DOCX":
+        pdf.multi_cell(0, 10, content)
+        pdf.output(filename)
+    elif format == "docx":
         doc = Document()
-        for user_message, assistant_message in history:
-            doc.add_paragraph(f"User: {user_message}")
-            doc.add_paragraph(f"Assistant: {assistant_message}")
-        file_name = "chat_history.docx"
-        doc.save(file_name)
-
-    elif file_format == "TXT":
-        file_name = "chat_history.txt"
-        with open(file_name, "w") as file:
-            for user_message, assistant_message in history:
-                file.write(f"User: {user_message}\n")
-                file.write(f"Assistant: {assistant_message}\n")
-
-    return file_name
+        doc.add_paragraph(content)
+        doc.save(filename)
+    elif format == "txt":
+        with open(filename, 'w') as file:
+            file.write(content)
 
-# Gradio Interface Setup
-with gr.Blocks(css=css) as demo:
-    system_message = gr.Textbox(value="", label="System message")
-    max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
-    temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
-    top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")
-    save_as = gr.Radio(["PDF", "DOCX", "TXT"], label="Save As")
-
-    chat = gr.Chatbot()
-    msg = gr.Textbox(label="Your message")
-
-    def respond_wrapper(message, history):
-        response_generator = respond(
-            message,
-            history,
-            system_message.value,
-            max_tokens.value,
-            temperature.value,
-            top_p.value
-        )
-        response = next(response_generator)
-        return history + [(message, response)]
-
-    msg.submit(respond_wrapper, [msg, chat], [chat])
-
-    save_button = gr.Button("Save Conversation")
-    output_file = gr.File(label="Download File")
-
-    def handle_save(history, file_format):
-        return save_to_file(history, file_format)
-
-    save_button.click(handle_save, inputs=[chat, save_as], outputs=output_file)
+def generate(
+    prompt, history, system_prompt=None, temperature=0.2, max_new_tokens=1024, top_p=0.95, repetition_penalty=1.0,
+    save_format="txt", save_file=None
+):
+    temperature = float(temperature)
+    if temperature < 1e-2:
+        temperature = 1e-2
+    top_p = float(top_p)
+
+    generate_kwargs = dict(
+        temperature=temperature,
+        max_new_tokens=max_new_tokens,
+        top_p=top_p,
+        repetition_penalty=repetition_penalty,
+        do_sample=True,
+        seed=42,
+    )
+
+    formatted_prompt = format_prompt(prompt, history, system_prompt)
+
+    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+    output = ""
+
+    for response in stream:
+        output += response.token.text
+        yield output
+
+    # Save content to file
+    if save_file:
+        save_to_file(output, save_file, save_format)
+    return output
 
-if __name__ == "__main__":
-    demo.launch()
+with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
+    gr.Markdown("# GRAB DOC")
+    prompt = gr.Textbox(label="Prompt", placeholder="Enter your text here...", lines=4)
+    history = gr.State([])
+    system_prompt = gr.Textbox(label="System Prompt", placeholder="Enter system instructions (optional)...", lines=2, visible=False)
+    temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=1.0, step=0.1, value=0.2)
+    max_new_tokens = gr.Slider(label="Max New Tokens", minimum=1, maximum=1024, step=1, value=1024)
+    top_p = gr.Slider(label="Top P", minimum=0.0, maximum=1.0, step=0.05, value=0.95)
+    repetition_penalty = gr.Slider(label="Repetition Penalty", minimum=0.0, maximum=2.0, step=0.1, value=1.0)
+    save_format = gr.Dropdown(label="Save Format", choices=["txt", "pdf", "docx"], value="txt")
+    save_file = gr.Textbox(label="Save File Name", placeholder="Enter the filename (without extension)...", lines=1)
+
+    submit = gr.Button("Generate")
+    output = gr.Textbox(label="Generated Output", lines=20)
+
+    submit.click(
+        generate,
+        inputs=[prompt, history, system_prompt, temperature, max_new_tokens, top_p, repetition_penalty, save_format, save_file],
+        outputs=output
+    )
+
+demo.queue().launch(show_api=False)
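
The new format_prompt helper is pure string assembly, so the prompt it sends to the endpoint can be checked without touching the Inference API. Below is a minimal sketch: the function is copied verbatim from this commit, while the sample history and messages are invented for illustration.

# Standalone check of the Mistral-style prompt string built by app.py.
# format_prompt is copied from the commit above; the sample history is made up.

def format_prompt(message, history, system_prompt=None):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    if system_prompt:
        prompt += f"[SYS] {system_prompt} [/SYS]"
    prompt += f"[INST] {message} [/INST]"
    return prompt

history = [("Hello", "Hi! How can I help?")]
print(format_prompt("Draft a short summary.", history))
# -> <s>[INST] Hello [/INST] Hi! How can I help?</s> [INST] Draft a short summary. [/INST]

Since generate is a generator that yields the accumulated output after every streamed token, Gradio renders the text incrementally in the "Generated Output" textbox, and the optional file save runs only once the stream is exhausted.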