openfree committed on
Commit
bee778b
β€’
1 Parent(s): 566e2ba

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -19
app.py CHANGED
@@ -7,13 +7,15 @@ from typing import List, Tuple
7
  # LLM Models Definition
8
  LLM_MODELS = {
9
  "Cohere c4ai-crp-08-2024": "CohereForAI/c4ai-command-r-plus-08-2024", # Default
10
- "Meta Llama3.3-70B": "meta-llama/Llama-3.3-70B-Instruct",
11
- "Mistral Nemo 2407": "mistralai/Mistral-Nemo-Instruct-2407",
12
- "Alibaba Qwen QwQ-32B": "Qwen/QwQ-32B-Preview"
13
  }
14
 
15
- def get_client(model_name):
16
- return InferenceClient(LLM_MODELS[model_name], token=os.getenv("HF_TOKEN"))
 
 
 
 
17
 
18
  def analyze_file_content(content, file_type):
19
  """Analyze file content and return structural summary"""
@@ -94,7 +96,7 @@ def format_history(history):
94
  formatted_history.append({"role": "assistant", "content": assistant_msg})
95
  return formatted_history
96
 
97
- def chat(message, history, uploaded_file, model_name, system_message="", max_tokens=4000, temperature=0.7, top_p=0.9):
98
  system_prefix = """You are a file analysis expert. Analyze the uploaded file in depth from the following perspectives:
99
  1. πŸ“‹ Overall structure and composition
100
  2. πŸ“Š Key content and pattern analysis
@@ -144,7 +146,7 @@ Please provide detailed analysis from these perspectives:
144
  messages.append({"role": "user", "content": message})
145
 
146
  try:
147
- client = get_client(model_name)
148
  partial_message = ""
149
  current_history = []
150
 
@@ -176,8 +178,6 @@ css = """
176
  footer {visibility: hidden}
177
  """
178
 
179
- # ... (이전 μ½”λ“œ 동일)
180
-
181
  with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css, title="EveryChat πŸ€–") as demo:
182
  gr.HTML(
183
  """
@@ -206,13 +206,6 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css, title="EveryChat
206
  send = gr.Button("Send πŸ“€")
207
 
208
  with gr.Column(scale=1):
209
- model_name = gr.Radio(
210
- choices=list(LLM_MODELS.keys()),
211
- value="Cohere c4ai-crp-08-2024",
212
- label="Select LLM Model πŸ€–",
213
- info="Choose your preferred AI model"
214
- )
215
-
216
  gr.Markdown("### Upload File πŸ“\nSupport: Text, Code, CSV, Parquet files")
217
  file_upload = gr.File(
218
  label="Upload File",
@@ -229,7 +222,7 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css, title="EveryChat
229
  # Event bindings
230
  msg.submit(
231
  chat,
232
- inputs=[msg, chatbot, file_upload, model_name, system_message, max_tokens, temperature, top_p],
233
  outputs=[msg, chatbot],
234
  queue=True
235
  ).then(
@@ -240,7 +233,7 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css, title="EveryChat
240
 
241
  send.click(
242
  chat,
243
- inputs=[msg, chatbot, file_upload, model_name, system_message, max_tokens, temperature, top_p],
244
  outputs=[msg, chatbot],
245
  queue=True
246
  ).then(
@@ -252,7 +245,7 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css, title="EveryChat
252
  # Auto-analysis on file upload
253
  file_upload.change(
254
  chat,
255
- inputs=[gr.Textbox(value="Starting file analysis..."), chatbot, file_upload, model_name, system_message, max_tokens, temperature, top_p],
256
  outputs=[msg, chatbot],
257
  queue=True
258
  )
 
7
# Registry of selectable LLM backends: display name -> Hugging Face model id.
LLM_MODELS = {
    "Cohere c4ai-crp-08-2024": "CohereForAI/c4ai-command-r-plus-08-2024",  # Default
    "Meta Llama3.3-70B": "meta-llama/Llama-3.3-70B-Instruct",  # Backup model
}
12
 
13
def get_client(model_name="Cohere c4ai-crp-08-2024"):
    """Build an InferenceClient for the requested model.

    Args:
        model_name: Key into ``LLM_MODELS``. Defaults to the Cohere model.

    Returns:
        An ``InferenceClient`` bound to the resolved model id, authenticated
        via the ``HF_TOKEN`` environment variable.

    Raises:
        Exception: re-raised from client construction when the backup model
            itself was requested and fails (falling back again would only
            repeat the same failure).

    On any failure for a non-backup model (unknown key, client error), this
    silently falls back to the backup model, preserving the original
    best-effort behavior.
    """
    token = os.getenv("HF_TOKEN")  # fetched once; shared by primary and fallback
    backup = "Meta Llama3.3-70B"
    try:
        return InferenceClient(LLM_MODELS[model_name], token=token)
    except Exception:
        # Retrying with the identical model cannot succeed — surface the error.
        if model_name == backup:
            raise
        # Primary model unavailable: fall back to the backup model.
        return InferenceClient(LLM_MODELS[backup], token=token)
19
 
20
  def analyze_file_content(content, file_type):
21
  """Analyze file content and return structural summary"""
 
96
  formatted_history.append({"role": "assistant", "content": assistant_msg})
97
  return formatted_history
98
 
99
+ def chat(message, history, uploaded_file, system_message="", max_tokens=4000, temperature=0.7, top_p=0.9):
100
  system_prefix = """You are a file analysis expert. Analyze the uploaded file in depth from the following perspectives:
101
  1. πŸ“‹ Overall structure and composition
102
  2. πŸ“Š Key content and pattern analysis
 
146
  messages.append({"role": "user", "content": message})
147
 
148
  try:
149
+ client = get_client()
150
  partial_message = ""
151
  current_history = []
152
 
 
178
  footer {visibility: hidden}
179
  """
180
 
 
 
181
  with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css, title="EveryChat πŸ€–") as demo:
182
  gr.HTML(
183
  """
 
206
  send = gr.Button("Send πŸ“€")
207
 
208
  with gr.Column(scale=1):
 
 
 
 
 
 
 
209
  gr.Markdown("### Upload File πŸ“\nSupport: Text, Code, CSV, Parquet files")
210
  file_upload = gr.File(
211
  label="Upload File",
 
222
  # Event bindings
223
  msg.submit(
224
  chat,
225
+ inputs=[msg, chatbot, file_upload, system_message, max_tokens, temperature, top_p],
226
  outputs=[msg, chatbot],
227
  queue=True
228
  ).then(
 
233
 
234
  send.click(
235
  chat,
236
+ inputs=[msg, chatbot, file_upload, system_message, max_tokens, temperature, top_p],
237
  outputs=[msg, chatbot],
238
  queue=True
239
  ).then(
 
245
  # Auto-analysis on file upload
246
  file_upload.change(
247
  chat,
248
+ inputs=[gr.Textbox(value="Starting file analysis..."), chatbot, file_upload, system_message, max_tokens, temperature, top_p],
249
  outputs=[msg, chatbot],
250
  queue=True
251
  )