Spaces:
Running on Zero
cutechicken
committed on
Update app.py
app.py CHANGED
@@ -241,317 +241,6 @@ def read_uploaded_file(file):
     except Exception as e:
         return f"❌ File read error: {str(e)}", "error"
 
-def chat(message, history, uploaded_file, system_message="", max_tokens=4000, temperature=0.7, top_p=0.9):
-    if not message:
-        return "", history
-
-    system_prefix = """I am 'GiniGEN', your friendly and intelligent AI assistant. I communicate by the following principles:
-1. 🤝 Converse in a friendly, empathetic tone
-2. 💡 Give clear, easy-to-understand explanations
-3. 🎯 Grasp the intent of each question precisely and tailor the answer
-4. 📚 Draw on the uploaded file's contents when needed to give concrete help
-5. ✨ Add further insights and suggestions to make the conversation more valuable
-I always respond politely and kindly, adding concrete examples or explanations
-where they help understanding."""
-
-    try:
-        # Load the model on the first message
-        model_manager.ensure_model_loaded()
-
-        if uploaded_file:
-            content, file_type = read_uploaded_file(uploaded_file)
-            if file_type == "error":
-                error_message = content
-                chat_history.add_conversation(message, error_message)
-                return "", history + [[message, error_message]]
-
-            file_summary = analyze_file_content(content, file_type)
-
-            if file_type in ['parquet', 'csv']:
-                system_message += f"\n\nFile content:\n```markdown\n{content}\n```"
-            else:
-                system_message += f"\n\nFile content:\n```\n{content}\n```"
-
-            if message == "Starting file analysis...":
-                message = f"""[File structure analysis] {file_summary}
-I will help from the following angles:
-1. 📋 Overall content overview
-2. 💡 Explanation of the main characteristics
-3. 🎯 Practical ways to use it
-4. ✨ Suggestions for improvement
-5. 💬 Follow-up questions or further explanation"""
-
-        messages = [{"role": "system", "content": system_prefix + system_message}]
-
-        if history:
-            for user_msg, assistant_msg in history:
-                messages.append({"role": "user", "content": user_msg})
-                messages.append({"role": "assistant", "content": assistant_msg})
-
-        messages.append({"role": "user", "content": message})
-
-        partial_message = ""
-
-        for response in model_manager.generate_response(
-            messages,
-            max_tokens=max_tokens,
-            temperature=temperature,
-            top_p=top_p
-        ):
-            token = response.choices[0].delta.get('content', '')
-            if token:
-                partial_message += token
-                current_history = history + [[message, partial_message]]
-                yield "", current_history
-
-        chat_history.add_conversation(message, partial_message)
-
-    except Exception as e:
-        error_msg = f"❌ An error occurred: {str(e)}"
-        chat_history.add_conversation(message, error_msg)
-        yield "", history + [[message, error_msg]]import os
-from dotenv import load_dotenv
-import gradio as gr
-import pandas as pd
-import json
-from datetime import datetime
-import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer
-import spaces
-from threading import Thread
-
-# Environment variables
-HF_TOKEN = os.getenv("HF_TOKEN")
-MODEL_ID = "CohereForAI/c4ai-command-r7b-12-2024"
-
-class ModelManager:
-    def __init__(self):
-        self.tokenizer = None
-        self.model = None
-        # Initialization happens on the first request
-
-    def ensure_model_loaded(self):
-        if self.model is None or self.tokenizer is None:
-            self.setup_model()
-
-    @spaces.GPU
-    def setup_model(self):
-        try:
-            print("Loading tokenizer...")
-            self.tokenizer = AutoTokenizer.from_pretrained(
-                MODEL_ID,
-                use_fast=True,
-                token=HF_TOKEN,
-                trust_remote_code=True
-            )
-            if not self.tokenizer.pad_token:
-                self.tokenizer.pad_token = self.tokenizer.eos_token
-            print("Tokenizer loaded")
-
-            print("Loading model...")
-            self.model = AutoModelForCausalLM.from_pretrained(
-                MODEL_ID,
-                token=HF_TOKEN,
-                torch_dtype=torch.bfloat16,
-                device_map="auto",
-                trust_remote_code=True,
-                low_cpu_mem_usage=True
-            )
-            self.model.eval()
-            print("Model loaded")
-
-        except Exception as e:
-            print(f"Error while loading model: {e}")
-            raise Exception(f"Model loading failed: {e}")
-
-    @spaces.GPU
-    def generate_response(self, messages, max_tokens=4000, temperature=0.7, top_p=0.9):
-        try:
-            # Make sure the model is loaded
-            self.ensure_model_loaded()
-
-            # Build the prompt text
-            prompt = ""
-            for msg in messages:
-                role = msg["role"]
-                content = msg["content"]
-                if role == "system":
-                    prompt += f"System: {content}\n"
-                elif role == "user":
-                    prompt += f"Human: {content}\n"
-                elif role == "assistant":
-                    prompt += f"Assistant: {content}\n"
-            prompt += "Assistant: "
-
-            # Encode the input
-            input_ids = self.tokenizer.encode(
-                prompt,
-                return_tensors="pt",
-                add_special_tokens=True
-            ).to(self.model.device)
-
-            # Generate the response
-            with torch.no_grad():
-                output_ids = self.model.generate(
-                    input_ids,
-                    max_new_tokens=max_tokens,
-                    do_sample=True,
-                    temperature=temperature,
-                    top_p=top_p,
-                    pad_token_id=self.tokenizer.pad_token_id,
-                    eos_token_id=self.tokenizer.eos_token_id,
-                    num_return_sequences=1
-                )
-
-            # Decode the response
-            generated_text = self.tokenizer.decode(
-                output_ids[0][input_ids.shape[1]:],
-                skip_special_tokens=True
-            )
-
-            # Stream word by word
-            words = generated_text.split()
-            for word in words:
-                yield type('Response', (), {
-                    'choices': [type('Choice', (), {
-                        'delta': {'content': word + " "}
-                    })()]
-                })()
-
-        except Exception as e:
-            print(f"Error while generating response: {e}")
-            raise Exception(f"Response generation failed: {e}")
-
-class ChatHistory:
-    def __init__(self):
-        self.history = []
-        self.history_file = "/tmp/chat_history.json"
-        self.load_history()
-
-    def add_conversation(self, user_msg: str, assistant_msg: str):
-        conversation = {
-            "timestamp": datetime.now().isoformat(),
-            "messages": [
-                {"role": "user", "content": user_msg},
-                {"role": "assistant", "content": assistant_msg}
-            ]
-        }
-        self.history.append(conversation)
-        self.save_history()
-
-    def format_for_display(self):
-        formatted = []
-        for conv in self.history:
-            formatted.append([
-                conv["messages"][0]["content"],
-                conv["messages"][1]["content"]
-            ])
-        return formatted
-
-    def get_messages_for_api(self):
-        messages = []
-        for conv in self.history:
-            messages.extend([
-                {"role": "user", "content": conv["messages"][0]["content"]},
-                {"role": "assistant", "content": conv["messages"][1]["content"]}
-            ])
-        return messages
-
-    def clear_history(self):
-        self.history = []
-        self.save_history()
-
-    def save_history(self):
-        try:
-            with open(self.history_file, 'w', encoding='utf-8') as f:
-                json.dump(self.history, f, ensure_ascii=False, indent=2)
-        except Exception as e:
-            print(f"Failed to save history: {e}")
-
-    def load_history(self):
-        try:
-            if os.path.exists(self.history_file):
-                with open(self.history_file, 'r', encoding='utf-8') as f:
-                    self.history = json.load(f)
-        except Exception as e:
-            print(f"Failed to load history: {e}")
-            self.history = []
-
-# Global instances
-chat_history = ChatHistory()
-model_manager = ModelManager()
-
-def analyze_file_content(content, file_type):
-    """Analyze file content and return structural summary"""
-    if file_type in ['parquet', 'csv']:
-        try:
-            lines = content.split('\n')
-            header = lines[0]
-            columns = header.count('|') - 1
-            rows = len(lines) - 3
-            return f"📊 Dataset structure: {columns} columns, {rows} rows"
-        except:
-            return "❌ Failed to analyze dataset structure"
-
-    lines = content.split('\n')
-    total_lines = len(lines)
-    non_empty_lines = len([line for line in lines if line.strip()])
-
-    if any(keyword in content.lower() for keyword in ['def ', 'class ', 'import ', 'function']):
-        functions = len([line for line in lines if 'def ' in line])
-        classes = len([line for line in lines if 'class ' in line])
-        imports = len([line for line in lines if 'import ' in line or 'from ' in line])
-        return f"💻 Code structure: {total_lines} lines (functions: {functions}, classes: {classes}, imports: {imports})"
-
-    paragraphs = content.count('\n\n') + 1
-    words = len(content.split())
-    return f"📝 Document structure: {total_lines} lines, {paragraphs} paragraphs, ~{words} words"
-
-def read_uploaded_file(file):
-    if file is None:
-        return "", ""
-    try:
-        file_ext = os.path.splitext(file.name)[1].lower()
-
-        if file_ext == '.parquet':
-            df = pd.read_parquet(file.name, engine='pyarrow')
-            content = df.head(10).to_markdown(index=False)
-            return content, "parquet"
-        elif file_ext == '.csv':
-            encodings = ['utf-8', 'cp949', 'euc-kr', 'latin1']
-            for encoding in encodings:
-                try:
-                    df = pd.read_csv(file.name, encoding=encoding)
-                    content = f"📊 Data preview:\n{df.head(10).to_markdown(index=False)}\n\n"
-                    content += f"\n📈 Data info:\n"
-                    content += f"- Total rows: {len(df)}\n"
-                    content += f"- Total columns: {len(df.columns)}\n"
-                    content += f"- Columns: {', '.join(df.columns)}\n"
-                    content += f"\n📋 Column data types:\n"
-                    for col, dtype in df.dtypes.items():
-                        content += f"- {col}: {dtype}\n"
-                    null_counts = df.isnull().sum()
-                    if null_counts.any():
-                        content += f"\n⚠️ Missing values:\n"
-                        for col, null_count in null_counts[null_counts > 0].items():
-                            content += f"- {col}: {null_count} missing\n"
-                    return content, "csv"
-                except UnicodeDecodeError:
-                    continue
-            raise UnicodeDecodeError(f"❌ Could not read the file with any supported encoding ({', '.join(encodings)})")
-        else:
-            encodings = ['utf-8', 'cp949', 'euc-kr', 'latin1']
-            for encoding in encodings:
-                try:
-                    with open(file.name, 'r', encoding=encoding) as f:
-                        content = f.read()
-                        return content, "text"
-                except UnicodeDecodeError:
-                    continue
-            raise UnicodeDecodeError(f"❌ Could not read the file with any supported encoding ({', '.join(encodings)})")
-    except Exception as e:
-        return f"❌ File read error: {str(e)}", "error"
-
 def chat(message, history, uploaded_file, system_message="", max_tokens=4000, temperature=0.7, top_p=0.9):
     if not message:
         return "", history
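
The block removed above appears to be an accidental second copy of the entire module, pasted so that its leading import os landed on the same physical line as the final yield of the first chat definition. Even aside from that fused line, a duplicated top-level definition is dead weight in Python, since the interpreter keeps only the last binding of a name. A minimal sketch (not from app.py) of that shadowing behavior:

# Minimal sketch, not from app.py: rebinding a module-level name
# silently discards the earlier definition.
def chat():
    return "first definition"

def chat():  # shadows the definition above
    return "second definition"

print(chat())  # prints: second definition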