File size: 7,665 Bytes
f9b9d56 83ee74c 574f73e 705c5b5 83ee74c f9b9d56 1fd4ab2 ad9db85 1fd4ab2 bee778b ad9db85 bee778b f9b9d56 2af89cf 1fd4ab2 99d94e0 da20c1b 99d94e0 1fd4ab2 da20c1b 1fd4ab2 2af89cf da20c1b 2af89cf da20c1b 1fd4ab2 da20c1b 1fd4ab2 2af89cf 705c5b5 0997082 2af89cf 705c5b5 99d94e0 705c5b5 2af89cf 99d94e0 521288b 1fd4ab2 521288b 1fd4ab2 521288b 1fd4ab2 521288b 1fd4ab2 705c5b5 521288b 1fd4ab2 705c5b5 1fd4ab2 0997082 d57197f 4fec5a3 bee778b 4fec5a3 da20c1b 4fec5a3 705c5b5 4fec5a3 705c5b5 4fec5a3 1fd4ab2 4fec5a3 1fd4ab2 4fec5a3 63c5e29 b45e256 4fec5a3 a3b8f2f b45e256 7ffca43 4fec5a3 7ffca43 4fec5a3 7ffca43 b45e256 4fec5a3 63c5e29 4fec5a3 7ffca43 4fec5a3 99d94e0 d57197f 7ffca43 63c5e29 4fec5a3 63c5e29 4fec5a3 63c5e29 4fec5a3 63c5e29 f9b9d56 b45e256 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 |
import gradio as gr
from huggingface_hub import InferenceClient
import os
import pandas as pd
from typing import List, Tuple
# LLM Models Definition
# Maps a human-readable display name (used as the key passed to get_client)
# to the Hugging Face Hub repo id handed to InferenceClient.
LLM_MODELS = {
    "Cohere c4ai-crp-08-2024": "CohereForAI/c4ai-command-r-plus-08-2024",  # Default
    "Meta Llama3.3-70B": "meta-llama/Llama-3.3-70B-Instruct"  # Backup model
}
def get_client(model_name="Cohere c4ai-crp-08-2024"):
    """Build an InferenceClient for the requested model.

    Falls back to the backup model ("Meta Llama3.3-70B") on any failure,
    including an unknown ``model_name`` key. Authenticates with the
    HF_TOKEN environment variable in both cases.
    """
    token = os.getenv("HF_TOKEN")
    try:
        repo_id = LLM_MODELS[model_name]
        return InferenceClient(repo_id, token=token)
    except Exception:
        # Primary model unavailable (or name unknown) — use the backup.
        backup_repo = LLM_MODELS["Meta Llama3.3-70B"]
        return InferenceClient(backup_repo, token=token)
def analyze_file_content(content, file_type):
    """Analyze file content and return a one-line structural summary.

    Parameters
    ----------
    content : str
        The file's text. For datasets this is expected to be a
        markdown-table preview (as produced by read_uploaded_file).
    file_type : str
        "parquet"/"csv" selects the dataset branch; any other value is
        classified as code or prose by keyword sniffing.

    Returns
    -------
    str
        A human-readable summary line.
    """
    if file_type in ['parquet', 'csv']:
        try:
            lines = content.split('\n')
            header = lines[0]
            # Markdown table row with N columns has N+1 pipe characters.
            columns = header.count('|') - 1
            # Subtract header row, separator row and trailing blank line.
            rows = len(lines) - 3
            return f"π Dataset Structure: {columns} columns, {rows} data samples"
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; narrowed to Exception.
            return "β Dataset structure analysis failed"
    lines = content.split('\n')
    total_lines = len(lines)
    # Heuristic: content mentioning def/class/import/function is treated as code.
    if any(keyword in content.lower() for keyword in ['def ', 'class ', 'import ', 'function']):
        functions = len([line for line in lines if 'def ' in line])
        classes = len([line for line in lines if 'class ' in line])
        imports = len([line for line in lines if 'import ' in line or 'from ' in line])
        return f"π» Code Structure: {total_lines} lines (Functions: {functions}, Classes: {classes}, Imports: {imports})"
    # Otherwise summarize as prose. (Removed the unused non_empty_lines local.)
    paragraphs = content.count('\n\n') + 1
    words = len(content.split())
    return f"π Document Structure: {total_lines} lines, {paragraphs} paragraphs, ~{words} words"
def read_uploaded_file(file):
    """Read an uploaded file and render a text preview of its contents.

    Parameters
    ----------
    file :
        An object with a ``.name`` path attribute (Gradio file upload),
        or None when nothing was uploaded.

    Returns
    -------
    tuple[str, str]
        (content, tag) where tag is "parquet", "csv", "text", or "error";
        ("", "") when ``file`` is None. Errors are reported in-band via the
        "error" tag rather than raised.
    """
    if file is None:
        return "", ""
    try:
        file_ext = os.path.splitext(file.name)[1].lower()
        if file_ext == '.parquet':
            # Parquet is binary, so no encoding fallback loop is needed.
            df = pd.read_parquet(file.name, engine='pyarrow')
            content = df.head(10).to_markdown(index=False)
            return content, "parquet"
        elif file_ext == '.csv':
            encodings = ['utf-8', 'cp949', 'euc-kr', 'latin1']
            for encoding in encodings:
                try:
                    df = pd.read_csv(file.name, encoding=encoding)
                    content = f"π Data Preview:\n{df.head(10).to_markdown(index=False)}\n\n"
                    content += f"\nπ Data Information:\n"
                    content += f"- Total Rows: {len(df)}\n"
                    content += f"- Total Columns: {len(df.columns)}\n"
                    content += f"- Column List: {', '.join(df.columns)}\n"
                    content += f"\nπ Column Data Types:\n"
                    for col, dtype in df.dtypes.items():
                        content += f"- {col}: {dtype}\n"
                    null_counts = df.isnull().sum()
                    if null_counts.any():
                        content += f"\nβ οΈ Missing Values:\n"
                        for col, null_count in null_counts[null_counts > 0].items():
                            content += f"- {col}: {null_count} missing\n"
                    return content, "csv"
                except UnicodeDecodeError:
                    continue
            # BUG FIX: UnicodeDecodeError() requires 5 positional arguments
            # (encoding, object, start, end, reason); the original one-argument
            # raise threw TypeError and the intended message was lost.
            # ValueError carries the message and is still caught below.
            raise ValueError(f"β Unable to read file with supported encodings ({', '.join(encodings)})")
        else:
            # Any other extension is treated as plain text.
            encodings = ['utf-8', 'cp949', 'euc-kr', 'latin1']
            for encoding in encodings:
                try:
                    with open(file.name, 'r', encoding=encoding) as f:
                        content = f.read()
                        return content, "text"
                except UnicodeDecodeError:
                    continue
            # BUG FIX: same invalid UnicodeDecodeError construction as above.
            raise ValueError(f"β Unable to read file with supported encodings ({', '.join(encodings)})")
    except Exception as e:
        # Surface any failure to the UI as an "error" tuple instead of raising.
        return f"β Error reading file: {str(e)}", "error"
def format_history(history):
    """Convert (user, assistant) tuple pairs into role/content message dicts.

    Assistant entries that are empty or None are skipped, so an
    in-progress turn yields only its user message.
    """
    messages = []
    for user_text, bot_text in history:
        turn = [{"role": "user", "content": user_text}]
        if bot_text:
            turn.append({"role": "assistant", "content": bot_text})
        messages.extend(turn)
    return messages
# System prompt setup (original Korean comment was mojibake-garbled and split
# across two lines by a stray control character, which broke the syntax).
def chat(message, history, uploaded_file, system_message="", max_tokens=4000, temperature=0.7, top_p=0.9):
    """Chat entry point for the Gradio UI.

    Parameters
    ----------
    message : str
        The user's new message.
    history :
        Prior conversation turns (presumably the tuple pairs consumed by
        format_history — TODO confirm against the full file).
    uploaded_file :
        Optional uploaded file reference.
    system_message, max_tokens, temperature, top_p :
        Generation settings from the advanced-settings panel; none of them
        is used in the visible body — TODO confirm.

    NOTE(review): only the system-prefix assignment is visible here — no
    model call, streaming loop, or return statement — so as shown this
    function returns None. The rest of the body was likely lost in the same
    extraction garbling that mangled the Korean text below; verify against
    the original file. The prefix string (a Korean persona/system prompt,
    mojibake-garbled) is runtime data and is kept byte-for-byte.
    """
    system_prefix = """μ λ μ¬λ¬λΆμ μΉκ·Όνκ³ μ§μ μΈ AI μ΄μμ€ν΄νΈμ
λλ€. λ€μκ³Ό κ°μ μμΉμΌλ‘ μν΅νκ² μ΅λλ€:
1. π€ μΉκ·Όνκ³ κ³΅κ°μ μΈ νλλ‘ λν
2. π‘ λͺ
ννκ³ μ΄ν΄νκΈ° μ¬μ΄ μ€λͺ
μ 곡
3. π― μ§λ¬Έμ μλλ₯Ό μ νν νμ
νμ¬ λ§μΆ€ν λ΅λ³
4. π νμν κ²½μ° μ
λ‘λλ νμΌ λ΄μ©μ μ°Έκ³ νμ¬ κ΅¬μ²΄μ μΈ λμ μ 곡
5. β¨ μΆκ°μ μΈ ν΅μ°°κ³Ό μ μμ ν΅ν κ°μΉ μλ λν
νμ μμ λ°λ₯΄κ³ μΉμ νκ² μλ΅νλ©°, νμν κ²½μ° κ΅¬μ²΄μ μΈ μμλ μ€λͺ
μ μΆκ°νμ¬
μ΄ν΄λ₯Ό λκ² μ΅λλ€."""
# UI layout (original Korean comment was mojibake-garbled and split across
# two lines by a stray control character).
# NOTE(review): every Korean UI string below arrived mojibake-garbled, and
# several string literals were split mid-token by stray control characters
# rendered as newlines; the fragments are rejoined here verbatim to restore
# valid syntax. Recover the original Korean text from version control if
# possible.
with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", title="AI μ΄μμ€ν΄νΈ π€") as demo:
    # Header banner.
    gr.HTML(
        """
        <div style="text-align: center; max-width: 800px; margin: 0 auto;">
            <h1 style="font-size: 3em; font-weight: 600; margin: 0.5em;">AI μ΄μμ€ν΄νΈ π€</h1>
            <h3 style="font-size: 1.2em; margin: 1em;">λΉμ μ λ λ ν λν ννΈλ π¬</h3>
        </div>
        """
    )
    with gr.Row():
        with gr.Column(scale=2):
            # Main conversation panel. type="messages" expects role/content
            # dicts, matching format_history's output.
            chatbot = gr.Chatbot(
                height=600,
                label="λνμ°½ π¬",
                type="messages"
            )
            # User input box (label hidden; placeholder shown instead).
            msg = gr.Textbox(
                label="λ©μμ§ μλ ₯",
                show_label=False,
                placeholder="무μμ΄λ λ¬Όμ΄λ³΄μΈμ... π",
                container=False
            )
            with gr.Row():
                clear = gr.ClearButton([msg, chatbot], value="λνλ΄μ© μ§μ°κΈ°")
                send = gr.Button("보λ΄κΈ° π€")
        with gr.Column(scale=1):
            gr.Markdown("### νμΌ μλ‘λ π\nμ§μ νμ: νμ€νΈ, μ½λ, CSV, Parquet νμΌ")
            # File picker; type="filepath" hands read_uploaded_file an object
            # with a .name path attribute.
            file_upload = gr.File(
                label="νμΌ μ ν",
                file_types=["text", ".csv", ".parquet"],
                type="filepath"
            )
            # Generation settings matching chat()'s keyword parameters.
            with gr.Accordion("κ³ κΈ μ€μ βοΈ", open=False):
                system_message = gr.Textbox(label="μμ€ν λ©μμ§ π", value="")
                max_tokens = gr.Slider(minimum=1, maximum=8000, value=4000, label="μ΅λ ν ν° μ π")
                temperature = gr.Slider(minimum=0, maximum=1, value=0.7, label="μ°½μμ± μμ€ π‘οΈ")
                top_p = gr.Slider(minimum=0, maximum=1, value=0.9, label="μλ΅ λ€μμ± π")
    # Canned example prompts shown under the input box.
    gr.Examples(
        examples=[
            ["μλνμΈμ! μ΄λ€ λμμ΄ νμνμ κ°μ? π€"],
            ["μ΄ λ΄μ©μ λν΄ μ’ λ μμΈν μ€λͺν΄ μ£Όμ€ μ μλμ? π‘"],
            ["μ κ° μ΄ν΄νκΈ° μ½κ² μ€λͺν΄ μ£Όμκ² μ΄μ? π"],
            ["μ΄ λ΄μ©μ μ€μ λ‘ μ΄λ»κ² νμ©ν μ μμκΉμ? π―"],
            ["μΆκ°λ‘ μ‘°μΈν΄ μ£Όμ€ λ΄μ©μ΄ μμΌμ κ°μ? β¨"],
            ["κΆκΈν μ μ΄ λ μλλ° μ¬μ€λ΄λ λ κΉμ? π€"],
        ],
        inputs=msg,
    )
    # NOTE(review): no submit/click handlers are wired to `msg` or `send` in
    # the visible source, so chat() is never invoked here — confirm whether
    # the event-binding lines were lost in the same extraction garbling.

if __name__ == "__main__":
    demo.launch()