import gradio as gr
import requests
import json
import os
from datetime import datetime, timedelta, timezone
from concurrent.futures import ThreadPoolExecutor, as_completed
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from openai import OpenAI
import pathlib
import sqlite3
import pytz

# Target companies/keywords to track (the KOREAN_COMPANIES name is historical;
# the list is mostly US tech names plus broad crypto/finance terms)
KOREAN_COMPANIES = [
    "NVIDIA",
    "ALPHABET",
    "APPLE",
    "TESLA",
    "AMAZON",
    "MICROSOFT",
    "META",
    "INTEL",
    "SAMSUNG",
    "HYNIX",
    "BITCOIN",
    "crypto",
    "stock",
    "Economics",
    "Finance",
    "investing"
]

def convert_to_seoul_time(timestamp_str):
    """
    Append the KST label to a timestamp string.
    Timestamps written by save_to_db() are already in Seoul time, so the string
    is parsed and re-formatted with a 'KST' suffix rather than shifted.
    """
    try:
        dt = datetime.strptime(timestamp_str, '%Y-%m-%d %H:%M:%S')
        seoul_tz = pytz.timezone('Asia/Seoul')
        seoul_time = seoul_tz.localize(dt)
        return seoul_time.strftime('%Y-%m-%d %H:%M:%S KST')
    except Exception as e:
        print(f"Time conversion error: {str(e)}")
        return timestamp_str
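# Example (sketch):
#   convert_to_seoul_time("2024-01-01 12:00:00")  ->  "2024-01-01 12:00:00 KST"
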
def analyze_sentiment_batch(articles, client):
    """
    Perform a comprehensive sentiment analysis of the news articles using the OpenAI API.
    """
    try:
        # Combine all articles into a single text
        combined_text = "\n\n".join([
            f"Title: {article.get('title', '')}\nContent: {article.get('snippet', '')}"
            for article in articles
        ])
        prompt = f"""Please perform an overall sentiment analysis of the following collection of news articles:

News content:
{combined_text}

Please follow this format:
1. Overall Sentiment: [Positive/Negative/Neutral]
2. Key Positive Factors:
   - [Item1]
   - [Item2]
3. Key Negative Factors:
   - [Item1]
   - [Item2]
4. Summary: [Detailed explanation]
"""
        response = client.chat.completions.create(
            model="CohereForAI/c4ai-command-r-plus-08-2024",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.3,
            max_tokens=1000
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Sentiment analysis failed: {str(e)}"
# Initialize the database
def init_db():
    """
    Initialize the SQLite database (search_results.db) if it doesn't already exist.
    """
    db_path = pathlib.Path("search_results.db")
    conn = sqlite3.connect(db_path)
    c = conn.cursor()
    c.execute('''CREATE TABLE IF NOT EXISTS searches
                 (id INTEGER PRIMARY KEY AUTOINCREMENT,
                  keyword TEXT,
                  country TEXT,
                  results TEXT,
                  timestamp DATETIME DEFAULT CURRENT_TIMESTAMP)''')
    conn.commit()
    conn.close()
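# Quick way to inspect stored rows from a shell (assumes the sqlite3 CLI is installed):
#   sqlite3 search_results.db \
#     "SELECT keyword, country, timestamp FROM searches ORDER BY timestamp DESC LIMIT 5;"
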
def save_to_db(keyword, country, results):
    """
    Save the search results for a specific (keyword, country) combination into the database.
    """
    conn = sqlite3.connect("search_results.db")
    c = conn.cursor()
    seoul_tz = pytz.timezone('Asia/Seoul')
    now = datetime.now(seoul_tz)
    timestamp = now.strftime('%Y-%m-%d %H:%M:%S')
    c.execute("""INSERT INTO searches
                 (keyword, country, results, timestamp)
                 VALUES (?, ?, ?, ?)""",
              (keyword, country, json.dumps(results), timestamp))
    conn.commit()
    conn.close()

def load_from_db(keyword, country):
    """
    Load the most recent search results for a specific (keyword, country) combination from the database.
    Returns the data and the timestamp.
    """
    conn = sqlite3.connect("search_results.db")
    c = conn.cursor()
    c.execute(
        "SELECT results, timestamp FROM searches WHERE keyword=? AND country=? ORDER BY timestamp DESC LIMIT 1",
        (keyword, country)
    )
    result = c.fetchone()
    conn.close()
    if result:
        return json.loads(result[0]), convert_to_seoul_time(result[1])
    return None, None
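# Example (sketch):
#   data, ts = load_from_db("NVIDIA", "United States")
#   if data:
#       print(ts, len(data.get("articles", [])))
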
def display_results(articles):
    """
    Convert a list of news articles into a Markdown string for display.
    """
    output = ""
    for idx, article in enumerate(articles, 1):
        output += f"### {idx}. {article['title']}\n"
        output += f"Source: {article['channel']}\n"
        output += f"Time: {article['time']}\n"
        output += f"Link: {article['link']}\n"
        output += f"Summary: {article['snippet']}\n\n"
    return output
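# Each article renders roughly as (illustrative values):
#   ### 1. Some headline
#   Source: some-news-site
#   Time: 2 hours ago
#   Link: https://...
#   Summary: first lines of the article snippet...
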
########################################
# 1) Search => Articles + Analysis, then save to DB
########################################
def search_company(company):
    """
    For a single company (or keyword), search US news.
    1) Retrieve a list of articles
    2) Perform sentiment analysis
    3) Save results to DB
    4) Return (articles + analysis) in a single output.
    """
    error_message, articles = serphouse_search(company, "United States")
    if not error_message and articles:
        # Perform sentiment analysis
        analysis = analyze_sentiment_batch(articles, client)
        # Prepare data to save in DB
        store_dict = {
            "articles": articles,
            "analysis": analysis
        }
        save_to_db(company, "United States", store_dict)
        # Prepare output for display
        output = display_results(articles)
        output += f"\n\n### Analysis Report\n{analysis}\n"
        return output
    if error_message:
        return f"An error occurred: {error_message}"
    return f"No search results found for {company}."

########################################
# 2) Load => Return articles + analysis from DB
########################################
def load_company(company):
    """
    Load the most recent US news search results for the given company (or keyword) from the database,
    and return the articles + analysis in a single output.
    """
    data, timestamp = load_from_db(company, "United States")
    if data:
        articles = data.get("articles", [])
        analysis = data.get("analysis", "")
        output = f"### {company} Search Results\nLast Updated: {timestamp}\n\n"
        output += display_results(articles)
        output += f"\n\n### Analysis Report\n{analysis}\n"
        return output
    return f"No saved results for {company}."

########################################
# 3) Stats => overall analysis report
########################################
def show_stats():
    """
    For each company in KOREAN_COMPANIES, report:
      - the most recent timestamp in the DB
      - the number of articles stored
      - the sentiment analysis result
    under the title "EarnBOT Analysis Report".
    """
    conn = sqlite3.connect("search_results.db")
    c = conn.cursor()
    output = "## EarnBOT Analysis Report\n\n"
    data_list = []
    for company in KOREAN_COMPANIES:
        c.execute("""
            SELECT results, timestamp
            FROM searches
            WHERE keyword = ?
            ORDER BY timestamp DESC
            LIMIT 1
        """, (company,))
        row = c.fetchone()
        if row:
            results_json, timestamp = row
            data_list.append((company, timestamp, results_json))
    conn.close()

    def analyze_data(item):
        comp, tstamp, results_json = item
        data = json.loads(results_json)
        articles = data.get("articles", [])
        analysis = data.get("analysis", "")
        count_articles = len(articles)
        return (comp, tstamp, count_articles, analysis)

    results_list = []
    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [executor.submit(analyze_data, dl) for dl in data_list]
        for future in as_completed(futures):
            results_list.append(future.result())

    for comp, tstamp, count, analysis in results_list:
        seoul_time = convert_to_seoul_time(tstamp)
        output += f"### {comp}\n"
        output += f"- Last updated: {seoul_time}\n"
        output += f"- Number of articles stored: {count}\n\n"
        if analysis:
            output += "#### News Sentiment Analysis\n"
            output += f"{analysis}\n\n"
        output += "---\n\n"
    return output
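# Note on show_stats(): analyze_data() only parses JSON already fetched from SQLite,
# so the thread pool above is CPU-light; a plain loop would behave the same.
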
def search_all_companies():
    """
    Search all companies in KOREAN_COMPANIES (in parallel),
    perform sentiment analysis + save to DB => return Markdown of all results.
    """
    overall_result = "# [Search Results for All Companies]\n\n"

    def do_search(comp):
        return comp, search_company(comp)

    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [executor.submit(do_search, c) for c in KOREAN_COMPANIES]
        for future in as_completed(futures):
            comp, res_text = future.result()
            overall_result += f"## {comp}\n"
            overall_result += res_text + "\n\n"
    return overall_result
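# Note on search_all_companies(): as_completed() yields futures in completion order,
# so company sections appear in whatever order their searches finish, not in
# KOREAN_COMPANIES order.
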
def load_all_companies():
    """
    Load articles + analysis for all companies in KOREAN_COMPANIES from the DB => return Markdown.
    """
    overall_result = "# [All Companies Data Output]\n\n"
    for comp in KOREAN_COMPANIES:
        overall_result += f"## {comp}\n"
        overall_result += load_company(comp)
        overall_result += "\n"
    return overall_result

def full_summary_report():
    """
    1) Search all companies (in parallel) -> 2) Load results -> 3) Show sentiment analysis stats
    Return a combined report with all three steps.
    """
    # 1) Search all companies => store to DB
    search_result_text = search_all_companies()

    # 2) Load all results => from DB
    load_result_text = load_all_companies()

    # 3) Show stats => EarnBOT Analysis Report
    stats_text = show_stats()

    combined_report = (
        "# Full Analysis Summary Report\n\n"
        "Executed in the following order:\n"
        "1. Search all companies (parallel) + sentiment analysis => "
        "2. Load results from DB => 3. Show overall sentiment analysis stats\n\n"
        f"{search_result_text}\n\n"
        f"{load_result_text}\n\n"
        "## [Overall Sentiment Analysis Stats]\n\n"
        f"{stats_text}"
    )
    return combined_report

########################################
# Additional feature: User custom search
########################################
def search_custom(query, country):
    """
    For a user-provided (query, country):
    1) Search + sentiment analysis => save to DB
    2) Load from DB => display articles + analysis
    """
    error_message, articles = serphouse_search(query, country)
    if error_message:
        return f"An error occurred: {error_message}"
    if not articles:
        return "No results were found for your query."

    # 1) Perform analysis
    analysis = analyze_sentiment_batch(articles, client)

    # 2) Save to DB
    save_data = {
        "articles": articles,
        "analysis": analysis
    }
    save_to_db(query, country, save_data)

    # 3) Reload from DB
    loaded_data, timestamp = load_from_db(query, country)
    if not loaded_data:
        return "Failed to load data from DB."

    # 4) Prepare final output
    out = "## [Custom Search Results]\n\n"
    out += f"**Keyword**: {query}\n\n"
    out += f"**Country**: {country}\n\n"
    out += f"**Timestamp**: {timestamp}\n\n"
    arts = loaded_data.get("articles", [])
    analy = loaded_data.get("analysis", "")
    out += display_results(arts)
    out += f"### News Sentiment Analysis\n{analy}\n"
    return out

########################################
# API Authentication
########################################
ACCESS_TOKEN = os.getenv("HF_TOKEN")
if not ACCESS_TOKEN:
    raise ValueError("HF_TOKEN environment variable is not set")

client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    api_key=ACCESS_TOKEN,
)
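# Note: the OpenAI SDK is pointed at Hugging Face's OpenAI-compatible inference
# endpoint here, so the model id passed to chat.completions.create() must be one
# served on that endpoint.
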
API_KEY = os.getenv("SERPHOUSE_API_KEY")
if not API_KEY:
    raise ValueError("SERPHOUSE_API_KEY environment variable is not set")

########################################
# Country-specific settings
########################################
COUNTRY_LANGUAGES = {
    "United States": "en",
    "KOREA": "ko",
    "United Kingdom": "en",
    "Taiwan": "zh-TW",
    "Canada": "en",
    "Australia": "en",
    "Germany": "de",
    "France": "fr",
    "Japan": "ja",
    "China": "zh",
    "India": "hi",
    "Brazil": "pt",
    "Mexico": "es",
    "Russia": "ru",
    "Italy": "it",
    "Spain": "es",
    "Netherlands": "nl",
    "Singapore": "en",
    "Hong Kong": "zh-HK",
    "Indonesia": "id",
    "Malaysia": "ms",
    "Philippines": "tl",
    "Thailand": "th",
    "Vietnam": "vi",
    "Belgium": "nl",
    "Denmark": "da",
    "Finland": "fi",
    "Ireland": "en",
    "Norway": "no",
    "Poland": "pl",
    "Sweden": "sv",
    "Switzerland": "de",
    "Austria": "de",
    "Czech Republic": "cs",
    "Greece": "el",
    "Hungary": "hu",
    "Portugal": "pt",
    "Romania": "ro",
    "Turkey": "tr",
    "Israel": "he",
    "Saudi Arabia": "ar",
    "United Arab Emirates": "ar",
    "South Africa": "en",
    "Argentina": "es",
    "Chile": "es",
    "Colombia": "es",
    "Peru": "es",
    "Venezuela": "es",
    "New Zealand": "en",
    "Bangladesh": "bn",
    "Pakistan": "ur",
    "Egypt": "ar",
    "Morocco": "ar",
    "Nigeria": "en",
    "Kenya": "sw",
    "Ukraine": "uk",
    "Croatia": "hr",
    "Slovakia": "sk",
    "Bulgaria": "bg",
    "Serbia": "sr",
    "Estonia": "et",
    "Latvia": "lv",
    "Lithuania": "lt",
    "Slovenia": "sl",
    "Luxembourg": "lb",
    "Malta": "mt",
    "Cyprus": "el",
    "Iceland": "is"
}

COUNTRY_LOCATIONS = {
    "United States": "United States",
    "KOREA": "South Korea",
    "United Kingdom": "United Kingdom",
    "Taiwan": "Taiwan",
    "Canada": "Canada",
    "Australia": "Australia",
    "Germany": "Germany",
    "France": "France",
    "Japan": "Japan",
    "China": "China",
    "India": "India",
    "Brazil": "Brazil",
    "Mexico": "Mexico",
    "Russia": "Russia",
    "Italy": "Italy",
    "Spain": "Spain",
    "Netherlands": "Netherlands",
    "Singapore": "Singapore",
    "Hong Kong": "Hong Kong",
    "Indonesia": "Indonesia",
    "Malaysia": "Malaysia",
    "Philippines": "Philippines",
    "Thailand": "Thailand",
    "Vietnam": "Vietnam",
    "Belgium": "Belgium",
    "Denmark": "Denmark",
    "Finland": "Finland",
    "Ireland": "Ireland",
    "Norway": "Norway",
    "Poland": "Poland",
    "Sweden": "Sweden",
    "Switzerland": "Switzerland",
    "Austria": "Austria",
    "Czech Republic": "Czech Republic",
    "Greece": "Greece",
    "Hungary": "Hungary",
    "Portugal": "Portugal",
    "Romania": "Romania",
    "Turkey": "Turkey",
    "Israel": "Israel",
    "Saudi Arabia": "Saudi Arabia",
    "United Arab Emirates": "United Arab Emirates",
    "South Africa": "South Africa",
    "Argentina": "Argentina",
    "Chile": "Chile",
    "Colombia": "Colombia",
    "Peru": "Peru",
    "Venezuela": "Venezuela",
    "New Zealand": "New Zealand",
    "Bangladesh": "Bangladesh",
    "Pakistan": "Pakistan",
    "Egypt": "Egypt",
    "Morocco": "Morocco",
    "Nigeria": "Nigeria",
    "Kenya": "Kenya",
    "Ukraine": "Ukraine",
    "Croatia": "Croatia",
    "Slovakia": "Slovakia",
    "Bulgaria": "Bulgaria",
    "Serbia": "Serbia",
    "Estonia": "Estonia",
    "Latvia": "Latvia",
    "Lithuania": "Lithuania",
    "Slovenia": "Slovenia",
    "Luxembourg": "Luxembourg",
    "Malta": "Malta",
    "Cyprus": "Cyprus",
    "Iceland": "Iceland"
}

def translate_query(query, country):
    """
    Use the unofficial Google Translation API to translate the query into the target country's language.
    If the query is already in English, or if translation fails, return the original query.
    """
    try:
        if is_english(query):
            return query
        if country in COUNTRY_LANGUAGES:
            if country == "KOREA":
                return query
            target_lang = COUNTRY_LANGUAGES[country]
            url = "https://translate.googleapis.com/translate_a/single"
            params = {
                "client": "gtx",
                "sl": "auto",
                "tl": target_lang,
                "dt": "t",
                "q": query
            }
            session = requests.Session()
            retries = Retry(total=3, backoff_factor=0.5)
            session.mount('https://', HTTPAdapter(max_retries=retries))
            response = session.get(url, params=params, timeout=(5, 10))
            response.raise_for_status()
            translated_text = response.json()[0][0][0]
            return translated_text
        return query
    except Exception as e:
        print(f"Translation error: {str(e)}")
        return query

def is_english(text):
    """
    Check if a string is (mostly) English by verifying character code ranges.
    """
    return all(ord(char) < 128 for char in text.replace(' ', '').replace('-', '').replace('_', ''))
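# Caveat: this is an ASCII heuristic, not real language detection; accented text
# (e.g. "café") or any non-Latin script is treated as non-English.
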
def search_serphouse(query, country, page=1, num_result=100):
    """
    Send a real-time search request to the SerpHouse API,
    specifying the 'news' tab (sort_by=date) for the given query.
    Returns a dict with 'results' or 'error'.
    """
    url = "https://api.serphouse.com/serp/live"
    now = datetime.now(timezone.utc)
    yesterday = now - timedelta(days=1)
    date_range = f"{yesterday.strftime('%Y-%m-%d')},{now.strftime('%Y-%m-%d')}"
    translated_query = translate_query(query, country)
    payload = {
        "data": {
            "q": translated_query,
            "domain": "google.com",
            "loc": COUNTRY_LOCATIONS.get(country, "United States"),
            "lang": COUNTRY_LANGUAGES.get(country, "en"),
            "device": "desktop",
            "serp_type": "news",
            "page": str(page),
            "num": str(num_result),
            "date_range": date_range,
            "sort_by": "date"
        }
    }
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "authorization": f"Bearer {API_KEY}"
    }
    try:
        session = requests.Session()
        retries = Retry(
            total=5,
            backoff_factor=1,
            status_forcelist=[429, 500, 502, 503, 504],
            allowed_methods=["POST"]
        )
        adapter = HTTPAdapter(max_retries=retries)
        session.mount('http://', adapter)
        session.mount('https://', adapter)
        response = session.post(
            url,
            json=payload,
            headers=headers,
            timeout=(30, 30)
        )
        response.raise_for_status()
        return {"results": response.json(), "translated_query": translated_query}
    except requests.exceptions.Timeout:
        return {
            "error": "Search timed out. Please try again later.",
            "translated_query": query
        }
    except requests.exceptions.RequestException as e:
        return {
            "error": f"Error during search: {str(e)}",
            "translated_query": query
        }
    except Exception as e:
        return {
            "error": f"Unexpected error occurred: {str(e)}",
            "translated_query": query
        }
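# The raw SerpHouse JSON is expected (by format_results_from_raw below) to nest the
# article list under results -> results -> news, with each item carrying title,
# url/link, snippet, channel/source, time/date, and img/thumbnail fields.
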
def format_results_from_raw(response_data):
    """
    Process the SerpHouse API response data and return (error_message, article_list).
    """
    if "error" in response_data:
        return "Error: " + response_data["error"], []
    try:
        results = response_data["results"]
        translated_query = response_data["translated_query"]
        news_results = results.get('results', {}).get('results', {}).get('news', [])
        if not news_results:
            return "No search results found.", []

        # Filter out Korean domains and Korean keywords (example filtering)
        korean_domains = [
            '.kr', 'korea', 'korean', 'yonhap', 'hankyung', 'chosun',
            'donga', 'joins', 'hani', 'koreatimes', 'koreaherald'
        ]
        korean_keywords = [
            'korea', 'korean', 'seoul', 'busan', 'incheon', 'daegu',
            'gwangju', 'daejeon', 'ulsan', 'sejong'
        ]

        filtered_articles = []
        for idx, result in enumerate(news_results, 1):
            url = result.get("url", result.get("link", "")).lower()
            title = result.get("title", "").lower()
            channel = result.get("channel", result.get("source", "")).lower()
            is_korean_content = (
                any(domain in url or domain in channel for domain in korean_domains) or
                any(keyword in title for keyword in korean_keywords)
            )
            # Exclude Korean content
            if not is_korean_content:
                filtered_articles.append({
                    "index": idx,
                    "title": result.get("title", "No Title"),
                    "link": url,
                    "snippet": result.get("snippet", "No Content"),
                    "channel": result.get("channel", result.get("source", "Unknown")),
                    "time": result.get("time", result.get("date", "Unknown Time")),
                    "image_url": result.get("img", result.get("thumbnail", "")),
                    "translated_query": translated_query
                })
        return "", filtered_articles
    except Exception as e:
        return f"Error processing results: {str(e)}", []

def serphouse_search(query, country):
    """
    Helper function to search and then format results.
    Returns (error_message, article_list).
    """
    response_data = search_serphouse(query, country)
    return format_results_from_raw(response_data)
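# Example (sketch):
#   err, articles = serphouse_search("NVIDIA", "United States")
#   if not err:
#       print(f"{len(articles)} articles after filtering")
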
# Refined, modern, and sleek custom CSS
css = """
body {
    background: linear-gradient(to bottom right, #f9fafb, #ffffff);
    font-family: 'Arial', sans-serif;
}

/* Hide default Gradio footer */
footer {
    visibility: hidden;
}

/* Header/Status area */
#status_area {
    background: rgba(255, 255, 255, 0.9);
    padding: 15px;
    border-bottom: 1px solid #ddd;
    margin-bottom: 20px;
    box-shadow: 0 2px 5px rgba(0,0,0,0.1);
}

/* Results area */
#results_area {
    padding: 10px;
    margin-top: 10px;
}

/* Tabs style */
.tabs {
    border-bottom: 2px solid #ddd !important;
    margin-bottom: 20px !important;
}

.tab-nav {
    border-bottom: none !important;
    margin-bottom: 0 !important;
}

.tab-nav button {
    font-weight: bold !important;
    padding: 10px 20px !important;
    background-color: #f0f0f0 !important;
    border: 1px solid #ccc !important;
    border-radius: 5px !important;
    margin-right: 5px !important;
}

.tab-nav button.selected {
    border-bottom: 2px solid #1f77b4 !important;
    background-color: #e6f2fa !important;
    color: #1f77b4 !important;
}

/* Status message styling */
#status_area .markdown-text {
    font-size: 1.1em;
    color: #2c3e50;
    padding: 10px 0;
}

/* Main container grouping */
.group {
    border: 1px solid #eee;
    padding: 15px;
    margin-bottom: 15px;
    border-radius: 5px;
    background: white;
    transition: all 0.3s ease;
    opacity: 0;
    transform: translateY(20px);
}

.group.visible {
    opacity: 1;
    transform: translateY(0);
}

/* Buttons */
.primary-btn {
    background: #1f77b4 !important;
    border: none !important;
    color: #fff !important;
    border-radius: 5px !important;
    padding: 10px 20px !important;
    cursor: pointer !important;
}

.primary-btn:hover {
    background: #155a8c !important;
}

.secondary-btn {
    background: #f0f0f0 !important;
    border: 1px solid #ccc !important;
    color: #333 !important;
    border-radius: 5px !important;
    padding: 10px 20px !important;
    cursor: pointer !important;
}

.secondary-btn:hover {
    background: #e0e0e0 !important;
}

/* Input fields */
.textbox {
    border: 1px solid #ddd !important;
    border-radius: 4px !important;
}

/* Progress bar container */
.progress-container {
    position: fixed;
    top: 0;
    left: 0;
    width: 100%;
    height: 6px;
    background: #e0e0e0;
    z-index: 1000;
}

/* Progress bar */
.progress-bar {
    height: 100%;
    background: linear-gradient(90deg, #2196F3, #00BCD4);
    box-shadow: 0 0 10px rgba(33, 150, 243, 0.5);
    transition: width 0.3s ease;
    animation: progress-glow 1.5s ease-in-out infinite;
}

/* Progress text */
.progress-text {
    position: fixed;
    top: 8px;
    left: 50%;
    transform: translateX(-50%);
    background: #333;
    color: white;
    padding: 4px 12px;
    border-radius: 15px;
    font-size: 14px;
    z-index: 1001;
    box-shadow: 0 2px 5px rgba(0,0,0,0.2);
}

/* Progress bar animation */
@keyframes progress-glow {
    0% {
        box-shadow: 0 0 5px rgba(33, 150, 243, 0.5);
    }
    50% {
        box-shadow: 0 0 20px rgba(33, 150, 243, 0.8);
    }
    100% {
        box-shadow: 0 0 5px rgba(33, 150, 243, 0.5);
    }
}

/* Loading state */
.loading {
    opacity: 0.7;
    pointer-events: none;
    transition: opacity 0.3s ease;
}

/* Responsive design for smaller screens */
@media (max-width: 768px) {
    .group {
        padding: 10px;
        margin-bottom: 15px;
    }
    .progress-text {
        font-size: 12px;
        padding: 3px 10px;
    }
}

/* Example section styling */
.examples-table {
    margin-top: 10px !important;
    margin-bottom: 20px !important;
}

.examples-table button {
    background-color: #f0f0f0 !important;
    border: 1px solid #ddd !important;
    border-radius: 4px !important;
    padding: 5px 10px !important;
    margin: 2px !important;
    transition: all 0.3s ease !important;
}

.examples-table button:hover {
    background-color: #e0e0e0 !important;
    transform: translateY(-1px) !important;
    box-shadow: 0 2px 5px rgba(0,0,0,0.1) !important;
}

.examples-table .label {
    font-weight: bold !important;
    color: #444 !important;
    margin-bottom: 5px !important;
}
"""

# --- Gradio Interface (UI portion only) ---
with gr.Blocks(css=css, title="NewsAI Service") as iface:
    # Initialize the database first
    init_db()

    with gr.Tabs():
        with gr.Tab("MoneyRadar"):
            # Usage instructions and feature overview
            gr.Markdown(
                """
                ## MoneyRadar: scan the market to spot money-making opportunities.

                **How to Use This Service**:
                1. **Custom Search**: Enter any keyword and choose a target country to fetch the latest news. The system automatically performs sentiment analysis and stores results in the database.
                2. **Generate Full Analysis Summary Report**: This will automatically:
                   - Search all predefined companies (in parallel),
                   - Store the articles and sentiment analysis,
                   - Display a combined overall report.
                3. **Individual Companies**:
                   - **Search**: Fetch and analyze the latest news from Google (for the chosen company).
                   - **Load from DB**: Retrieve the most recent saved news and sentiment analysis from the local database.

                **Features**:
                - **Real-time News Scraping**: Retrieves fresh articles from multiple regions.
                - **Advanced Sentiment Analysis**: Uses state-of-the-art NLP models via the OpenAI API.
                - **Data Persistence**: Automatically saves and retrieves search results in a local SQLite database for quick reference.
                - **Flexible**: Search any keyword/country, or pick from the predefined Big Tech & finance-related terms.

                ---
                """
            )
            # User custom search section
            with gr.Group():
                gr.Markdown("### Custom Search")
                with gr.Row():
                    with gr.Column():
                        user_input = gr.Textbox(
                            label="Enter your keyword",
                            placeholder="e.g., Apple, Samsung, etc.",
                            elem_classes="textbox"
                        )
                    with gr.Column():
                        country_selection = gr.Dropdown(
                            choices=list(COUNTRY_LOCATIONS.keys()),
                            value="United States",
                            label="Select Country"
                        )
                    with gr.Column():
                        custom_search_btn = gr.Button(
                            "Search",
                            variant="primary",
                            elem_classes="primary-btn"
                        )
                custom_search_output = gr.Markdown()
                custom_search_btn.click(
                    fn=search_custom,
                    inputs=[user_input, country_selection],
                    outputs=custom_search_output
                )
            # Button to generate a full report
            with gr.Row():
                full_report_btn = gr.Button(
                    "Generate Full Analysis Summary Report",
                    variant="primary",
                    elem_classes="primary-btn"
                )
            full_report_display = gr.Markdown()
            full_report_btn.click(
                fn=full_summary_report,
                outputs=full_report_display
            )
            # Individual search/load for companies in KOREAN_COMPANIES;
            # one helper renders each company card instead of duplicating the
            # left/right column blocks.
            def company_controls(company):
                """Render one company card: Search / Load buttons plus a results panel."""
                with gr.Group():
                    gr.Markdown(f"### {company}")
                    with gr.Row():
                        search_btn = gr.Button(
                            "Search",
                            variant="primary",
                            elem_classes="primary-btn"
                        )
                        load_btn = gr.Button(
                            "Load from DB",
                            variant="secondary",
                            elem_classes="secondary-btn"
                        )
                    result_display = gr.Markdown()
                    search_btn.click(
                        fn=lambda c=company: search_company(c),
                        outputs=result_display
                    )
                    load_btn.click(
                        fn=lambda c=company: load_company(c),
                        outputs=result_display
                    )

            with gr.Column():
                # Lay the company cards out two per row
                for i in range(0, len(KOREAN_COMPANIES), 2):
                    with gr.Row():
                        with gr.Column():
                            company_controls(KOREAN_COMPANIES[i])
                        if i + 1 < len(KOREAN_COMPANIES):
                            with gr.Column():
                                company_controls(KOREAN_COMPANIES[i + 1])
# Launch the Gradio interface
if __name__ == "__main__":
    iface.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=True,
        ssl_verify=False,
        show_error=True
    )