import gradio as gr
from bs4 import BeautifulSoup
import requests

from acogsphere import acf
from bcogsphere import bcf
from ecogsphere import ecf

import pandas as pd
import math
import json
import sqlite3
import huggingface_hub
import shutil
import os
import datetime
from apscheduler.schedulers.background import BackgroundScheduler
import random
import time

from huggingface_hub import hf_hub_download
#hf_hub_download(repo_id="CogSphere/aCogSphere", filename="./reviews.csv")
from huggingface_hub import login
from datasets import load_dataset
#dataset = load_dataset("csv", data_files="./data.csv")
DB_FILE = "./reviewsitr.db"

TOKEN = os.environ.get('HF_KEYY')

repo = huggingface_hub.Repository(
    local_dir="data",
    repo_type="dataset",
    clone_from="CognitiveScience/csdhdata",
    use_auth_token=TOKEN
)
repo.git_pull()
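# Note: huggingface_hub.Repository is deprecated in recent huggingface_hub
# releases in favour of the HTTP-based HfApi. A minimal, untested sketch of
# an equivalent upload (same repo id and token as above; the file path and
# path_in_repo are assumptions) would be:
#
#   from huggingface_hub import HfApi
#   api = HfApi(token=TOKEN)
#   api.upload_file(
#       path_or_fileobj=DB_FILE,
#       path_in_repo="reviewsitr.db",
#       repo_id="CognitiveScience/csdhdata",
#       repo_type="dataset",
#   )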
#TOKEN2 = HF_TOKEN
#login(token=TOKEN2)

# Set db to latest
#shutil.copyfile("./reviews2.db", DB_FILE)

# Create table if it doesn't already exist
db = sqlite3.connect(DB_FILE)
try:
    db.execute("SELECT * FROM reviews").fetchall()
    #db.execute("SELECT * FROM reviews2").fetchall()
    db.close()
except sqlite3.OperationalError:
    db.execute(
        '''
        CREATE TABLE reviews (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                              created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
                              name TEXT, view TEXT, duration TEXT)
        ''')
    db.commit()
    db.close()
def get_latest_reviews(db: sqlite3.Connection):
    reviews = db.execute("SELECT * FROM reviews ORDER BY id DESC LIMIT 100").fetchall()
    total_reviews = db.execute("SELECT COUNT(id) FROM reviews").fetchone()[0]
    reviews = pd.DataFrame(reviews, columns=["id", "date_created", "name", "view", "duration"])
    return reviews, total_reviews

def get_latest_reviews2(db: sqlite3.Connection):
    reviews2 = db.execute("SELECT * FROM reviews2 ORDER BY id DESC LIMIT 100").fetchall()
    total_reviews2 = db.execute("SELECT COUNT(id) FROM reviews2").fetchone()[0]
    reviews2 = pd.DataFrame(reviews2, columns=["id", "title", "link", "channel", "description", "views", "uploaded", "duration", "durationString"])
    return reviews2, total_reviews2
def ccogsphere(name: str, rate: int, celsci: str):
    db = sqlite3.connect(DB_FILE)
    cursor = db.cursor()

    # Look up the entered name via the ecogsphere helper (ecf) and store one
    # row per returned video.
    celsci2 = celsci.split()
    print("split", celsci2, celsci)
    celsci2 = celsci2[0] + "+" + celsci2[1]
    celsci2 = ecf(celsci2)
    df = pd.DataFrame.from_dict(celsci2["videos"])
    celsci2 = json.dumps(celsci2["videos"])
    for index, row in df.iterrows():
        view = str(row["views"])
        duration = str(row["duration"])
        print(view, duration)
        cursor.execute("INSERT INTO reviews(name, view, duration) VALUES(?,?,?)",
                       [celsci + str(index + 1), view, duration])
    db.commit()
    reviews, total_reviews = get_latest_reviews(db)
    db.close()

    # Forward the result to the companion Space.
    r = requests.post(url='https://ccml-persistent-data2.hf.space/api/predict/',
                      json={"data": [celsci + " ", celsci2]})
    return reviews, total_reviews
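# Note: the POST above targets the legacy /api/predict/ route of another
# Gradio Space. On newer Gradio versions the same call is usually made through
# gradio_client; a minimal, untested sketch (the Space URL is taken from the
# request above, the api_name is an assumption) would be:
#
#   from gradio_client import Client
#   client = Client("https://ccml-persistent-data2.hf.space/")
#   result = client.predict(celsci + " ", celsci2, api_name="/predict")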
def run_actr():
    from python_actr import log_everything
    #code1="tim = MyAgent()"
    #code2="subway=MyEnv()"
    #code3="subway.agent=tim"
    #code4="log_everything(subway)"
    from dcogsphere import RockPaperScissors
    from dcogsphere import ProceduralPlayer
    #from dcogsphere import logy

    env = RockPaperScissors()
    env.model1 = ProceduralPlayer()
    env.model1.choice = env.choice1
    env.model2 = ProceduralPlayer()
    env.model2.choice = env.choice2
    env.run()
def run_ecs(inp):
    try:
        result = ecf(inp)
        df = pd.DataFrame.from_dict(result["videos"])
    except sqlite3.OperationalError:
        print("db error")
        return None, 0  # nothing to store if the lookup failed

    df = df.drop(df.columns[4], axis=1)
    db = sqlite3.connect(DB_FILE)
    #cursor = db.cursor()
    #cursor.execute("INSERT INTO reviews2(title, link, thumbnail,channel, description, views, uploaded, duration, durationString) VALUES(?,?,?,?,?,?,?,?,?)", [title, link, thumbnail,channel, description, views, uploaded, duration, durationString])
    df.to_sql('reviews2', db, if_exists='replace', index=False)
    #db.commit()
    reviews2, total_reviews2 = get_latest_reviews(db)
    db.close()
    #print ("print000", total_reviews2,reviews2)
    return reviews2, total_reviews2
def load_data():
    db = sqlite3.connect(DB_FILE)
    reviews, total_reviews = get_latest_reviews(db)
    db.close()
    return reviews, total_reviews

def load_data2():
    db = sqlite3.connect(DB_FILE)
    reviews2, total_reviews2 = get_latest_reviews2(db)
    db.close()
    return reviews2, total_reviews2
css = "footer {visibility: hidden}"

# Applying style to highlight the maximum value in each row
#styler = df.style.highlight_max(color = 'lightgreen', axis = 0)

with gr.Blocks(css=css) as demo:
    with gr.Row():
        with gr.Column():
            data = gr.Dataframe()  #styler)
            count = gr.Number(label="Rates!", visible=False)
    with gr.Row():
        with gr.Column():
            name = gr.Textbox(label="a", visible=False)  #, placeholder="What is your name?")
            rate = gr.Textbox(label="b", visible=False)  #, placeholder="What is your name?") #gr.Radio(label="How satisfied are you with using gradio?", choices=[1, 2, 3, 4, 5])
            celsci = gr.Textbox(label="c", visible=False)  #, lines=10, placeholder="Do you have any feedback on gradio?")
            #run_actr()
            submit = gr.Button(value=".", visible=False)

    submit.click(ccogsphere, [name, rate, celsci], [data, count])
    demo.load(load_data, None, [data, count])
def secwork(name):
    #if name=="abc":
    #run_code()
    load_data()
    #return "Hello " + name + "!"
def backup_db():
    shutil.copyfile(DB_FILE, "./reviews01.db")
    db = sqlite3.connect(DB_FILE)
    reviews = db.execute("SELECT * FROM reviews").fetchall()
    db.close()
    pd.DataFrame(reviews).to_csv("./reviews.csv", index=False)
    print("updating db")
    repo.push_to_hub(blocking=False, commit_message=f"Updating data at {datetime.datetime.now()}")
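# Note: repo was cloned into local_dir="data", and Repository.push_to_hub()
# only uploads files inside that working directory. If the intent is to
# publish the backups created above, writing them under ./data/ (the exact
# filenames here are assumptions) would look like:
#
#   shutil.copyfile(DB_FILE, "./data/reviewsitr.db")
#   pd.DataFrame(reviews).to_csv("./data/reviews.csv", index=False)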
def backup_db_csv():
    shutil.copyfile(DB_FILE, "./reviews02.db")
    db = sqlite3.connect(DB_FILE)
    reviews = db.execute("SELECT * FROM reviews").fetchall()
    db.close()
    pd.DataFrame(reviews).to_csv("./reviews2.csv", index=False)
    print("updating db csv")
    dataset = load_dataset("csv", data_files="./reviews2.csv")
    repo.push_to_hub(blocking=False)  #, commit_message=f"Updating data-csv at {datetime.datetime.now()}")
#path1=hf_hub_url()
#print (path1)

#hf_hub_download(repo_id="CogSphere/aCogSphere", filename="./*.csv")
#hf_hub_download(repo_id="CognitiveScience/csdhdata", filename="./*.db")
#hf_hub_download(repo_id="CogSphere/aCogSphere", filename="./*.md")
#hf_hub_download(repo_id="CognitiveScience/csdhdata", filename="./*.md")

#def load_data2():
#    db = sqlite3.connect(DB_FILE)
#    reviews, total_reviews = get_latest_reviews(db)
#    #db.close()
#    demo.load(load_data, None, [reviews, total_reviews])
#    #return reviews, total_reviews
# Background jobs. The intervals are given in seconds; the values below
# (6.1e10 seconds and up) are on the order of thousands of years, so in
# practice these jobs never fire.
scheduler1 = BackgroundScheduler()
scheduler1.add_job(func=run_actr, trigger="interval", seconds=61000000000)
scheduler1.start()

scheduler1 = BackgroundScheduler()
scheduler1.add_job(func=load_data, trigger="interval", seconds=91000000000)
scheduler1.start()

scheduler2 = BackgroundScheduler()
scheduler2.add_job(func=backup_db, trigger="interval", seconds=131000000000)
scheduler2.start()

scheduler3 = BackgroundScheduler()
scheduler3.add_job(func=backup_db_csv, trigger="interval", seconds=161000000000)
scheduler3.start()
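# A minimal sketch of a more typical schedule (the hourly and daily intervals
# here are assumptions, not values from the original app):
#
#   scheduler2.add_job(func=backup_db, trigger="interval", seconds=60 * 60)           # hourly
#   scheduler3.add_job(func=backup_db_csv, trigger="interval", seconds=24 * 60 * 60)  # daily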
demo.launch()