import asyncio
import copy
import json
import os
import random
import time
from ast import literal_eval
from typing import List

import gradio as gr
import openai
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatAnthropic, ChatOpenAI
from langchain.llms import OpenAI
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.schema import (
    HumanMessage,
)
from pydantic import BaseModel, Field

from prompts import (
    system_structure_template,
    system_epics_template,
    story_cards_template,
    schema_template,
    entities_template,
    update_schema_template,
    check_message_template,
    update_story_cards_template,
)
import helper | |
myl = []  # non-empty once the welcome message has been shown
# Read the OpenAI key from the environment (e.g. a Space secret); never hard-code a real key.
openai.api_key = os.environ.get("OPENAI_API_KEY", "")
epics = [] | |
latest_nums = []  # conversation state codes; -1 is appended whenever the options menu is re-shown
# Delete any leftover *_gradio.json files from a previous run
for file in os.listdir():
    if file.endswith('_gradio.json'):
        helper.delete(file)
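# Conversation flow implemented below: the first message triggers a welcome prompt
# asking for a system description; the description is then run through four chained
# prompts (system structure -> epics -> entities -> data schema) and the generated
# epics are rendered as markdown; afterwards a numeric menu lets the user add a new
# user story card (option 1), which also updates any affected existing cards.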
async def random_response(message, history, my_list=myl):
    print(message)
    if helper.Validating_Input(message):  # Reject questions about GPT itself
        return helper.apologise(random.randint(0, 3))
    if not helper.is_all_english_alpha_numeric_or_space(message):  # Reject non-English input
        return helper.apologise_lang(random.randint(0, 3))
    # ============================================================================================
    if message.isnumeric():
        latest_nums.append(int(message) + 1)
        if int(message) == 1:
            return helper.create_user_story()
        elif int(message) == 2:
            latest_nums.append(-1)
            return "Sorry, this option will be available in the next version!" + helper.choose()
        else:
            latest_nums.append(-1)
            return "Please enter a valid number."
    if len(myl) == 0:
        latest_nums.append(1)
        myl.append(1)
        return "Welcome to فزاع \n\n" + "Please write your system description in order to build a system"
    else:
        if latest_nums[-1] not in [1, 2, 3]:
            latest_nums.pop()
            return "Please enter a valid number. " + helper.choose()
        if latest_nums[-1] == 1:
            myl.append(1)
            # First prompt: system structure
            start_time = time.time()
            response = helper.first_prompt(system_name=message)
            elapsed_time = time.time() - start_time
            print(f"first_prompt took {elapsed_time:.2f} seconds to run.")
            # Second prompt: epics
            start_time = time.time()
            epics = await helper.second_prompt(response, system_name=message)
            elapsed_time = time.time() - start_time
            print(f"epics took {elapsed_time:.2f} seconds to run.")
            # Third prompt: entities
            start_time = time.time()
            entities = helper.third_prompt(epics)
            elapsed_time = time.time() - start_time
            print(f"entities took {elapsed_time:.2f} seconds to run.")
            # Fourth prompt: data schema
            start_time = time.time()
            all_schemas = await helper.fourth_prompt(entities, epics)
            elapsed_time = time.time() - start_time
            print(f"all_schemas took {elapsed_time:.2f} seconds to run.")
            # Render the generated epics as markdown and append the options menu
            output = helper.markdown('epics_gradio.json') + helper.choose()
            latest_nums.append(-1)
            return output
        if latest_nums[-1] == 2:
            # res = check('user story card', message).lower()
            # print(res)
            # if res == 'no':
            #     return apologise(random.randint(0, 3)) + "\n\n" + create_user_story()
            # Identify which parts of the system the request affects
            start_time = time.time()
            helper.affected_parts(user_request=message)
            elapsed_time = time.time() - start_time
            print(f"affected_parts took {elapsed_time:.2f} seconds to run.")
            output = ""
            # Generate the new user story card and render it as markdown
            start_time = time.time()
            new_card = helper.generate_story_cards(user_request=message)
            elapsed_time = time.time() - start_time
            print(f"generate_story_cards took {elapsed_time:.2f} seconds to run.")
            start_time = time.time()
            output += helper.user_story_to_markdown(new_card) + "\n\n\n"
            elapsed_time = time.time() - start_time
            print(f"user_story_to_markdown took {elapsed_time:.2f} seconds to run.")
            # Update any existing story cards affected by the new request
            start_time = time.time()
            story_ids = helper.check_affected_story_ids()
            elapsed_time = time.time() - start_time
            print(f"check_affected_story_ids took {elapsed_time:.2f} seconds to run.")
            start_time = time.time()
            if len(story_ids) != 0:
                updated_cards = await helper.update_affected_story_cards(story_ids)
                output += "=============================== The following are the affected user story cards ==================================\n\n"
                for card in updated_cards:
                    output += helper.user_story_to_markdown(card)
            elapsed_time = time.time() - start_time
            print(f"update_affected_story_cards took {elapsed_time:.2f} seconds to run.")
            output += helper.choose()
            latest_nums.append(-1)
            return output
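# One possible way to wire this async handler into a chat UI, assuming the installed
# Gradio version provides gr.ChatInterface (which accepts async callbacks with a
# (message, history) signature, so random_response fits as-is); the app's actual
# launch code may differ:
#
#     demo = gr.ChatInterface(fn=random_response, title="فزاع")
#     demo.launch()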