Spaces: Running on Zero

Commit: test

Files changed:
- mysite/interpreter/prompt.py +57 -0
- mysite/routers/fastapi.py +3 -2
mysite/interpreter/prompt.py
ADDED
@@ -0,0 +1,57 @@
import os

from langchain.chains import LLMChain
from langchain_core.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
)
from langchain_core.messages import SystemMessage
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain_groq import ChatGroq


def prompt_genalate(word):
    # Get the Groq API key from the environment
    groq_api_key = os.getenv("api_key")
    groq_chat = ChatGroq(groq_api_key=groq_api_key, model_name="llama3-70b-8192")

    system_prompt = "あなたはプロンプト作成の優秀なアシスタントです。答えは日本語で答えます"
    conversational_memory_length = 50

    memory = ConversationBufferWindowMemory(
        k=conversational_memory_length, memory_key="chat_history", return_messages=True
    )

    # The interactive while-loop was dropped; the question now arrives as an argument.
    user_question = word  # formerly: input("Enter your question: ")
    response = ""

    if user_question.lower() == "exit":
        print("Goodbye!")
        return user_question

    if user_question:
        # Construct a chat prompt template from its components
        prompt = ChatPromptTemplate.from_messages(
            [
                # System prompt that is always included
                SystemMessage(content=system_prompt),
                # Conversation history kept by ConversationBufferWindowMemory
                MessagesPlaceholder(variable_name="chat_history"),
                # The user's input
                HumanMessagePromptTemplate.from_template("{human_input}"),
            ]
        )

        conversation = LLMChain(
            llm=groq_chat,
            prompt=prompt,
            verbose=False,
            memory=memory,
        )
        response = conversation.predict(human_input=user_question)

        print("User: ", user_question)
        print("Assistant:", response)

    return user_question + "[役割]" + response
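For reference, the new helper can be exercised on its own: prompt_genalate reads the Groq key from the api_key environment variable, sends the question through an LLMChain backed by llama3-70b-8192, and returns the original question joined to the assistant's reply by the literal "[役割]" marker. A minimal sketch, assuming api_key is already set in the environment (the sample question below is purely illustrative):

import os
from mysite.interpreter.prompt import prompt_genalate

# Assumes the Groq key is exported, e.g. as a Spaces secret named "api_key".
assert os.getenv("api_key"), "set the api_key environment variable first"

# Sample question (illustrative); the return value has the form
# "<question>[役割]<assistant reply>".
result = prompt_genalate("プロンプト作成のコツを教えてください")
print(result)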
mysite/routers/fastapi.py
CHANGED
@@ -11,7 +11,7 @@ import pkgutil
from mysite.libs.utilities import validate_signature, no_process_file
#from mysite.database.database import ride,create_ride
from controllers.gra_04_database.rides import test_set_lide
-
+from mysite.interpreter.prompt import prompt_genalate

logger = logging.getLogger(__name__)

@@ -92,10 +92,11 @@ def setup_webhook_routes(app: FastAPI):
    logger.info("------------------------------------------")
    first_line = text.split('\n')[0]
    logger.info(f"User ID: {user_id}, Text: {text}")
+   promps = prompt_genalate(text)
    #test_set_lide(text,"a1")
    #no_process_file(text, "ai")
    title = """本番テスト 入力内容のみ設定 データ精査をしないと返信には使えない """
-   subtitle =
+   subtitle = promps
    link_text = "test"
    link_url = "url"

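Because prompt_genalate returns the question and the model's answer joined by the literal "[役割]" marker, subtitle currently carries both parts. If only one part is wanted later, the string can be split on that marker; a minimal sketch with a hypothetical helper (not part of this commit):

# Hypothetical helper: split the value returned by prompt_genalate
# into its question and answer parts on the "[役割]" marker.
def split_generated(promps: str) -> tuple[str, str]:
    question, _, answer = promps.partition("[役割]")
    return question, answer

# Example with a literal in the same shape as the return value:
q, a = split_generated("明日の予定は？[役割]明日は10時に会議があります")
print(q)  # 明日の予定は？
print(a)  # 明日は10時に会議があります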