eaglelandsonce committed
Commit d8dc0f7
Parent(s): 70f6c84

Upload 4 files
Files changed:
- crewai/tools/gemini_tools.py  +66 -0
- crewai/tools/mixtral_tools.py +82 -0
- crewai/tools/phi2_tools.py    +52 -0
- crewai/tools/zephyr_tools.py  +86 -0
crewai/tools/gemini_tools.py
ADDED
@@ -0,0 +1,66 @@
# tools created using Gemini

import json
import os

import google.generativeai as genai
from google.api_core import exceptions

import requests
from langchain.tools import tool

# Retrieve the API key from the environment
GOOGLE_AI_STUDIO = os.environ.get('GOOGLE_API_KEY')

# Ensure the API key is available
if not GOOGLE_AI_STUDIO:
    raise ValueError("API key not found. Please set the GOOGLE_API_KEY environment variable.")

# Configure the Gemini client and model
genai.configure(api_key=GOOGLE_AI_STUDIO)
model = genai.GenerativeModel('gemini-pro')


class GeminiSearchTools():
    @tool("Gemini search the internet")
    def gemini_search(query):
        """
        Searches for content based on the provided query using the Gemini model.
        Handles DeadlineExceeded exceptions from the Google API.

        Args:
            query (str): The search query.

        Returns:
            str: The response text from the Gemini model or an error message.
        """
        try:
            response = model.generate_content(query)
            return response.text
        except exceptions.DeadlineExceeded as e:
            # Handle the DeadlineExceeded exception
            print("Error: Deadline Exceeded -", str(e))
            # Return a custom message instead of propagating the timeout
            return "Error: The request timed out. Please try again later."

    @tool("Gemini search news on the internet")
    def gemini_search_news(query):
        """
        Searches for news content based on the provided query using the Gemini model.
        Handles DeadlineExceeded exceptions from the Google API.

        Args:
            query (str): The search query.

        Returns:
            str: The response text from the Gemini model or an error message.
        """
        try:
            response = model.generate_content(query)
            return response.text
        except exceptions.DeadlineExceeded as e:
            # Handle the DeadlineExceeded exception
            print("Error: Deadline Exceeded -", str(e))
            # Return a custom message instead of propagating the timeout
            return "Error: The request timed out. Please try again later."
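
If these tools are meant to be handed to crewAI agents, as the repository layout suggests, a minimal wiring sketch could look like the following. It is not part of this commit, and the agent's role, goal, and backstory strings are illustrative assumptions.

# Hypothetical usage sketch (not in this commit): attach the Gemini tools to a crewAI agent.
from crewai import Agent

researcher = Agent(
    role="Researcher",                                              # illustrative
    goal="Answer questions with Gemini-backed search",              # illustrative
    backstory="Relies on Gemini for general and news queries.",     # illustrative
    tools=[GeminiSearchTools.gemini_search, GeminiSearchTools.gemini_search_news],
    verbose=True,
)
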
crewai/tools/mixtral_tools.py
ADDED
@@ -0,0 +1,82 @@
# tools created using Mixtral

import json
import os

import requests
from huggingface_hub import InferenceClient
from langchain.tools import tool
import gradio as gr

client = InferenceClient(
    "mistralai/Mixtral-8x7B-Instruct-v0.1"
)


# Helper method: build a Mixtral instruct-format prompt from a message and
# a history of (user, assistant) turns.
def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt


history = ""


class MixtralSearchTools():
    @tool("Mixtral Normal")
    def mixtral_normal(prompt, history="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
        """
        Searches for content based on the provided prompt using the Mixtral model.

        Args:
            prompt (str): The search query.

        Returns:
            str: The response text from the Mixtral model, streamed incrementally.
        """
        generate_kwargs = {
            "temperature": temperature,
            "max_new_tokens": max_new_tokens,
            "top_p": top_p,
            "repetition_penalty": repetition_penalty,
            "do_sample": True,
            "seed": 42,
        }

        formatted_prompt = format_prompt(prompt, history)

        stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
        output = ""
        for response in stream:
            output += response.token.text
            yield output
        return output

    @tool("Mixtral Crazy")
    def mixtral_crazy(prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
        """
        Searches for content based on the provided prompt using the Mixtral model, but with the
        guardrails removed, so responses can be wild, off the wall, and sometimes scary.

        Args:
            prompt (str): The search query.

        Returns:
            str: The response text from the Mixtral model, streamed incrementally.
        """
        generate_kwargs = {
            "temperature": temperature,
            "max_new_tokens": max_new_tokens,
            "top_p": top_p,
            "repetition_penalty": repetition_penalty,
            "do_sample": True,
            "seed": 42,
        }

        stream = client.text_generation(prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
        output = ""
        for response in stream:
            output += response.token.text
            yield output
        return output
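
For reference, a quick, hypothetical check of what format_prompt builds when history is a list of (user, assistant) pairs; the commit itself only ever passes an empty history.

# Hypothetical example, not part of the commit.
history_pairs = [("Hi", "Hello! How can I help?")]
print(format_prompt("Summarize crewAI in one line.", history_pairs))
# <s>[INST] Hi [/INST] Hello! How can I help?</s> [INST] Summarize crewAI in one line. [/INST]
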
crewai/tools/phi2_tools.py
ADDED
@@ -0,0 +1,52 @@
# tools created using Phi2

import json
import os

import requests
from langchain.tools import tool

import spaces
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
from threading import Thread

# Pick a device: MPS (Apple Silicon) takes precedence, then CUDA, else CPU
device = "cpu"
if torch.cuda.is_available():
    device = "cuda"
if torch.backends.mps.is_available():
    device = "mps"


tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/phi-2",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    trust_remote_code=True,
).to(device)


#@spaces.GPU(enable_queue=True)
class Phi2SearchTools():
    @tool("Phi2 Normal")
    def phi2_search(text, temperature=.75, maxLen=2048):
        """
        Generates a response to the provided text using the microsoft/phi-2 model,
        streaming tokens from a background generation thread.

        Args:
            text (str): The input prompt.

        Returns:
            str: The accumulated response text, yielded incrementally as it is generated.
        """
        inputs = tokenizer([text], return_tensors="pt").to(device)
        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=maxLen, temperature=temperature)
        # Run generation in a separate thread so the streamer can be consumed here
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        t = ""
        for out in streamer:
            t += out
            yield t
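
The streaming pattern above can also be exercised on its own, outside the langchain tool wrapper. A minimal local check, assuming the tokenizer, model, and device defined in this file are already loaded:

# Hypothetical local check, not part of the commit.
prompt = "Write one sentence about electric cars."
inputs = tokenizer([prompt], return_tensors="pt").to(device)
streamer = TextIteratorStreamer(tokenizer, skip_special_tokens=True)
Thread(target=model.generate, kwargs=dict(inputs, streamer=streamer, max_new_tokens=64)).start()
text = ""
for chunk in streamer:
    text += chunk
print(text)
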
crewai/tools/zephyr_tools.py
ADDED
@@ -0,0 +1,86 @@
# tools created using Zephyr

import json
import os

import requests
from huggingface_hub import InferenceClient
from langchain.tools import tool
import gradio as gr

client = InferenceClient(
    "HuggingFaceH4/zephyr-7b-beta"
)


# Helper method: build an instruct-format prompt from a message and
# a history of (user, assistant) turns.
def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt


history = ""


class ZephyrSearchTools():
    @tool("Zephyr Normal")
    def zephyr_normal(prompt, history="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
        """
        Searches for content based on the provided prompt using the Zephyr model.

        Args:
            prompt (str): The search query.

        Returns:
            str: The response text from the Zephyr model, streamed incrementally.
        """
        generate_kwargs = {
            "temperature": temperature,
            "max_new_tokens": max_new_tokens,
            "top_p": top_p,
            "repetition_penalty": repetition_penalty,
            "do_sample": True,
            "seed": 42,
        }

        formatted_prompt = format_prompt(prompt, history)

        stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
        output = ""
        for response in stream:
            output += response.token.text
            yield output
        return output

    @tool("Zephyr Crazy")
    def zephyr_crazy(prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
        """
        Searches for content based on the provided prompt using the Zephyr model, but with the
        guardrails removed, so responses can be wild, off the wall, and sometimes scary.

        Args:
            prompt (str): The search query.

        Returns:
            str: The response text from the Zephyr model, streamed incrementally.
        """
        generate_kwargs = {
            "temperature": temperature,
            "max_new_tokens": max_new_tokens,
            "top_p": top_p,
            "repetition_penalty": repetition_penalty,
            "do_sample": True,
            "seed": 42,
        }

        stream = client.text_generation(prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
        output = ""
        for response in stream:
            output += response.token.text
            yield output
        return output