Commit 2d6909b
Parent(s): f716a54

fixed gemini client

Files changed:
- ai_generate.py +22 -9
- app.py +3 -0
ai_generate.py CHANGED

@@ -3,6 +3,10 @@ from openai import OpenAI
 import os
 from transformers import pipeline
 from groq import Groq
+import base64
+import vertexai
+from vertexai.generative_models import GenerativeModel, Part, FinishReason
+import vertexai.preview.generative_models as generative_models
 import google.generativeai as genai
 import anthropic
 from langchain_community.document_loaders import PyMuPDFLoader

@@ -31,9 +35,8 @@ openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
 # give access to all APIs for GCP instance
 # gcloud auth application-default login
 genai.configure(api_key=os.environ.get("GENAI_API_KEY"))
-
-
-)
+vertexai.init(project="proprietary-info-detection", location="us-central1")
+gemini_client = GenerativeModel("gemini-1.5-pro-001")
 claude_client = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))


@@ -109,14 +112,24 @@ def generate_openai(text, model, openai_client):


 def generate_gemini(text, model, gemini_client):
+    safety_settings = {
+        generative_models.HarmCategory.HARM_CATEGORY_HATE_SPEECH: generative_models.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+        generative_models.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: generative_models.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+        generative_models.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: generative_models.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+        generative_models.HarmCategory.HARM_CATEGORY_HARASSMENT: generative_models.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+    }
+    generation_config = {
+        "max_output_tokens": 1024,
+        "temperature": 1.0,
+        "top_p": 1.0,
+    }
     response = gemini_client.generate_content(
-        text,
-        generation_config=
-
-
-        },
+        [text],
+        generation_config=generation_config,
+        safety_settings=safety_settings,
+        stream=False,
     )
-    return response.text
+    return response.text


 def generate_claude(text, model, claude_client):
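The commit switches the Gemini path to the Vertex AI SDK: vertexai.init plus a module-level GenerativeModel client, and a generate_gemini that passes explicit generation and safety settings. Below is a minimal standalone sketch of that new path, assembled from the hunks above; the project ID, region, and model name are the ones in the diff, and it assumes GCP application-default credentials are already set up (the "gcloud auth application-default login" comment above).

# Minimal sketch of the Vertex AI client path introduced in this commit.
# Assumes the project/model from the diff and valid application-default credentials.
import vertexai
from vertexai.generative_models import GenerativeModel
import vertexai.preview.generative_models as generative_models

vertexai.init(project="proprietary-info-detection", location="us-central1")
gemini_client = GenerativeModel("gemini-1.5-pro-001")

def generate_gemini(text, model, gemini_client):
    # Block medium-and-above content across the four standard harm categories.
    safety_settings = {
        generative_models.HarmCategory.HARM_CATEGORY_HATE_SPEECH: generative_models.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        generative_models.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: generative_models.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        generative_models.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: generative_models.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        generative_models.HarmCategory.HARM_CATEGORY_HARASSMENT: generative_models.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
    }
    generation_config = {
        "max_output_tokens": 1024,
        "temperature": 1.0,
        "top_p": 1.0,
    }
    # Non-streaming call: the prompt goes in as a single-element content list.
    response = gemini_client.generate_content(
        [text],
        generation_config=generation_config,
        safety_settings=safety_settings,
        stream=False,
    )
    return response.text

if __name__ == "__main__":
    print(generate_gemini("Say hello.", "gemini-1.5-pro-001", gemini_client))

With stream=False the call returns one aggregated response, so response.text is available directly instead of having to iterate over streamed chunks.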
app.py CHANGED

@@ -1,3 +1,6 @@
+"""
+nohup python3 app.py &
+"""
 import openai
 import gradio as gr
 from typing import Dict, List
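The only change to app.py is a module docstring recording the background launch command; the diff does not show how the new Gemini helper is reached from the UI. For context, a purely hypothetical sketch of wiring generate_gemini into a Gradio handler follows: the import of gemini_client relies on the module-level client added in ai_generate.py above, while the Blocks layout, component labels, and the on_generate name are assumptions, not the Space's actual code.

# Hypothetical wiring of the Gemini path into a Gradio UI; app.py's real layout is not shown in this diff.
import gradio as gr
from ai_generate import generate_gemini, gemini_client  # assumes the module-level client from the hunk above

def on_generate(prompt: str) -> str:
    # Delegate to the Vertex AI-backed helper added in this commit.
    return generate_gemini(prompt, "gemini-1.5-pro-001", gemini_client)

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    output = gr.Textbox(label="Gemini output")
    gr.Button("Generate").click(on_generate, inputs=prompt, outputs=output)

if __name__ == "__main__":
    # The docstring added in this commit suggests launching in the background:
    #   nohup python3 app.py &
    demo.launch()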