Phoenix21 committed on
Commit
54fafa1
·
verified ·
1 Parent(s): db87ae8

Update pipeline.py

Browse files
Files changed (1) hide show
  1. pipeline.py +12 -0
pipeline.py CHANGED
@@ -13,11 +13,15 @@ from langchain.llms.base import LLM # Import LLM
13
 
14
  # Mistral Client Setup
15
  from mistralai import Mistral # Import the Mistral client
 
16
 
17
  # Initialize Mistral API client
18
  mistral_api_key = os.environ.get("MISTRAL_API_KEY") # Ensure your Mistral API key is set
19
  client = Mistral(api_key=mistral_api_key)
20
 
 
 
 
21
  # Load spaCy model for NER and download the spaCy model if not already installed
22
  def install_spacy_model():
23
  try:
@@ -40,6 +44,14 @@ def moderate_text(query: str) -> str:
40
  Classifies the query as harmful or not using Mistral Moderation via Mistral API.
41
  Returns "OutOfScope" if harmful, otherwise returns the original query.
42
  """
 
 
 
 
 
 
 
 
43
  # Use the moderation API to evaluate if the query is harmful
44
  response = client.classifiers.moderate_chat(
45
  model="mistral-moderation-latest",
 
13
 
14
  # Mistral Client Setup
15
  from mistralai import Mistral # Import the Mistral client
16
+ from pydantic_ai import Agent # Import Pydantic AI's Agent
17
 
18
  # Initialize Mistral API client
19
  mistral_api_key = os.environ.get("MISTRAL_API_KEY") # Ensure your Mistral API key is set
20
  client = Mistral(api_key=mistral_api_key)
21
 
22
+ # Initialize Pydantic AI Agent (for text validation)
23
+ pydantic_agent = Agent('mistral:mistral-large-latest', result_type=str)
24
+
25
  # Load spaCy model for NER and download the spaCy model if not already installed
26
  def install_spacy_model():
27
  try:
 
44
  Classifies the query as harmful or not using Mistral Moderation via Mistral API.
45
  Returns "OutOfScope" if harmful, otherwise returns the original query.
46
  """
47
+ # Validate the text type using Pydantic AI's Agent
48
+ try:
49
+ # Use Pydantic AI agent to ensure correct text type
50
+ pydantic_agent.run_sync(query)
51
+ except Exception as e:
52
+ print(f"Error validating text with Pydantic AI: {e}")
53
+ return "Invalid text format."
54
+
55
  # Use the moderation API to evaluate if the query is harmful
56
  response = client.classifiers.moderate_chat(
57
  model="mistral-moderation-latest",