Spaces:
Running
Running
Update pipeline.py
Browse files — pipeline.py: +4 −1
pipeline.py
CHANGED
@@ -55,7 +55,10 @@ def moderate_text(query: str) -> str:
         Classifies the query as harmful or not using Mistral Moderation via Pydantic AI.
         Returns "OutOfScope" if harmful, otherwise returns the original query.
         """
-
+        # Use the agent to interact with the moderation model, here assuming predict() method
+        response = mistral_agent.predict({"inputs": [query]})  # Adjust if necessary
+
+        # Assuming response contains a 'results' field similar to the previous example
         categories = response['results'][0]['categories']

         # Check if harmful content is flagged in moderation categories