url | markdown | last_modified |
---|---|---|
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/__init__.py | from neo4j_semantic_ollama.agent import agent_executor
__all__ = ["agent_executor"]
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/agent.py | import os
from typing import List, Tuple
from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad import format_log_to_messages
from langchain.agents.output_parsers import (
ReActJsonSingleInputOutputParser,
)
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.render import render_text_description_and_args
from langchain_community.chat_models import ChatOllama
from langchain_core.messages import AIMessage, HumanMessage
from neo4j_semantic_ollama.information_tool import InformationTool
from neo4j_semantic_ollama.memory_tool import MemoryTool
from neo4j_semantic_ollama.recommendation_tool import RecommenderTool
from neo4j_semantic_ollama.smalltalk_tool import SmalltalkTool
llm = ChatOllama(
model="mixtral",
temperature=0,
base_url=os.environ["OLLAMA_BASE_URL"],
streaming=True,
)
chat_model_with_stop = llm.bind(stop=["\nObservation"])
tools = [InformationTool(), RecommenderTool(), MemoryTool(), SmalltalkTool()]
# Inspiration taken from hub.pull("hwchase17/react-json")
system_message = f"""Answer the following questions as best you can.
You can answer directly if the user is greeting you or similar.
Otherwise, you have access to the following tools:
{render_text_description_and_args(tools).replace('{', '{{').replace('}', '}}')}
The way you use the tools is by specifying a json blob.
Specifically, this json should have an `action` key (with the name of the tool to use)
and an `action_input` key (with the input to the tool going here).
The only values that should be in the "action" field are: {[t.name for t in tools]}
The $JSON_BLOB should only contain a SINGLE action,
do NOT return a list of multiple actions.
Here is an example of a valid $JSON_BLOB:
```
{{{{
"action": $TOOL_NAME,
"action_input": $INPUT
}}}}
```
The $JSON_BLOB must always be enclosed with triple backticks!
ALWAYS use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action:```
$JSON_BLOB
```
Observation: the result of the action...
(this Thought/Action/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Begin! Reminder to always use the exact characters `Final Answer` when responding.
"""
prompt = ChatPromptTemplate.from_messages(
[
(
"user",
system_message,
),
MessagesPlaceholder(variable_name="chat_history"),
("user", "{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
)
def _format_chat_history(chat_history: List[Tuple[str, str]]):
buffer = []
for human, ai in chat_history:
buffer.append(HumanMessage(content=human))
buffer.append(AIMessage(content=ai))
return buffer
agent = (
{
"input": lambda x: x["input"],
"agent_scratchpad": lambda x: format_log_to_messages(x["intermediate_steps"]),
"chat_history": lambda x: (
_format_chat_history(x["chat_history"]) if x.get("chat_history") else []
),
}
| prompt
| chat_model_with_stop
| ReActJsonSingleInputOutputParser()
)
# Add typing for input
class AgentInput(BaseModel):
input: str
chat_history: List[Tuple[str, str]] = Field(
..., extra={"widget": {"type": "chat", "input": "input", "output": "output"}}
)
agent_executor = AgentExecutor(agent=agent, tools=tools).with_types(
input_type=AgentInput
)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/information_tool.py | from typing import Optional, Type
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
# Import things that are needed generically
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools import BaseTool
from neo4j_semantic_ollama.utils import get_candidates, graph
description_query = """
MATCH (m:Movie|Person)
WHERE m.title = $candidate OR m.name = $candidate
MATCH (m)-[r:ACTED_IN|DIRECTED|HAS_GENRE]-(t)
WITH m, type(r) as type, collect(coalesce(t.name, t.title)) as names
WITH m, type+": "+reduce(s="", n IN names | s + n + ", ") as types
WITH m, collect(types) as contexts
WITH m, "type:" + labels(m)[0] + "\ntitle: "+ coalesce(m.title, m.name)
+ "\nyear: "+coalesce(m.released,"") +"\n" +
reduce(s="", c in contexts | s + substring(c, 0, size(c)-2) +"\n") as context
RETURN context LIMIT 1
"""
def get_information(entity: str, type: str) -> str:
candidates = get_candidates(entity, type)
if not candidates:
return "No information was found about the movie or person in the database"
elif len(candidates) > 1:
newline = "\n"
return (
"Need additional information, which of these "
f"did you mean: {newline + newline.join(str(d) for d in candidates)}"
)
data = graph.query(
description_query, params={"candidate": candidates[0]["candidate"]}
)
return data[0]["context"]
class InformationInput(BaseModel):
entity: str = Field(description="movie or a person mentioned in the question")
entity_type: str = Field(
description="type of the entity. Available options are 'movie' or 'person'"
)
class InformationTool(BaseTool):
name = "Information"
description = (
"useful for when you need to answer questions about various actors or movies"
)
args_schema: Type[BaseModel] = InformationInput
def _run(
self,
entity: str,
entity_type: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return get_information(entity, entity_type)
async def _arun(
self,
entity: str,
entity_type: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return get_information(entity, entity_type)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/memory_tool.py | from typing import Optional, Type
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
# Import things that are needed generically
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools import BaseTool
from neo4j_semantic_ollama.utils import get_candidates, get_user_id, graph
store_rating_query = """
MERGE (u:User {userId:$user_id})
WITH u
UNWIND $candidates as row
MATCH (m:Movie {title: row.candidate})
MERGE (u)-[r:RATED]->(m)
SET r.rating = toFloat($rating)
RETURN distinct
'Create a final answer saying that preference has been stored' AS response
"""
def store_movie_rating(movie: str, rating: int):
user_id = get_user_id()
candidates = get_candidates(movie, "movie")
if not candidates:
return "This movie is not in our database"
response = graph.query(
store_rating_query,
params={"user_id": user_id, "candidates": candidates, "rating": rating},
)
try:
return response[0]["response"]
except Exception as e:
print(e)
return "Something went wrong"
class MemoryInput(BaseModel):
movie: str = Field(description="movie the user liked")
rating: int = Field(
description=(
"Rating from 1 to 5, where one represents heavy dislike "
"and 5 represent the user loved the movie"
)
)
class MemoryTool(BaseTool):
name = "Memory"
description = "useful for memorizing which movies the user liked"
args_schema: Type[BaseModel] = MemoryInput
def _run(
self,
movie: str,
rating: int,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return store_movie_rating(movie, rating)
async def _arun(
self,
movie: str,
rating: int,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return store_movie_rating(movie, rating)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/recommendation_tool.py | from typing import Optional, Type
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools import BaseTool
from neo4j_semantic_ollama.utils import get_candidates, get_user_id, graph
recommendation_query_db_history = """
MERGE (u:User {userId:$user_id})
WITH u
// get recommendation candidates
OPTIONAL MATCH (u)-[r1:RATED]->()<-[r2:RATED]-()-[r3:RATED]->(recommendation)
WHERE r1.rating > 3.5 AND r2.rating > 3.5 AND r3.rating > 3.5
AND NOT EXISTS {(u)-[:RATED]->(recommendation)}
// rank and limit recommendations
WITH u, recommendation, count(*) AS count
ORDER BY count DESC LIMIT 3
RETURN 'title:' + recommendation.title + '\nactors:' +
apoc.text.join([(recommendation)<-[:ACTED_IN]-(a) | a.name], ',') +
'\ngenre:' + apoc.text.join([(recommendation)-[:IN_GENRE]->(a) | a.name], ',')
AS movie
"""
recommendation_query_genre = """
MATCH (m:Movie)-[:IN_GENRE]->(g:Genre {name:$genre})
// filter out already seen movies by the user
WHERE NOT EXISTS {
(m)<-[:RATED]-(:User {userId:$user_id})
}
// rank and limit recommendations
WITH m AS recommendation
ORDER BY recommendation.imdbRating DESC LIMIT 3
RETURN 'title:' + recommendation.title + '\nactors:' +
apoc.text.join([(recommendation)<-[:ACTED_IN]-(a) | a.name], ',') +
'\ngenre:' + apoc.text.join([(recommendation)-[:IN_GENRE]->(a) | a.name], ',')
AS movie
"""
def recommendation_query_movie(genre: bool) -> str:
return f"""
MATCH (m1:Movie)<-[r1:RATED]-()-[r2:RATED]->(m2:Movie)
WHERE r1.rating > 3.5 AND r2.rating > 3.5 and m1.title IN $movieTitles
// filter out already seen movies by the user
AND NOT EXISTS {{
(m2)<-[:RATED]-(:User {{userId:$user_id}})
}}
{'AND EXISTS {(m2)-[:IN_GENRE]->(:Genre {name:$genre})}' if genre else ''}
// rank and limit recommendations
WITH m2 AS recommendation, count(*) AS count
ORDER BY count DESC LIMIT 3
RETURN 'title:' + recommendation.title + '\nactors:' +
apoc.text.join([(recommendation)<-[:ACTED_IN]-(a) | a.name], ',') +
'\ngenre:' + apoc.text.join([(recommendation)-[:IN_GENRE]->(a) | a.name], ',')
AS movie
"""
nl = "\n"
def recommend_movie(movie: Optional[str] = None, genre: Optional[str] = None) -> str:
"""
Recommends movies based on the user's history and preference
for a specific movie and/or genre.
Returns:
str: A string containing a list of recommended movies, or an error message.
"""
user_id = get_user_id()
params = {"user_id": user_id, "genre": genre}
if not movie and not genre:
# Try to recommend a movie based on the information in the db
response = graph.query(recommendation_query_db_history, params)
try:
return (
'Recommended movies are: '
f'{f"###Movie {nl}".join([el["movie"] for el in response])}'
)
except Exception:
return "Can you tell us about some of the movies you liked?"
if not movie and genre:
# Recommend top-voted movies in the genre that the user hasn't seen before
response = graph.query(recommendation_query_genre, params)
try:
return (
'Recommended movies are: '
f'{f"###Movie {nl}".join([el["movie"] for el in response])}'
)
except Exception:
return "Something went wrong"
candidates = get_candidates(movie, "movie")
if not candidates:
return "The movie you mentioned wasn't found in the database"
params["movieTitles"] = [el["candidate"] for el in candidates]
query = recommendation_query_movie(bool(genre))
response = graph.query(query, params)
try:
return (
'Recommended movies are: '
f'{f"###Movie {nl}".join([el["movie"] for el in response])}'
)
except Exception:
return "Something went wrong"
all_genres = [
"Action",
"Adventure",
"Animation",
"Children",
"Comedy",
"Crime",
"Documentary",
"Drama",
"Fantasy",
"Film-Noir",
"Horror",
"IMAX",
"Musical",
"Mystery",
"Romance",
"Sci-Fi",
"Thriller",
"War",
"Western",
]
class RecommenderInput(BaseModel):
movie: Optional[str] = Field(description="movie used for recommendation")
genre: Optional[str] = Field(
description=(
"genre used for recommendation. Available options are:" f"{all_genres}"
)
)
class RecommenderTool(BaseTool):
name = "Recommender"
description = "useful for when you need to recommend a movie"
args_schema: Type[BaseModel] = RecommenderInput
def _run(
self,
movie: Optional[str] = None,
genre: Optional[str] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return recommend_movie(movie, genre)
async def _arun(
self,
movie: Optional[str] = None,
genre: Optional[str] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return recommend_movie(movie, genre)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/smalltalk_tool.py | from typing import Optional, Type
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools import BaseTool
response = (
"Create a final answer that says if they "
"have any questions about movies or actors"
)
class SmalltalkInput(BaseModel):
query: Optional[str] = Field(description="user query")
class SmalltalkTool(BaseTool):
name = "Smalltalk"
description = "useful for when user greets you or wants to smalltalk"
args_schema: Type[BaseModel] = SmalltalkInput
def _run(
self,
query: Optional[str] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return response
async def _arun(
self,
query: Optional[str] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return response
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/utils.py | from typing import Dict, List
from langchain_community.graphs import Neo4jGraph
graph = Neo4jGraph()
def get_user_id() -> int:
"""
Placeholder for a function that would normally retrieve
a user's ID
"""
return 1
def remove_lucene_chars(text: str) -> str:
"""Remove Lucene special characters"""
special_chars = [
"+",
"-",
"&",
"|",
"!",
"(",
")",
"{",
"}",
"[",
"]",
"^",
'"',
"~",
"*",
"?",
":",
"\\",
]
for char in special_chars:
if char in text:
text = text.replace(char, " ")
return text.strip()
def generate_full_text_query(input: str) -> str:
"""
Generate a full-text search query for a given input string.
This function constructs a query string suitable for a full-text search.
It processes the input string by splitting it into words and appending a
similarity threshold (~0.8) to each word, then combines them using the AND
operator. Useful for mapping movies and people from user questions
to database values, and allows for some misspellings.
"""
full_text_query = ""
words = [el for el in remove_lucene_chars(input).split() if el]
for word in words[:-1]:
full_text_query += f" {word}~0.8 AND"
full_text_query += f" {words[-1]}~0.8"
return full_text_query.strip()
candidate_query = """
CALL db.index.fulltext.queryNodes($index, $fulltextQuery, {limit: $limit})
YIELD node
RETURN coalesce(node.name, node.title) AS candidate,
[el in labels(node) WHERE el IN ['Person', 'Movie'] | el][0] AS label
"""
def get_candidates(input: str, type: str, limit: int = 3) -> List[Dict[str, str]]:
"""
Retrieve a list of candidate entities from the database based on the input string.
This function queries the Neo4j database using a full-text search. It takes the
input string, generates a full-text query, and executes this query against the
specified index in the database. The function returns a list of candidates
matching the query, with each candidate being a dictionary containing their name
(or title) and label (either 'Person' or 'Movie').
"""
ft_query = generate_full_text_query(input)
candidates = graph.query(
candidate_query, {"fulltextQuery": ft_query, "index": type, "limit": limit}
)
return candidates
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-semantic-layer/README.md | # neo4j-semantic-layer
This template is designed to implement an agent capable of interacting with a graph database like Neo4j through a semantic layer using OpenAI function calling.
The semantic layer equips the agent with a suite of robust tools, allowing it to interact with the graph database based on the user's intent.
Learn more about the semantic layer template in the [corresponding blog post](https://medium.com/towards-data-science/enhancing-interaction-between-language-models-and-graph-databases-via-a-semantic-layer-0a78ad3eba49).
![Diagram illustrating the workflow of the Neo4j semantic layer with an agent interacting with tools like Information, Recommendation, and Memory, connected to a knowledge graph.](https://raw.githubusercontent.com/langchain-ai/langchain/master/templates/neo4j-semantic-layer/static/workflow.png "Neo4j Semantic Layer Workflow Diagram")
## Tools
The agent utilizes several tools to interact with the Neo4j graph database effectively:
1. **Information tool**:
- Retrieves data about movies or individuals, ensuring the agent has access to the latest and most relevant information.
2. **Recommendation Tool**:
- Provides movie recommendations based upon user preferences and input.
3. **Memory Tool**:
- Stores information about user preferences in the knowledge graph, allowing for a personalized experience over multiple interactions.
## Environment Setup
You need to define the following environment variables:
```
OPENAI_API_KEY=<YOUR_OPENAI_API_KEY>
NEO4J_URI=<YOUR_NEO4J_URI>
NEO4J_USERNAME=<YOUR_NEO4J_USERNAME>
NEO4J_PASSWORD=<YOUR_NEO4J_PASSWORD>
```
## Populating with data
If you want to populate the DB with an example movie dataset, you can run `python ingest.py`.
The script imports information about movies and their ratings by users.
Additionally, the script creates two [fulltext indices](https://neo4j.com/docs/cypher-manual/current/indexes-for-full-text-search/), which are used to map information from user input to the database.
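After ingestion, a quick sanity check (a minimal sketch, assuming the `NEO4J_*` variables above are set and `ingest.py` has completed; the search term is arbitrary) is to query one of the fulltext indices directly:
```python
from langchain_community.graphs import Neo4jGraph

graph = Neo4jGraph()  # reads NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD from the environment

# The `movie` fulltext index is created by ingest.py; `~0.8` allows fuzzy matching.
result = graph.query(
    "CALL db.index.fulltext.queryNodes('movie', $query, {limit: 3}) "
    "YIELD node, score RETURN node.title AS title, score",
    {"query": "casino~0.8"},
)
print(result)
```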
## Usage
To use this package, you should first have the LangChain CLI installed:
```shell
pip install -U "langchain-cli[serve]"
```
To create a new LangChain project and install this as the only package, you can do:
```shell
langchain app new my-app --package neo4j-semantic-layer
```
If you want to add this to an existing project, you can just run:
```shell
langchain app add neo4j-semantic-layer
```
And add the following code to your `server.py` file:
```python
from neo4j_semantic_layer import agent_executor as neo4j_semantic_agent
add_routes(app, neo4j_semantic_agent, path="/neo4j-semantic-layer")
```
(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor and debug LangChain applications.
You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section.
```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default"
```
If you are inside this directory, then you can spin up a LangServe instance directly by:
```shell
langchain serve
```
This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)
We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/neo4j-semantic-layer/playground](http://127.0.0.1:8000/neo4j-semantic-layer/playground)
We can access the template from code with:
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/neo4j-semantic-layer")
```
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-semantic-layer/ingest.py | from langchain_community.graphs import Neo4jGraph
# Instantiate connection to Neo4j
graph = Neo4jGraph()
# Define unique constraints
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (m:Movie) REQUIRE m.id IS UNIQUE;")
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (u:User) REQUIRE u.id IS UNIQUE;")
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (p:Person) REQUIRE p.name IS UNIQUE;")
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (g:Genre) REQUIRE g.name IS UNIQUE;")
# Import movie information
movies_query = """
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/movies.csv'
AS row
CALL {
WITH row
MERGE (m:Movie {id:row.movieId})
SET m.released = date(row.released),
m.title = row.title,
m.imdbRating = toFloat(row.imdbRating)
FOREACH (director in split(row.director, '|') |
MERGE (p:Person {name:trim(director)})
MERGE (p)-[:DIRECTED]->(m))
FOREACH (actor in split(row.actors, '|') |
MERGE (p:Person {name:trim(actor)})
MERGE (p)-[:ACTED_IN]->(m))
FOREACH (genre in split(row.genres, '|') |
MERGE (g:Genre {name:trim(genre)})
MERGE (m)-[:IN_GENRE]->(g))
} IN TRANSACTIONS
"""
graph.query(movies_query)
# Import rating information
rating_query = """
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/ratings.csv'
AS row
CALL {
WITH row
MATCH (m:Movie {id:row.movieId})
MERGE (u:User {id:row.userId})
MERGE (u)-[r:RATED]->(m)
SET r.rating = toFloat(row.rating),
r.timestamp = row.timestamp
} IN TRANSACTIONS OF 10000 ROWS
"""
graph.query(rating_query)
# Define fulltext indices
graph.query("CREATE FULLTEXT INDEX movie IF NOT EXISTS FOR (m:Movie) ON EACH [m.title]")
graph.query(
"CREATE FULLTEXT INDEX person IF NOT EXISTS FOR (p:Person) ON EACH [p.name]"
)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-semantic-layer/main.py | from neo4j_semantic_layer import agent_executor
if __name__ == "__main__":
original_query = "What do you know about person John?"
followup_query = "John Travolta"
chat_history = [
(
"What do you know about person John?",
"I found multiple people named John. Could you please specify "
"which one you are interested in? Here are some options:"
"\n\n1. John Travolta\n2. John McDonough",
)
]
print(agent_executor.invoke({"input": original_query}))
print(
agent_executor.invoke({"input": followup_query, "chat_history": chat_history})
)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-semantic-layer/neo4j_semantic_layer/__init__.py | from neo4j_semantic_layer.agent import agent_executor
__all__ = ["agent_executor"]
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-semantic-layer/neo4j_semantic_layer/agent.py | from typing import List, Tuple
from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.render import format_tool_to_openai_function
from langchain_community.chat_models import ChatOpenAI
from langchain_core.messages import AIMessage, HumanMessage
from neo4j_semantic_layer.information_tool import InformationTool
from neo4j_semantic_layer.memory_tool import MemoryTool
from neo4j_semantic_layer.recommendation_tool import RecommenderTool
llm = ChatOpenAI(temperature=0, model="gpt-4")
tools = [InformationTool(), RecommenderTool(), MemoryTool()]
llm_with_tools = llm.bind(functions=[format_tool_to_openai_function(t) for t in tools])
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are a helpful assistant that finds information about movies "
" and recommends them. If tools require follow up questions, "
"make sure to ask the user for clarification. Make sure to include any "
"available options that need to be clarified in the follow up questions",
),
MessagesPlaceholder(variable_name="chat_history"),
("user", "{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
)
def _format_chat_history(chat_history: List[Tuple[str, str]]):
buffer = []
for human, ai in chat_history:
buffer.append(HumanMessage(content=human))
buffer.append(AIMessage(content=ai))
return buffer
agent = (
{
"input": lambda x: x["input"],
"chat_history": lambda x: (
_format_chat_history(x["chat_history"]) if x.get("chat_history") else []
),
"agent_scratchpad": lambda x: format_to_openai_function_messages(
x["intermediate_steps"]
),
}
| prompt
| llm_with_tools
| OpenAIFunctionsAgentOutputParser()
)
# Add typing for input
class AgentInput(BaseModel):
input: str
chat_history: List[Tuple[str, str]] = Field(
..., extra={"widget": {"type": "chat", "input": "input", "output": "output"}}
)
agent_executor = AgentExecutor(agent=agent, tools=tools).with_types(
input_type=AgentInput
)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-semantic-layer/neo4j_semantic_layer/information_tool.py | from typing import Optional, Type
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
# Import things that are needed generically
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools import BaseTool
from neo4j_semantic_layer.utils import get_candidates, graph
description_query = """
MATCH (m:Movie|Person)
WHERE m.title = $candidate OR m.name = $candidate
MATCH (m)-[r:ACTED_IN|DIRECTED|HAS_GENRE]-(t)
WITH m, type(r) as type, collect(coalesce(t.name, t.title)) as names
WITH m, type+": "+reduce(s="", n IN names | s + n + ", ") as types
WITH m, collect(types) as contexts
WITH m, "type:" + labels(m)[0] + "\ntitle: "+ coalesce(m.title, m.name)
+ "\nyear: "+coalesce(m.released,"") +"\n" +
reduce(s="", c in contexts | s + substring(c, 0, size(c)-2) +"\n") as context
RETURN context LIMIT 1
"""
def get_information(entity: str, type: str) -> str:
candidates = get_candidates(entity, type)
if not candidates:
return "No information was found about the movie or person in the database"
elif len(candidates) > 1:
newline = "\n"
return (
"Need additional information, which of these "
f"did you mean: {newline + newline.join(str(d) for d in candidates)}"
)
data = graph.query(
description_query, params={"candidate": candidates[0]["candidate"]}
)
return data[0]["context"]
class InformationInput(BaseModel):
entity: str = Field(description="movie or a person mentioned in the question")
entity_type: str = Field(
description="type of the entity. Available options are 'movie' or 'person'"
)
class InformationTool(BaseTool):
name = "Information"
description = (
"useful for when you need to answer questions about various actors or movies"
)
args_schema: Type[BaseModel] = InformationInput
def _run(
self,
entity: str,
entity_type: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return get_information(entity, entity_type)
async def _arun(
self,
entity: str,
entity_type: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return get_information(entity, entity_type)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-semantic-layer/neo4j_semantic_layer/memory_tool.py | from typing import Optional, Type
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
# Import things that are needed generically
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools import BaseTool
from neo4j_semantic_layer.utils import get_candidates, get_user_id, graph
store_rating_query = """
MERGE (u:User {userId:$user_id})
WITH u
UNWIND $candidates as row
MATCH (m:Movie {title: row.candidate})
MERGE (u)-[r:RATED]->(m)
SET r.rating = toFloat($rating)
RETURN distinct 'Noted' AS response
"""
def store_movie_rating(movie: str, rating: int):
user_id = get_user_id()
candidates = get_candidates(movie, "movie")
if not candidates:
return "This movie is not in our database"
response = graph.query(
store_rating_query,
params={"user_id": user_id, "candidates": candidates, "rating": rating},
)
try:
return response[0]["response"]
except Exception as e:
print(e)
return "Something went wrong"
class MemoryInput(BaseModel):
movie: str = Field(description="movie the user liked")
rating: int = Field(
description=(
"Rating from 1 to 5, where one represents heavy dislike "
"and 5 represent the user loved the movie"
)
)
class MemoryTool(BaseTool):
name = "Memory"
description = "useful for memorizing which movies the user liked"
args_schema: Type[BaseModel] = MemoryInput
def _run(
self,
movie: str,
rating: int,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return store_movie_rating(movie, rating)
async def _arun(
self,
movie: str,
rating: int,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return store_movie_rating(movie, rating)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-semantic-layer/neo4j_semantic_layer/recommendation_tool.py | from typing import Optional, Type
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools import BaseTool
from neo4j_semantic_layer.utils import get_candidates, get_user_id, graph
recommendation_query_db_history = """
MERGE (u:User {userId:$user_id})
WITH u
// get recommendation candidates
OPTIONAL MATCH (u)-[r1:RATED]->()<-[r2:RATED]-()-[r3:RATED]->(recommendation)
WHERE r1.rating > 3.5 AND r2.rating > 3.5 AND r3.rating > 3.5
AND NOT EXISTS {(u)-[:RATED]->(recommendation)}
// rank and limit recommendations
WITH u, recommendation, count(*) AS count
ORDER BY count DESC LIMIT 3
RETURN recommendation.title AS movie
"""
recommendation_query_genre = """
MATCH (m:Movie)-[:IN_GENRE]->(g:Genre {name:$genre})
// filter out already seen movies by the user
WHERE NOT EXISTS {
(m)<-[:RATED]-(:User {userId:$user_id})
}
// rank and limit recommendations
WITH m
ORDER BY m.imdbRating DESC LIMIT 3
RETURN m.title AS movie
"""
def recommendation_query_movie(genre: bool) -> str:
return f"""
MATCH (m1:Movie)<-[r1:RATED]-()-[r2:RATED]->(m2:Movie)
WHERE r1.rating > 3.5 AND r2.rating > 3.5 and m1.title IN $movieTitles
// filter out already seen movies by the user
AND NOT EXISTS {{
(m2)<-[:RATED]-(:User {{userId:$user_id}})
}}
{'AND EXISTS {(m2)-[:IN_GENRE]->(:Genre {name:$genre})}' if genre else ''}
// rank and limit recommendations
WITH m2, count(*) AS count
ORDER BY count DESC LIMIT 3
RETURN m2.title As movie
"""
def recommend_movie(movie: Optional[str] = None, genre: Optional[str] = None) -> str:
"""
Recommends movies based on the user's history and preference
for a specific movie and/or genre.
Returns:
str: A string containing a list of recommended movies, or an error message.
"""
user_id = get_user_id()
params = {"user_id": user_id, "genre": genre}
if not movie and not genre:
# Try to recommend a movie based on the information in the db
response = graph.query(recommendation_query_db_history, params)
try:
return ", ".join([el["movie"] for el in response])
except Exception:
return "Can you tell us about some of the movies you liked?"
if not movie and genre:
# Recommend top-voted movies in the genre that the user hasn't seen before
response = graph.query(recommendation_query_genre, params)
try:
return ", ".join([el["movie"] for el in response])
except Exception:
return "Something went wrong"
candidates = get_candidates(movie, "movie")
if not candidates:
return "The movie you mentioned wasn't found in the database"
params["movieTitles"] = [el["candidate"] for el in candidates]
query = recommendation_query_movie(bool(genre))
response = graph.query(query, params)
try:
return ", ".join([el["movie"] for el in response])
except Exception:
return "Something went wrong"
all_genres = [
"Action",
"Adventure",
"Animation",
"Children",
"Comedy",
"Crime",
"Documentary",
"Drama",
"Fantasy",
"Film-Noir",
"Horror",
"IMAX",
"Musical",
"Mystery",
"Romance",
"Sci-Fi",
"Thriller",
"War",
"Western",
]
class RecommenderInput(BaseModel):
movie: Optional[str] = Field(description="movie used for recommendation")
genre: Optional[str] = Field(
description=(
"genre used for recommendation. Available options are:" f"{all_genres}"
)
)
class RecommenderTool(BaseTool):
name = "Recommender"
description = "useful for when you need to recommend a movie"
args_schema: Type[BaseModel] = RecommenderInput
def _run(
self,
movie: Optional[str] = None,
genre: Optional[str] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return recommend_movie(movie, genre)
async def _arun(
self,
movie: Optional[str] = None,
genre: Optional[str] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return recommend_movie(movie, genre)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-semantic-layer/neo4j_semantic_layer/utils.py | from typing import Dict, List
from langchain_community.graphs import Neo4jGraph
graph = Neo4jGraph()
def get_user_id() -> int:
"""
Placeholder for a function that would normally retrieve
a user's ID
"""
return 1
def remove_lucene_chars(text: str) -> str:
"""Remove Lucene special characters"""
special_chars = [
"+",
"-",
"&",
"|",
"!",
"(",
")",
"{",
"}",
"[",
"]",
"^",
'"',
"~",
"*",
"?",
":",
"\\",
]
for char in special_chars:
if char in text:
text = text.replace(char, " ")
return text.strip()
def generate_full_text_query(input: str) -> str:
"""
Generate a full-text search query for a given input string.
This function constructs a query string suitable for a full-text search.
It processes the input string by splitting it into words and appending a
similarity threshold (~0.8) to each word, then combines them using the AND
operator. Useful for mapping movies and people from user questions
to database values, and allows for some misspellings.
"""
full_text_query = ""
words = [el for el in remove_lucene_chars(input).split() if el]
for word in words[:-1]:
full_text_query += f" {word}~0.8 AND"
full_text_query += f" {words[-1]}~0.8"
return full_text_query.strip()
candidate_query = """
CALL db.index.fulltext.queryNodes($index, $fulltextQuery, {limit: $limit})
YIELD node
RETURN coalesce(node.name, node.title) AS candidate,
[el in labels(node) WHERE el IN ['Person', 'Movie'] | el][0] AS label
"""
def get_candidates(input: str, type: str, limit: int = 3) -> List[Dict[str, str]]:
"""
Retrieve a list of candidate entities from the database based on the input string.
This function queries the Neo4j database using a full-text search. It takes the
input string, generates a full-text query, and executes this query against the
specified index in the database. The function returns a list of candidates
matching the query, with each candidate being a dictionary containing their name
(or title) and label (either 'Person' or 'Movie').
"""
ft_query = generate_full_text_query(input)
candidates = graph.query(
candidate_query, {"fulltextQuery": ft_query, "index": type, "limit": limit}
)
return candidates
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-parent/README.md |
# neo4j-parent
This template allows you to balance precise embeddings and context retention by splitting documents into smaller chunks and retrieving the original, larger parent text for context.
Using a Neo4j vector index, the package queries child nodes using vector similarity search and retrieves the corresponding parent's text by defining an appropriate `retrieval_query` parameter.
## Environment Setup
You need to define the following environment variables:
```
OPENAI_API_KEY=<YOUR_OPENAI_API_KEY>
NEO4J_URI=<YOUR_NEO4J_URI>
NEO4J_USERNAME=<YOUR_NEO4J_USERNAME>
NEO4J_PASSWORD=<YOUR_NEO4J_PASSWORD>
```
## Populating with data
If you want to populate the DB with some example data, you can run `python ingest.py`.
The script processes and stores sections of the text from the file `dune.txt` into a Neo4j graph database.
First, the text is divided into larger chunks ("parents") and then further subdivided into smaller chunks ("children"), where both parent and child chunks overlap slightly to maintain context.
After storing these chunks in the database, embeddings for the child nodes are computed using OpenAI's embeddings and stored back in the graph for future retrieval or analysis.
Additionally, a vector index named `retrieval` is created for efficient querying of these embeddings.
## Usage
To use this package, you should first have the LangChain CLI installed:
```shell
pip install -U langchain-cli
```
To create a new LangChain project and install this as the only package, you can do:
```shell
langchain app new my-app --package neo4j-parent
```
If you want to add this to an existing project, you can just run:
```shell
langchain app add neo4j-parent
```
And add the following code to your `server.py` file:
```python
from neo4j_parent import chain as neo4j_parent_chain
add_routes(app, neo4j_parent_chain, path="/neo4j-parent")
```
(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor and debug LangChain applications.
You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section.
```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default"
```
If you are inside this directory, then you can spin up a LangServe instance directly by:
```shell
langchain serve
```
This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)
We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/neo4j-parent/playground](http://127.0.0.1:8000/neo4j-parent/playground)
We can access the template from code with:
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/neo4j-parent")
```
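Since the chain's input type is a plain string (see the `Question` model in `chain.py`), the runnable can be invoked with the question directly:
```python
print(runnable.invoke("What is the plot of Dune?"))
```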
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-parent/ingest.py | from pathlib import Path
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.graphs import Neo4jGraph
from langchain_community.vectorstores import Neo4jVector
from langchain_text_splitters import TokenTextSplitter
txt_path = Path(__file__).parent / "dune.txt"
graph = Neo4jGraph()
# Load the text file
loader = TextLoader(str(txt_path))
documents = loader.load()
# Define chunking strategy
parent_splitter = TokenTextSplitter(chunk_size=512, chunk_overlap=24)
child_splitter = TokenTextSplitter(chunk_size=100, chunk_overlap=24)
# Store parent-child patterns into graph
parent_documents = parent_splitter.split_documents(documents)
for parent in parent_documents:
child_documents = child_splitter.split_documents([parent])
params = {
"parent": parent.page_content,
"children": [c.page_content for c in child_documents],
}
graph.query(
"""
CREATE (p:Parent {text: $parent})
WITH p
UNWIND $children AS child
CREATE (c:Child {text: child})
CREATE (c)-[:HAS_PARENT]->(p)
""",
params,
)
# Calculate embedding values on the child nodes
Neo4jVector.from_existing_graph(
OpenAIEmbeddings(),
index_name="retrieval",
node_label="Child",
text_node_properties=["text"],
embedding_node_property="embedding",
)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-parent/main.py | from neo4j_parent.chain import chain
if __name__ == "__main__":
original_query = "What is the plot of the Dune?"
print(chain.invoke(original_query))
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-parent/neo4j_parent/__init__.py | from neo4j_parent.chain import chain
__all__ = ["chain"]
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-parent/neo4j_parent/chain.py | from langchain_community.vectorstores import Neo4jVector
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
retrieval_query = """
MATCH (node)-[:HAS_PARENT]->(parent)
WITH parent, max(score) AS score // deduplicate parents
RETURN parent.text AS text, score, {} AS metadata
"""
def format_docs(docs):
return "\n\n".join(doc.page_content for doc in docs)
vectorstore = Neo4jVector.from_existing_index(
OpenAIEmbeddings(),
index_name="retrieval",
node_label="Child",
embedding_node_property="embedding",
retrieval_query=retrieval_query,
)
retriever = vectorstore.as_retriever()
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI()
chain = (
RunnableParallel(
{"context": retriever | format_docs, "question": RunnablePassthrough()}
)
| prompt
| model
| StrOutputParser()
)
# Add typing for input
class Question(BaseModel):
__root__: str
chain = chain.with_types(input_type=Question)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-generation/README.md |
# neo4j-generation
This template pairs LLM-based knowledge graph extraction with Neo4j AuraDB, a fully managed cloud graph database.
You can create a free instance on [Neo4j Aura](https://neo4j.com/cloud/platform/aura-graph-database?utm_source=langchain&utm_content=langserve).
When you initiate a free database instance, you'll receive credentials to access the database.
This template is flexible and allows users to guide the extraction process by specifying a list of node labels and relationship types.
For more details on the functionality and capabilities of this package, please refer to [this blog post](https://blog.langchain.dev/constructing-knowledge-graphs-from-text-using-openai-functions/).
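For example, once the package is installed and configured (see below), extraction can be constrained to specific node labels and relationship types; this sketch mirrors `main.py` in this template:
```python
from neo4j_generation.chain import chain

print(
    chain(
        "Harrison works at LangChain, which is located in San Francisco",
        allowed_nodes=["Person", "Organization", "Location"],
        allowed_relationships=["WORKS_AT", "LOCATED_IN"],
    )
)
```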
## Environment Setup
You need to set the following environment variables:
```
OPENAI_API_KEY=<YOUR_OPENAI_API_KEY>
NEO4J_URI=<YOUR_NEO4J_URI>
NEO4J_USERNAME=<YOUR_NEO4J_USERNAME>
NEO4J_PASSWORD=<YOUR_NEO4J_PASSWORD>
```
## Usage
To use this package, you should first have the LangChain CLI installed:
```shell
pip install -U langchain-cli
```
To create a new LangChain project and install this as the only package, you can do:
```shell
langchain app new my-app --package neo4j-generation
```
If you want to add this to an existing project, you can just run:
```shell
langchain app add neo4j-generation
```
And add the following code to your `server.py` file:
```python
from neo4j_generation.chain import chain as neo4j_generation_chain
add_routes(app, neo4j_generation_chain, path="/neo4j-generation")
```
(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor and debug LangChain applications.
You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section.
```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default"
```
If you are inside this directory, then you can spin up a LangServe instance directly by:
```shell
langchain serve
```
This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)
We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/neo4j-generation/playground](http://127.0.0.1:8000/neo4j-generation/playground)
We can access the template from code with:
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/neo4j-generation")
```
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-generation/main.py | from neo4j_generation.chain import chain
if __name__ == "__main__":
text = "Harrison works at LangChain, which is located in San Francisco"
allowed_nodes = ["Person", "Organization", "Location"]
allowed_relationships = ["WORKS_AT", "LOCATED_IN"]
print(
chain(
text,
allowed_nodes=allowed_nodes,
allowed_relationships=allowed_relationships,
)
)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-generation/neo4j_generation/__init__.py | from neo4j_generation.chain import chain
__all__ = ["chain"]
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-generation/neo4j_generation/chain.py | from typing import List, Optional
from langchain_community.graphs import Neo4jGraph
from langchain_core.documents import Document
from langchain_experimental.graph_transformers import LLMGraphTransformer
from langchain_openai import ChatOpenAI
graph = Neo4jGraph()
llm = ChatOpenAI(model="gpt-3.5-turbo-16k", temperature=0)
def chain(
text: str,
allowed_nodes: Optional[List[str]] = None,
allowed_relationships: Optional[List[str]] = None,
) -> str:
"""
Processes the given text to extract graph data and constructs a graph document from the extracted information.
The constructed graph document is then added to the graph.
Parameters:
- text (str): The input text from which the information will be extracted to construct the graph.
- allowed_nodes (Optional[List[str]]): A list of node labels to guide the extraction process.
If not provided, extraction won't have a specific restriction on node labels.
- allowed_relationships (Optional[List[str]]): A list of relationship types to guide the extraction process.
If not provided, extraction won't have a specific restriction on relationship types.
Returns:
str: A confirmation message indicating the completion of the graph construction.
""" # noqa: E501
# Construct document based on text
documents = [Document(page_content=text)]
# Extract graph data using OpenAI functions
llm_graph_transformer = LLMGraphTransformer(
llm=llm,
allowed_nodes=allowed_nodes,
allowed_relationships=allowed_relationships,
)
graph_documents = llm_graph_transformer.convert_to_graph_documents(documents)
# Store information into a graph
graph.add_graph_documents(graph_documents)
return "Graph construction finished"
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-cypher/README.md |
# neo4j_cypher
This template allows you to interact with a Neo4j graph database in natural language, using an OpenAI LLM.
It transforms a natural language question into a Cypher query (used to fetch data from Neo4j databases), executes the query, and provides a natural language response based on the query results.
[![Diagram showing the workflow of a user asking a question, which is processed by a Cypher generating chain, resulting in a Cypher query to the Neo4j Knowledge Graph, and then an answer generating chain that provides a generated answer based on the information from the graph.](https://raw.githubusercontent.com/langchain-ai/langchain/master/templates/neo4j-cypher/static/workflow.png "Neo4j Cypher Workflow Diagram")](https://medium.com/neo4j/langchain-cypher-search-tips-tricks-f7c9e9abca4d)
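Once the package is installed and configured (see below), the chain takes a `question` key and returns a natural language answer; a minimal call, mirroring `main.py` in this template:
```python
from neo4j_cypher.chain import chain

print(chain.invoke({"question": "Who played in Top Gun?"}))
```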
## Environment Setup
Define the following environment variables:
```
OPENAI_API_KEY=<YOUR_OPENAI_API_KEY>
NEO4J_URI=<YOUR_NEO4J_URI>
NEO4J_USERNAME=<YOUR_NEO4J_USERNAME>
NEO4J_PASSWORD=<YOUR_NEO4J_PASSWORD>
```
## Neo4j database setup
There are a number of ways to set up a Neo4j database.
### Neo4j Aura
Neo4j AuraDB is a fully managed cloud graph database service.
Create a free instance on [Neo4j Aura](https://neo4j.com/cloud/platform/aura-graph-database?utm_source=langchain&utm_content=langserve).
When you initiate a free database instance, you'll receive credentials to access the database.
## Populating with data
If you want to populate the DB with some example data, you can run `python ingest.py`.
This script will populate the database with sample movie data.
## Usage
To use this package, you should first have the LangChain CLI installed:
```shell
pip install -U langchain-cli
```
To create a new LangChain project and install this as the only package, you can do:
```shell
langchain app new my-app --package neo4j-cypher
```
If you want to add this to an existing project, you can just run:
```shell
langchain app add neo4j-cypher
```
And add the following code to your `server.py` file:
```python
from neo4j_cypher import chain as neo4j_cypher_chain
add_routes(app, neo4j_cypher_chain, path="/neo4j-cypher")
```
(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor and debug LangChain applications.
You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section.
```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default"
```
If you are inside this directory, then you can spin up a LangServe instance directly by:
```shell
langchain serve
```
This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)
We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/neo4j-cypher/playground](http://127.0.0.1:8000/neo4j-cypher/playground)
We can access the template from code with:
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/neo4j-cypher")
```
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-cypher/ingest.py | from langchain_community.graphs import Neo4jGraph
graph = Neo4jGraph()
graph.query(
"""
MERGE (m:Movie {name:"Top Gun"})
WITH m
UNWIND ["Tom Cruise", "Val Kilmer", "Anthony Edwards", "Meg Ryan"] AS actor
MERGE (a:Actor {name:actor})
MERGE (a)-[:ACTED_IN]->(m)
"""
)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-cypher/main.py | from neo4j_cypher.chain import chain
if __name__ == "__main__":
original_query = "Who played in Top Gun?"
print(chain.invoke({"question": original_query}))
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-cypher/neo4j_cypher/__init__.py | from neo4j_cypher.chain import chain
__all__ = ["chain"]
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-cypher/neo4j_cypher/chain.py | from typing import List, Union
from langchain.chains.graph_qa.cypher_utils import CypherQueryCorrector, Schema
from langchain_community.graphs import Neo4jGraph
from langchain_core.messages import (
AIMessage,
SystemMessage,
ToolMessage,
)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
)
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
# Connection to Neo4j
graph = Neo4jGraph()
# Cypher validation tool for relationship directions
corrector_schema = [
Schema(el["start"], el["type"], el["end"])
for el in graph.structured_schema.get("relationships")
]
cypher_validation = CypherQueryCorrector(corrector_schema)
# LLMs
cypher_llm = ChatOpenAI(model="gpt-4", temperature=0.0)
qa_llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.0)
# Generate Cypher statement based on natural language input
cypher_template = """Based on the Neo4j graph schema below, write a Cypher query that would answer the user's question:
{schema}
Question: {question}
Cypher query:""" # noqa: E501
cypher_prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"Given an input question, convert it to a Cypher query. No pre-amble.",
),
("human", cypher_template),
]
)
cypher_response = (
RunnablePassthrough.assign(
schema=lambda _: graph.get_schema,
)
| cypher_prompt
| cypher_llm.bind(stop=["\nCypherResult:"])
| StrOutputParser()
)
response_system = """You are an assistant that helps to form nice and human
understandable answers based on the provided information from tools.
Do not add any other information that wasn't present in the tools, and use
a very concise style in interpreting results!
"""
response_prompt = ChatPromptTemplate.from_messages(
[
SystemMessage(content=response_system),
HumanMessagePromptTemplate.from_template("{question}"),
MessagesPlaceholder(variable_name="function_response"),
]
)
def get_function_response(
query: str, question: str
) -> List[Union[AIMessage, ToolMessage]]:
context = graph.query(cypher_validation(query))
TOOL_ID = "call_H7fABDuzEau48T10Qn0Lsh0D"
messages = [
AIMessage(
content="",
additional_kwargs={
"tool_calls": [
{
"id": TOOL_ID,
"function": {
"arguments": '{"question":"' + question + '"}',
"name": "GetInformation",
},
"type": "function",
}
]
},
),
ToolMessage(content=str(context), tool_call_id=TOOL_ID),
]
return messages
chain = (
RunnablePassthrough.assign(query=cypher_response)
| RunnablePassthrough.assign(
function_response=lambda x: get_function_response(x["query"], x["question"])
)
| response_prompt
| qa_llm
| StrOutputParser()
)
# Add typing for input
class Question(BaseModel):
question: str
chain = chain.with_types(input_type=Question)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-cypher-memory/README.md |
# neo4j-cypher-memory
This template allows you to have conversations with a Neo4j graph database in natural language, using an OpenAI LLM.
It transforms a natural language question into a Cypher query (used to fetch data from Neo4j databases), executes the query, and provides a natural language response based on the query results.
Additionally, it features a conversational memory module that stores the dialogue history in the Neo4j graph database.
The conversation memory is uniquely maintained for each user session, ensuring personalized interactions.
To facilitate this, please supply both the `user_id` and `session_id` when using the conversation chain.
![Workflow diagram illustrating the process of a user asking a question, generating a Cypher query, retrieving conversational history, executing the query on a Neo4j database, generating an answer, and storing conversational memory.](https://raw.githubusercontent.com/langchain-ai/langchain/master/templates/neo4j-cypher-memory/static/workflow.png "Neo4j Cypher Memory Workflow Diagram")
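A minimal invocation sketch (once the package is installed and configured as described below), mirroring `main.py` in this template, passes all three keys:
```python
from neo4j_cypher_memory.chain import chain

print(
    chain.invoke(
        {
            "question": "Who played in Top Gun?",
            "user_id": "user_123",      # identifies the user whose dialogue history is stored
            "session_id": "session_1",  # keeps memory separate per conversation session
        }
    )
)
```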
## Environment Setup
Define the following environment variables:
```
OPENAI_API_KEY=<YOUR_OPENAI_API_KEY>
NEO4J_URI=<YOUR_NEO4J_URI>
NEO4J_USERNAME=<YOUR_NEO4J_USERNAME>
NEO4J_PASSWORD=<YOUR_NEO4J_PASSWORD>
```
## Neo4j database setup
There are a number of ways to set up a Neo4j database.
### Neo4j Aura
Neo4j AuraDB is a fully managed cloud graph database service.
Create a free instance on [Neo4j Aura](https://neo4j.com/cloud/platform/aura-graph-database?utm_source=langchain&utm_content=langserve).
When you initiate a free database instance, you'll receive credentials to access the database.
## Populating with data
If you want to populate the DB with some example data, you can run `python ingest.py`.
This script will populate the database with sample movie data.
## Usage
To use this package, you should first have the LangChain CLI installed:
```shell
pip install -U langchain-cli
```
To create a new LangChain project and install this as the only package, you can do:
```shell
langchain app new my-app --package neo4j-cypher-memory
```
If you want to add this to an existing project, you can just run:
```shell
langchain app add neo4j-cypher-memory
```
And add the following code to your `server.py` file:
```python
from neo4j_cypher_memory import chain as neo4j_cypher_memory_chain
add_routes(app, neo4j_cypher_memory_chain, path="/neo4j-cypher-memory")
```
(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor and debug LangChain applications.
You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section.
```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default"
```
If you are inside this directory, then you can spin up a LangServe instance directly by:
```shell
langchain serve
```
This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)
We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/neo4j-cypher-memory/playground](http://127.0.0.1:8000/neo4j-cypher-memory/playground)
We can access the template from code with:
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/neo4j-cypher-memory")
```
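Because the conversational memory is scoped to a user and a session, the chain expects a `user_id` and a `session_id` alongside the question. A minimal sketch of an invocation (the identifiers below are illustrative placeholders, mirroring `main.py` in this template):
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/neo4j-cypher-memory")
# Reusing the same user_id and session_id keeps follow-up questions
# in the same conversation history stored in Neo4j.
print(
    runnable.invoke(
        {
            "question": "Who played in Top Gun?",
            "user_id": "user_123",
            "session_id": "session_1",
        }
    )
)
```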
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-cypher-memory/ingest.py | from langchain_community.graphs import Neo4jGraph
graph = Neo4jGraph()
graph.query(
"""
MERGE (m:Movie {name:"Top Gun"})
WITH m
UNWIND ["Tom Cruise", "Val Kilmer", "Anthony Edwards", "Meg Ryan"] AS actor
MERGE (a:Actor {name:actor})
MERGE (a)-[:ACTED_IN]->(m)
WITH a
WHERE a.name = "Tom Cruise"
MERGE (a)-[:ACTED_IN]->(:Movie {name:"Mission Impossible"})
"""
)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-cypher-memory/main.py | from neo4j_cypher_memory.chain import chain
if __name__ == "__main__":
original_query = "Who played in Top Gun?"
print(
chain.invoke(
{
"question": original_query,
"user_id": "user_123",
"session_id": "session_1",
}
)
)
follow_up_query = "Did they play in any other movies?"
print(
chain.invoke(
{
"question": follow_up_query,
"user_id": "user_123",
"session_id": "session_1",
}
)
)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-cypher-memory/neo4j_cypher_memory/__init__.py | from neo4j_cypher_memory.chain import chain
__all__ = ["chain"]
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-cypher-memory/neo4j_cypher_memory/chain.py | from typing import Any, Dict, List, Union
from langchain.chains.graph_qa.cypher_utils import CypherQueryCorrector, Schema
from langchain.memory import ChatMessageHistory
from langchain_community.graphs import Neo4jGraph
from langchain_core.messages import (
AIMessage,
SystemMessage,
ToolMessage,
)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
)
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
# Connection to Neo4j
graph = Neo4jGraph()
# Cypher validation tool for relationship directions
corrector_schema = [
Schema(el["start"], el["type"], el["end"])
for el in graph.structured_schema.get("relationships")
]
cypher_validation = CypherQueryCorrector(corrector_schema)
# LLMs
cypher_llm = ChatOpenAI(model="gpt-4", temperature=0.0)
qa_llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.0)
def convert_messages(input: List[Dict[str, Any]]) -> ChatMessageHistory:
history = ChatMessageHistory()
for item in input:
history.add_user_message(item["result"]["question"])
history.add_ai_message(item["result"]["answer"])
return history
def get_history(input: Dict[str, Any]) -> ChatMessageHistory:
input.pop("question")
# Lookback conversation window
window = 3
data = graph.query(
"""
MATCH (u:User {id:$user_id})-[:HAS_SESSION]->(s:Session {id:$session_id}),
(s)-[:LAST_MESSAGE]->(last_message)
MATCH p=(last_message)<-[:NEXT*0.."""
+ str(window)
+ """]-()
WITH p, length(p) AS length
ORDER BY length DESC LIMIT 1
UNWIND reverse(nodes(p)) AS node
MATCH (node)-[:HAS_ANSWER]->(answer)
RETURN {question:node.text, answer:answer.text} AS result
""",
params=input,
)
history = convert_messages(data)
return history.messages
def save_history(input):
print(input)
if input.get("function_response"):
input.pop("function_response")
# store history to database
graph.query(
"""MERGE (u:User {id: $user_id})
WITH u
OPTIONAL MATCH (u)-[:HAS_SESSION]->(s:Session{id: $session_id}),
(s)-[l:LAST_MESSAGE]->(last_message)
FOREACH (_ IN CASE WHEN last_message IS NULL THEN [1] ELSE [] END |
CREATE (u)-[:HAS_SESSION]->(s1:Session {id:$session_id}),
(s1)-[:LAST_MESSAGE]->(q:Question {text:$question, cypher:$query, date:datetime()}),
(q)-[:HAS_ANSWER]->(:Answer {text:$output}))
FOREACH (_ IN CASE WHEN last_message IS NOT NULL THEN [1] ELSE [] END |
CREATE (last_message)-[:NEXT]->(q:Question
{text:$question, cypher:$query, date:datetime()}),
(q)-[:HAS_ANSWER]->(:Answer {text:$output}),
(s)-[:LAST_MESSAGE]->(q)
DELETE l) """,
params=input,
)
# Return LLM response to the chain
return input["output"]
# Generate Cypher statement based on natural language input
cypher_template = """This is important for my career.
Based on the Neo4j graph schema below, write a Cypher query that would answer the user's question:
{schema}
Question: {question}
Cypher query:""" # noqa: E501
cypher_prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"Given an input question, convert it to a Cypher query. No pre-amble.",
),
MessagesPlaceholder(variable_name="history"),
("human", cypher_template),
]
)
cypher_response = (
RunnablePassthrough.assign(schema=lambda _: graph.get_schema, history=get_history)
| cypher_prompt
| cypher_llm.bind(stop=["\nCypherResult:"])
| StrOutputParser()
)
# Generate natural language response based on database results
response_system = """You are an assistant that helps to form nice and human
understandable answers based on the provided information from tools.
Do not add any other information that wasn't present in the tools, and use
very concise style in interpreting results!
"""
response_prompt = ChatPromptTemplate.from_messages(
[
SystemMessage(content=response_system),
HumanMessagePromptTemplate.from_template("{question}"),
MessagesPlaceholder(variable_name="function_response"),
]
)
def get_function_response(
query: str, question: str
) -> List[Union[AIMessage, ToolMessage]]:
context = graph.query(cypher_validation(query))
TOOL_ID = "call_H7fABDuzEau48T10Qn0Lsh0D"
messages = [
AIMessage(
content="",
additional_kwargs={
"tool_calls": [
{
"id": TOOL_ID,
"function": {
"arguments": '{"question":"' + question + '"}',
"name": "GetInformation",
},
"type": "function",
}
]
},
),
ToolMessage(content=str(context), tool_call_id=TOOL_ID),
]
return messages
chain = (
RunnablePassthrough.assign(query=cypher_response)
| RunnablePassthrough.assign(
function_response=lambda x: get_function_response(x["query"], x["question"]),
)
| RunnablePassthrough.assign(
output=response_prompt | qa_llm | StrOutputParser(),
)
| save_history
)
# Add typing for input
class Question(BaseModel):
question: str
user_id: str
session_id: str
chain = chain.with_types(input_type=Question)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-cypher-ft/README.md |
# neo4j-cypher-ft
This template allows you to interact with a Neo4j graph database using natural language, leveraging OpenAI's LLM.
Its main function is to convert natural language questions into Cypher queries (the language used to query Neo4j databases), execute these queries, and provide natural language responses based on the query's results.
The package utilizes a full-text index for efficient mapping of text values to database entries, thereby enhancing the generation of accurate Cypher statements.
In the provided example, the full-text index is used to map names of people and movies from the user's query to corresponding database entries.
![Workflow diagram showing the process from a user asking a question to generating an answer using the Neo4j knowledge graph and full-text index.](https://raw.githubusercontent.com/langchain-ai/langchain/master/templates/neo4j-cypher-ft/static/workflow.png "Neo4j Cypher Workflow Diagram")
## Environment Setup
The following environment variables need to be set:
```
OPENAI_API_KEY=<YOUR_OPENAI_API_KEY>
NEO4J_URI=<YOUR_NEO4J_URI>
NEO4J_USERNAME=<YOUR_NEO4J_USERNAME>
NEO4J_PASSWORD=<YOUR_NEO4J_PASSWORD>
```
Additionally, if you wish to populate the DB with some example data, you can run `python ingest.py`.
This script will populate the database with sample movie data and create a full-text index named `entity`, which is used to map people and movies from user input to database values for precise Cypher statement generation.
## Usage
To use this package, you should first have the LangChain CLI installed:
```shell
pip install -U langchain-cli
```
To create a new LangChain project and install this as the only package, you can do:
```shell
langchain app new my-app --package neo4j-cypher-ft
```
If you want to add this to an existing project, you can just run:
```shell
langchain app add neo4j-cypher-ft
```
And add the following code to your `server.py` file:
```python
from neo4j_cypher_ft import chain as neo4j_cypher_ft_chain
add_routes(app, neo4j_cypher_ft_chain, path="/neo4j-cypher-ft")
```
(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor and debug LangChain applications.
You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section
```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default"
```
If you are inside this directory, then you can spin up a LangServe instance directly by:
```shell
langchain serve
```
This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)
We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/neo4j-cypher-ft/playground](http://127.0.0.1:8000/neo4j-cypher-ft/playground)
We can access the template from code with:
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/neo4j-cypher-ft")
```
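Because entity names are resolved through the full-text `entity` index, the chain tolerates small misspellings in the question. A minimal sketch (the query mirrors `main.py` in this template):
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/neo4j-cypher-ft")
# The misspellings below are intentional; the full-text index maps
# "tom cruis" and "top gun" to the correct database values before
# the Cypher statement is generated.
print(runnable.invoke({"question": "Did tom cruis act in top gun?"}))
```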
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-cypher-ft/ingest.py | from langchain_community.graphs import Neo4jGraph
graph = Neo4jGraph()
# Import sample data
graph.query(
"""
MERGE (m:Movie {name:"Top Gun"})
WITH m
UNWIND ["Tom Cruise", "Val Kilmer", "Anthony Edwards", "Meg Ryan"] AS actor
MERGE (a:Person {name:actor})
MERGE (a)-[:ACTED_IN]->(m)
"""
)
# Create full text index for entity matching
# on Person and Movie nodes
graph.query(
"CREATE FULLTEXT INDEX entity IF NOT EXISTS"
" FOR (m:Movie|Person) ON EACH [m.title, m.name]"
)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-cypher-ft/main.py | from neo4j_cypher_ft.chain import chain
if __name__ == "__main__":
original_query = "Did tom cruis act in top gun?"
print(chain.invoke({"question": original_query}))
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-cypher-ft/neo4j_cypher_ft/__init__.py | from neo4j_cypher_ft.chain import chain
__all__ = ["chain"]
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-cypher-ft/neo4j_cypher_ft/chain.py | from typing import List, Optional, Union
from langchain.chains.graph_qa.cypher_utils import CypherQueryCorrector, Schema
from langchain_community.graphs import Neo4jGraph
from langchain_core.messages import (
AIMessage,
SystemMessage,
ToolMessage,
)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
)
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
# Connection to Neo4j
graph = Neo4jGraph()
# Cypher validation tool for relationship directions
corrector_schema = [
Schema(el["start"], el["type"], el["end"])
for el in graph.structured_schema.get("relationships")
]
cypher_validation = CypherQueryCorrector(corrector_schema)
# LLMs
cypher_llm = ChatOpenAI(model="gpt-4", temperature=0.0)
qa_llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.0)
# Extract entities from text
class Entities(BaseModel):
"""Identifying information about entities."""
names: List[str] = Field(
...,
description="All the person, organization, or business entities that "
"appear in the text",
)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are extracting organization and person entities from the text.",
),
(
"human",
"Use the given format to extract information from the following "
"input: {question}",
),
]
)
# Fulltext index query
def map_to_database(entities: Entities) -> Optional[str]:
result = ""
for entity in entities.names:
response = graph.query(
"CALL db.index.fulltext.queryNodes('entity', $entity + '*', {limit:1})"
" YIELD node,score RETURN node.name AS result",
{"entity": entity},
)
try:
result += f"{entity} maps to {response[0]['result']} in database\n"
except IndexError:
pass
return result
entity_chain = prompt | qa_llm.with_structured_output(Entities)
# Generate Cypher statement based on natural language input
cypher_template = """Based on the Neo4j graph schema below, write a Cypher query that would answer the user's question:
{schema}
Entities in the question map to the following database values:
{entities_list}
Question: {question}
Cypher query:""" # noqa: E501
cypher_prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"Given an input question, convert it to a Cypher query. No pre-amble.",
),
("human", cypher_template),
]
)
cypher_response = (
RunnablePassthrough.assign(names=entity_chain)
| RunnablePassthrough.assign(
entities_list=lambda x: map_to_database(x["names"]),
schema=lambda _: graph.get_schema,
)
| cypher_prompt
| cypher_llm.bind(stop=["\nCypherResult:"])
| StrOutputParser()
)
# Generate natural language response based on database results
response_system = """You are an assistant that helps to form nice and human
understandable answers based on the provided information from tools.
Do not add any other information that wasn't present in the tools, and use
very concise style in interpreting results!
"""
response_prompt = ChatPromptTemplate.from_messages(
[
SystemMessage(content=response_system),
HumanMessagePromptTemplate.from_template("{question}"),
MessagesPlaceholder(variable_name="function_response"),
]
)
def get_function_response(
query: str, question: str
) -> List[Union[AIMessage, ToolMessage]]:
context = graph.query(cypher_validation(query))
TOOL_ID = "call_H7fABDuzEau48T10Qn0Lsh0D"
messages = [
AIMessage(
content="",
additional_kwargs={
"tool_calls": [
{
"id": TOOL_ID,
"function": {
"arguments": '{"question":"' + question + '"}',
"name": "GetInformation",
},
"type": "function",
}
]
},
),
ToolMessage(content=str(context), tool_call_id=TOOL_ID),
]
return messages
chain = (
RunnablePassthrough.assign(query=cypher_response)
| RunnablePassthrough.assign(
function_response=lambda x: get_function_response(x["query"], x["question"])
)
| response_prompt
| qa_llm
| StrOutputParser()
)
# Add typing for input
class Question(BaseModel):
question: str
chain = chain.with_types(input_type=Question)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-advanced-rag/README.md | # neo4j-advanced-rag
This template allows you to balance precise embeddings and context retention by implementing advanced retrieval strategies.
## Strategies
1. **Typical RAG**:
- Traditional method where the exact data indexed is the data retrieved.
2. **Parent retriever**:
- Instead of indexing entire documents, data is divided into smaller chunks, referred to as Parent and Child documents.
- Child documents are indexed for better representation of specific concepts, while parent documents are retrieved to ensure context retention.
3. **Hypothetical Questions**:
- Documents are processed to determine potential questions they might answer.
- These questions are then indexed for better representation of specific concepts, while parent documents are retrieved to ensure context retention.
4. **Summaries**:
- Instead of indexing the entire document, a summary of the document is created and indexed.
- Similarly, the parent document is retrieved in a RAG application.
## Environment Setup
You need to define the following environment variables
```
OPENAI_API_KEY=<YOUR_OPENAI_API_KEY>
NEO4J_URI=<YOUR_NEO4J_URI>
NEO4J_USERNAME=<YOUR_NEO4J_USERNAME>
NEO4J_PASSWORD=<YOUR_NEO4J_PASSWORD>
```
## Populating with data
If you want to populate the DB with some example data, you can run `python ingest.py`.
The script processes and stores sections of the text from the file `dune.txt` in a Neo4j graph database.
First, the text is divided into larger chunks ("parents") and then further subdivided into smaller chunks ("children"), where both parent and child chunks overlap slightly to maintain context.
After storing these chunks in the database, embeddings for the child nodes are computed using OpenAI's embeddings and stored back in the graph for future retrieval or analysis.
For every parent node, hypothetical questions and summaries are generated, embedded, and added to the database.
Additionally, a vector index for each retrieval strategy is created for efficient querying of these embeddings.
*Note that ingestion can take a minute or two, as the LLM needs time to generate hypothetical questions and summaries.*
## Usage
To use this package, you should first have the LangChain CLI installed:
```shell
pip install -U "langchain-cli[serve]"
```
To create a new LangChain project and install this as the only package, you can do:
```shell
langchain app new my-app --package neo4j-advanced-rag
```
If you want to add this to an existing project, you can just run:
```shell
langchain app add neo4j-advanced-rag
```
And add the following code to your `server.py` file:
```python
from neo4j_advanced_rag import chain as neo4j_advanced_chain
add_routes(app, neo4j_advanced_chain, path="/neo4j-advanced-rag")
```
(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor and debug LangChain applications.
You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section
```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default"
```
If you are inside this directory, then you can spin up a LangServe instance directly by:
```shell
langchain serve
```
This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)
We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/neo4j-advanced-rag/playground](http://127.0.0.1:8000/neo4j-advanced-rag/playground)
We can access the template from code with:
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/neo4j-advanced-rag")
```
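The retrieval strategy is exposed as a configurable field named `strategy`, so you can switch between the approaches described above per invocation. A minimal sketch (mirroring `main.py` in this template; the valid keys are `typical_rag` (default), `parent_strategy`, `hypothetical_questions`, and `summary_strategy`):
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/neo4j-advanced-rag")
# Select the parent-document strategy for this call; omit the config
# to fall back to the default "typical_rag" strategy.
print(
    runnable.invoke(
        {"question": "What is the plot of the Dune?"},
        config={"configurable": {"strategy": "parent_strategy"}},
    )
)
```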
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-advanced-rag/ingest.py | from pathlib import Path
from typing import List
from langchain_community.document_loaders import TextLoader
from langchain_community.graphs import Neo4jGraph
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import TokenTextSplitter
from neo4j.exceptions import ClientError
txt_path = Path(__file__).parent / "dune.txt"
graph = Neo4jGraph()
# Embeddings & LLM models
embeddings = OpenAIEmbeddings()
embedding_dimension = 1536
llm = ChatOpenAI(temperature=0)
# Load the text file
loader = TextLoader(str(txt_path))
documents = loader.load()
# Ingest Parent-Child node pairs
parent_splitter = TokenTextSplitter(chunk_size=512, chunk_overlap=24)
child_splitter = TokenTextSplitter(chunk_size=100, chunk_overlap=24)
parent_documents = parent_splitter.split_documents(documents)
for i, parent in enumerate(parent_documents):
child_documents = child_splitter.split_documents([parent])
params = {
"parent_text": parent.page_content,
"parent_id": i,
"parent_embedding": embeddings.embed_query(parent.page_content),
"children": [
{
"text": c.page_content,
"id": f"{i}-{ic}",
"embedding": embeddings.embed_query(c.page_content),
}
for ic, c in enumerate(child_documents)
],
}
# Ingest data
graph.query(
"""
MERGE (p:Parent {id: $parent_id})
SET p.text = $parent_text
WITH p
CALL db.create.setVectorProperty(p, 'embedding', $parent_embedding)
YIELD node
WITH p
UNWIND $children AS child
MERGE (c:Child {id: child.id})
SET c.text = child.text
MERGE (c)<-[:HAS_CHILD]-(p)
WITH c, child
CALL db.create.setVectorProperty(c, 'embedding', child.embedding)
YIELD node
RETURN count(*)
""",
params,
)
# Create vector index for child
try:
graph.query(
"CALL db.index.vector.createNodeIndex('parent_document', "
"'Child', 'embedding', $dimension, 'cosine')",
{"dimension": embedding_dimension},
)
except ClientError: # already exists
pass
# Create vector index for parents
try:
graph.query(
"CALL db.index.vector.createNodeIndex('typical_rag', "
"'Parent', 'embedding', $dimension, 'cosine')",
{"dimension": embedding_dimension},
)
except ClientError: # already exists
pass
# Ingest hypothetical questions
class Questions(BaseModel):
"""Generating hypothetical questions about text."""
questions: List[str] = Field(
...,
description=(
"Generated hypothetical questions based on " "the information from the text"
),
)
questions_prompt = ChatPromptTemplate.from_messages(
[
(
"system",
(
"You are generating hypothetical questions based on the information "
"found in the text. Make sure to provide full context in the generated "
"questions."
),
),
(
"human",
(
"Use the given format to generate hypothetical questions from the "
"following input: {input}"
),
),
]
)
question_chain = questions_prompt | llm.with_structured_output(Questions)
for i, parent in enumerate(parent_documents):
questions = question_chain.invoke(parent.page_content).questions
params = {
"parent_id": i,
"questions": [
{"text": q, "id": f"{i}-{iq}", "embedding": embeddings.embed_query(q)}
for iq, q in enumerate(questions)
if q
],
}
graph.query(
"""
MERGE (p:Parent {id: $parent_id})
WITH p
UNWIND $questions AS question
CREATE (q:Question {id: question.id})
SET q.text = question.text
MERGE (q)<-[:HAS_QUESTION]-(p)
WITH q, question
CALL db.create.setVectorProperty(q, 'embedding', question.embedding)
YIELD node
RETURN count(*)
""",
params,
)
# Create vector index
try:
graph.query(
"CALL db.index.vector.createNodeIndex('hypothetical_questions', "
"'Question', 'embedding', $dimension, 'cosine')",
{"dimension": embedding_dimension},
)
except ClientError: # already exists
pass
# Ingest summaries
summary_prompt = ChatPromptTemplate.from_messages(
[
(
"system",
(
"You are generating concise and accurate summaries based on the "
"information found in the text."
),
),
(
"human",
("Generate a summary of the following input: {question}\n" "Summary:"),
),
]
)
summary_chain = summary_prompt | llm
for i, parent in enumerate(parent_documents):
summary = summary_chain.invoke({"question": parent.page_content}).content
params = {
"parent_id": i,
"summary": summary,
"embedding": embeddings.embed_query(summary),
}
graph.query(
"""
MERGE (p:Parent {id: $parent_id})
MERGE (p)-[:HAS_SUMMARY]->(s:Summary)
SET s.text = $summary
WITH s
CALL db.create.setVectorProperty(s, 'embedding', $embedding)
YIELD node
RETURN count(*)
""",
params,
)
# Create vector index
try:
graph.query(
"CALL db.index.vector.createNodeIndex('summary', "
"'Summary', 'embedding', $dimension, 'cosine')",
{"dimension": embedding_dimension},
)
except ClientError: # already exists
pass
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-advanced-rag/main.py | from neo4j_advanced_rag.chain import chain
if __name__ == "__main__":
original_query = "What is the plot of the Dune?"
print(
chain.invoke(
{"question": original_query},
{"configurable": {"strategy": "parent_strategy"}},
)
)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-advanced-rag/neo4j_advanced_rag/__init__.py | from neo4j_advanced_rag.chain import chain
__all__ = ["chain"]
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-advanced-rag/neo4j_advanced_rag/chain.py | from operator import itemgetter
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import ConfigurableField, RunnableParallel
from langchain_openai import ChatOpenAI
from neo4j_advanced_rag.retrievers import (
hypothetic_question_vectorstore,
parent_vectorstore,
summary_vectorstore,
typical_rag,
)
def format_docs(docs):
return "\n\n".join(doc.page_content for doc in docs)
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI()
retriever = typical_rag.as_retriever().configurable_alternatives(
ConfigurableField(id="strategy"),
default_key="typical_rag",
parent_strategy=parent_vectorstore.as_retriever(),
hypothetical_questions=hypothetic_question_vectorstore.as_retriever(),
summary_strategy=summary_vectorstore.as_retriever(),
)
chain = (
RunnableParallel(
{
"context": itemgetter("question") | retriever | format_docs,
"question": itemgetter("question"),
}
)
| prompt
| model
| StrOutputParser()
)
# Add typing for input
class Question(BaseModel):
question: str
chain = chain.with_types(input_type=Question)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/neo4j-advanced-rag/neo4j_advanced_rag/retrievers.py | from langchain_community.vectorstores import Neo4jVector
from langchain_openai import OpenAIEmbeddings
# Typical RAG retriever
typical_rag = Neo4jVector.from_existing_index(
OpenAIEmbeddings(), index_name="typical_rag"
)
# Parent retriever
parent_query = """
MATCH (node)<-[:HAS_CHILD]-(parent)
WITH parent, max(score) AS score // deduplicate parents
RETURN parent.text AS text, score, {} AS metadata LIMIT 1
"""
parent_vectorstore = Neo4jVector.from_existing_index(
OpenAIEmbeddings(),
index_name="parent_document",
retrieval_query=parent_query,
)
# Hypothetic questions retriever
hypothetic_question_query = """
MATCH (node)<-[:HAS_QUESTION]-(parent)
WITH parent, max(score) AS score // deduplicate parents
RETURN parent.text AS text, score, {} AS metadata
"""
hypothetic_question_vectorstore = Neo4jVector.from_existing_index(
OpenAIEmbeddings(),
index_name="hypothetical_questions",
retrieval_query=hypothetic_question_query,
)
# Summary retriever
summary_query = """
MATCH (node)<-[:HAS_SUMMARY]-(parent)
WITH parent, max(score) AS score // deduplicate parents
RETURN parent.text AS text, score, {} AS metadata
"""
summary_vectorstore = Neo4jVector.from_existing_index(
OpenAIEmbeddings(),
index_name="summary",
retrieval_query=summary_query,
)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/mongo-parent-document-retrieval/README.md | # mongo-parent-document-retrieval
This template performs RAG using MongoDB and OpenAI.
It does a more advanced form of RAG called Parent-Document Retrieval.
In this form of retrieval, a large document is first split into medium sized chunks.
From there, those medium size chunks are split into small chunks.
Embeddings are created for the small chunks.
When a query comes in, an embedding is created for that query and compared to the small chunks.
But rather than passing the small chunks directly to the LLM for generation, the medium-sized chunks
from which the smaller chunks came are passed.
This enables finer-grained search while still passing larger context to the LLM, which can be useful during generation.
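A minimal sketch of this two-level split (the helper below follows the same approach as `parent_child_splitter` in this template's `ingest.py`; the chunk sizes and the `parent_doc_id` metadata key are the ones used there):
```python
import uuid
from langchain_text_splitters import RecursiveCharacterTextSplitter
# Medium-sized "parent" chunks and small "child" chunks, linked by a shared id.
parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
def split_parent_child(docs, id_key="parent_doc_id"):
    parents = parent_splitter.split_documents(docs)
    children = []
    for parent in parents:
        parent_id = str(uuid.uuid4())
        # Children carry a pointer back to their parent so the parent can be
        # looked up after a similarity search over the child embeddings.
        for child in child_splitter.split_documents([parent]):
            child.metadata[id_key] = parent_id
            child.metadata["doc_level"] = "child"
            children.append(child)
        parent.metadata[id_key] = parent_id
        parent.metadata["doc_level"] = "parent"
    return parents, children
```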
## Environment Setup
You should export two environment variables, one being your MongoDB URI, the other being your OpenAI API key.
If you do not have a MongoDB URI, see the `MongoDB Setup` section at the bottom for instructions on how to obtain one.
```shell
export MONGO_URI=...
export OPENAI_API_KEY=...
```
## Usage
To use this package, you should first have the LangChain CLI installed:
```shell
pip install -U langchain-cli
```
To create a new LangChain project and install this as the only package, you can do:
```shell
langchain app new my-app --package mongo-parent-document-retrieval
```
If you want to add this to an existing project, you can just run:
```shell
langchain app add mongo-parent-document-retrieval
```
And add the following code to your `server.py` file:
```python
from mongo_parent_document_retrieval import chain as mongo_parent_document_retrieval_chain
add_routes(app, mongo_parent_document_retrieval_chain, path="/mongo-parent-document-retrieval")
```
(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor and debug LangChain applications.
You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section
```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default"
```
If you DO NOT already have a Mongo Search Index you want to connect to, see `MongoDB Setup` section below before proceeding.
Note that because Parent Document Retrieval uses a different indexing strategy, it's likely you will want to run this new setup.
If you DO have a MongoDB Search index you want to connect to, edit the connection details in `mongo_parent_document_retrieval/chain.py`
If you are inside this directory, then you can spin up a LangServe instance directly by:
```shell
langchain serve
```
This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)
We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/mongo-parent-document-retrieval/playground](http://127.0.0.1:8000/mongo-parent-document-retrieval/playground)
We can access the template from code with:
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/mongo-parent-document-retrieval")
```
For additional context, please refer to [this notebook](https://colab.research.google.com/drive/1cr2HBAHyBmwKUerJq2if0JaNhy-hIq7I#scrollTo=TZp7_CBfxTOB).
## MongoDB Setup
Use this step if you need to set up your MongoDB account and ingest data.
We will first follow the standard MongoDB Atlas setup instructions [here](https://www.mongodb.com/docs/atlas/getting-started/).
1. Create an account (if not already done)
2. Create a new project (if not already done)
3. Locate your MongoDB URI.
This can be done by going to the deployment overview page and connecting to your database
![Screenshot highlighting the 'Connect' button in MongoDB Atlas.](_images/connect.png "MongoDB Atlas Connect Button")
We then look at the drivers available
![Screenshot showing the MongoDB Atlas drivers section for connecting to the database.](_images/driver.png "MongoDB Atlas Drivers Section")
Among which we will see our URI listed
![Screenshot displaying the MongoDB Atlas URI in the connection instructions.](_images/uri.png "MongoDB Atlas URI Display")
Let's then set that as an environment variable locally:
```shell
export MONGO_URI=...
```
4. Let's also set an environment variable for OpenAI (which we will use as an LLM)
```shell
export OPENAI_API_KEY=...
```
5. Let's now ingest some data! We can do that by moving into this directory and running the code in `ingest.py`, e.g.:
```shell
python ingest.py
```
Note that you can (and should!) change this to ingest data of your choice.
6. We now need to set up a vector index on our data.
We can first connect to the cluster where our database lives
![cluster.png](_images%2Fcluster.png)
We can then navigate to where all our collections are listed
![collections.png](_images%2Fcollections.png)
We can then find the collection we want and look at the search indexes for that collection
![search-indexes.png](_images%2Fsearch-indexes.png)
That should likely be empty, and we want to create a new one:
![create.png](_images%2Fcreate.png)
We will use the JSON editor to create it
![json_editor.png](_images%2Fjson_editor.png)
And we will paste the following JSON in:
```text
{
"mappings": {
"dynamic": true,
"fields": {
"doc_level": [
{
"type": "token"
}
],
"embedding": {
"dimensions": 1536,
"similarity": "cosine",
"type": "knnVector"
}
}
}
}
```
![json.png](_images%2Fjson.png)
From there, hit "Next" and then "Create Search Index". It will take a little bit but you should then have an index over your data!
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/mongo-parent-document-retrieval/ingest.py | import os
import uuid
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import MongoDBAtlasVectorSearch
from langchain_text_splitters import RecursiveCharacterTextSplitter
from pymongo import MongoClient
PARENT_DOC_ID_KEY = "parent_doc_id"
def parent_child_splitter(data, id_key=PARENT_DOC_ID_KEY):
parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)
# This text splitter is used to create the child documents
# It should create documents smaller than the parent
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
documents = parent_splitter.split_documents(data)
doc_ids = [str(uuid.uuid4()) for _ in documents]
docs = []
for i, doc in enumerate(documents):
_id = doc_ids[i]
sub_docs = child_splitter.split_documents([doc])
for _doc in sub_docs:
_doc.metadata[id_key] = _id
_doc.metadata["doc_level"] = "child"
docs.extend(sub_docs)
doc.metadata[id_key] = _id
doc.metadata["doc_level"] = "parent"
return documents, docs
MONGO_URI = os.environ["MONGO_URI"]
# Note that if you change this, you also need to change it in `mongo_parent_document_retrieval/chain.py`
DB_NAME = "langchain-test-2"
COLLECTION_NAME = "test"
ATLAS_VECTOR_SEARCH_INDEX_NAME = "default"
EMBEDDING_FIELD_NAME = "embedding"
client = MongoClient(MONGO_URI)
db = client[DB_NAME]
MONGODB_COLLECTION = db[COLLECTION_NAME]
if __name__ == "__main__":
# Load docs
loader = PyPDFLoader("https://arxiv.org/pdf/2303.08774.pdf")
data = loader.load()
# Split docs
parent_docs, child_docs = parent_child_splitter(data)
# Insert the documents in MongoDB Atlas Vector Search
_ = MongoDBAtlasVectorSearch.from_documents(
documents=parent_docs + child_docs,
embedding=OpenAIEmbeddings(disallowed_special=()),
collection=MONGODB_COLLECTION,
index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME,
)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/mongo-parent-document-retrieval/mongo_parent_document_retrieval/__init__.py | from mongo_parent_document_retrieval.chain import chain
__all__ = ["chain"]
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/mongo-parent-document-retrieval/mongo_parent_document_retrieval/chain.py | import os
from langchain_community.chat_models import ChatOpenAI
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import MongoDBAtlasVectorSearch
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from pymongo import MongoClient
MONGO_URI = os.environ["MONGO_URI"]
PARENT_DOC_ID_KEY = "parent_doc_id"
# Note that if you change this, you also need to change it in `ingest.py`
DB_NAME = "langchain-test-2"
COLLECTION_NAME = "test"
ATLAS_VECTOR_SEARCH_INDEX_NAME = "default"
EMBEDDING_FIELD_NAME = "embedding"
client = MongoClient(MONGO_URI)
db = client[DB_NAME]
MONGODB_COLLECTION = db[COLLECTION_NAME]
vector_search = MongoDBAtlasVectorSearch.from_connection_string(
MONGO_URI,
DB_NAME + "." + COLLECTION_NAME,
OpenAIEmbeddings(disallowed_special=()),
index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME,
)
def retrieve(query: str):
results = vector_search.similarity_search(
query,
k=4,
pre_filter={"doc_level": {"$eq": "child"}},
post_filter_pipeline=[
{"$project": {"embedding": 0}},
{
"$lookup": {
"from": COLLECTION_NAME,
"localField": PARENT_DOC_ID_KEY,
"foreignField": PARENT_DOC_ID_KEY,
"as": "parent_context",
"pipeline": [
{"$match": {"doc_level": "parent"}},
{"$limit": 1},
{"$project": {"embedding": 0}},
],
}
},
],
)
parent_docs = []
parent_doc_ids = set()
for result in results:
res = result.metadata["parent_context"][0]
text = res.pop("text")
# This causes serialization issues.
res.pop("_id")
parent_doc = Document(page_content=text, metadata=res)
if parent_doc.metadata[PARENT_DOC_ID_KEY] not in parent_doc_ids:
parent_doc_ids.add(parent_doc.metadata[PARENT_DOC_ID_KEY])
parent_docs.append(parent_doc)
return parent_docs
# RAG prompt
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
# RAG
model = ChatOpenAI()
chain = (
RunnableParallel({"context": retrieve, "question": RunnablePassthrough()})
| prompt
| model
| StrOutputParser()
)
# Add typing for input
class Question(BaseModel):
__root__: str
chain = chain.with_types(input_type=Question)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/llama2-functions/README.md |
# llama2-functions
This template performs extraction of structured data from unstructured data using a [LLaMA2 model that supports a specified JSON output schema](https://github.com/ggerganov/llama.cpp/blob/master/grammars/README.md).
The extraction schema can be set in `chain.py`.
## Environment Setup
This will use a [LLaMA2-13b model hosted by Replicate](https://replicate.com/andreasjansson/llama-2-13b-chat-gguf/versions).
Ensure that `REPLICATE_API_TOKEN` is set in your environment.
## Usage
To use this package, you should first have the LangChain CLI installed:
```shell
pip install -U langchain-cli
```
To create a new LangChain project and install this as the only package, you can do:
```shell
langchain app new my-app --package llama2-functions
```
If you want to add this to an existing project, you can just run:
```shell
langchain app add llama2-functions
```
And add the following code to your `server.py` file:
```python
from llama2_functions import chain as llama2_functions_chain
add_routes(app, llama2_functions_chain, path="/llama2-functions")
```
(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor and debug LangChain applications.
You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section
```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default"
```
If you are inside this directory, then you can spin up a LangServe instance directly by:
```shell
langchain serve
```
This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)
We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/llama2-functions/playground](http://127.0.0.1:8000/llama2-functions/playground)
We can access the template from code with:
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/llama2-functions")
```
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/llama2-functions/llama2-functions.ipynb | {
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "9faf648c-541e-4368-82a8-96287dbf34de",
"metadata": {},
"source": [
"## Run Template\n",
"\n",
"In `server.py`, set -\n",
"```\n",
"add_routes(app, chain_ext, path=\"/llama2_functions\")\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2dfe28bb-6112-459b-a77d-013964b65409",
"metadata": {},
"outputs": [],
"source": [
"from langserve.client import RemoteRunnable\n",
"\n",
"llama2_function = RemoteRunnable(\"http://0.0.0.0:8001/llama2_functions\")\n",
"llama2_function.invoke({\"question\": \"How does agent memory work?\"})"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.16"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/llama2-functions/llama2_functions/__init__.py | from llama2_functions.chain import chain
__all__ = ["chain"]
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/llama2-functions/llama2_functions/chain.py | from langchain_community.llms import Replicate
from langchain_core.prompts import ChatPromptTemplate
# LLM
replicate_id = "andreasjansson/llama-2-13b-chat-gguf:60ec5dda9ff9ee0b6f786c9d1157842e6ab3cc931139ad98fe99e08a35c5d4d4" # noqa: E501
model = Replicate(
model=replicate_id,
model_kwargs={"temperature": 0.8, "max_length": 500, "top_p": 0.95},
)
# Prompt with output schema specification
template = """You are an AI language model assistant. Your task is to generate 3 different versions of the given user /
question to retrieve relevant documents from a vector database. By generating multiple perspectives on the user /
question, your goal is to help the user overcome some of the limitations of distance-based similarity search. /
Respond with json that adheres to the following jsonschema:
{{
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {{
"question_1": {{
"type": "string",
"description": "First version of the user question."
}},
"question_2": {{
"type": "string",
"description": "Second version of the user question."
}},
"question_3": {{
"type": "string",
"description": "Third version of the user question."
}}
}},
"required": ["question_1","question_2","question_3"],
"additionalProperties": false
}}""" # noqa: E501
prompt = ChatPromptTemplate.from_messages(
[("system", template), ("human", "{question}")]
)
# Chain
chain = prompt | model
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/intel-rag-xeon/README.md | # RAG example on Intel Xeon
This template performs RAG using Chroma and Text Generation Inference on Intel® Xeon® Scalable Processors.
Intel® Xeon® Scalable processors feature built-in accelerators for more performance-per-core and unmatched AI performance, with advanced security technologies for the most in-demand workload requirements, all while offering the greatest cloud choice and application portability. For more details, please check [Intel® Xeon® Scalable Processors](https://www.intel.com/content/www/us/en/products/details/processors/xeon/scalable.html).
## Environment Setup
To use [🤗 text-generation-inference](https://github.com/huggingface/text-generation-inference) on Intel® Xeon® Scalable Processors, please follow these steps:
### Launch a local server instance on Intel Xeon Server:
```bash
model=Intel/neural-chat-7b-v3-3
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
docker run --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.4 --model-id $model
```
For gated models such as `LLAMA-2`, you will have to pass -e HUGGING_FACE_HUB_TOKEN=\<token\> to the docker run command above with a valid Hugging Face Hub read token.
Please follow this link [huggingface token](https://huggingface.co/docs/hub/security-tokens) to get an access token, and export the `HUGGINGFACEHUB_API_TOKEN` environment variable with the token.
```bash
export HUGGINGFACEHUB_API_TOKEN=<token>
```
Send a request to check if the endpoint is working:
```bash
curl localhost:8080/generate -X POST -d '{"inputs":"Which NFL team won the Super Bowl in the 2010 season?","parameters":{"max_new_tokens":128, "do_sample": true}}' -H 'Content-Type: application/json'
```
For more details, please refer to [text-generation-inference](https://github.com/huggingface/text-generation-inference).
## Populating with data
If you want to populate the DB with some example data, you can run the below commands:
```shell
poetry install
poetry run python ingest.py
```
The script processes and stores sections from the Edgar 10-K filing for Nike (`nke-10k-2023.pdf`) in a Chroma database.
## Usage
To use this package, you should first have the LangChain CLI installed:
```shell
pip install -U langchain-cli
```
To create a new LangChain project and install this as the only package, you can do:
```shell
langchain app new my-app --package intel-rag-xeon
```
If you want to add this to an existing project, you can just run:
```shell
langchain app add intel-rag-xeon
```
And add the following code to your `server.py` file:
```python
from intel_rag_xeon import chain as xeon_rag_chain
add_routes(app, xeon_rag_chain, path="/intel-rag-xeon")
```
(Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section
```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default"
```
If you are inside this directory, then you can spin up a LangServe instance directly by:
```shell
langchain serve
```
This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)
We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/intel-rag-xeon/playground](http://127.0.0.1:8000/intel-rag-xeon/playground)
We can access the template from code with:
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/intel-rag-xeon")
```
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/intel-rag-xeon/ingest.py | import os
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
def ingest_documents():
"""
Ingest PDF to Chroma from the data/ directory that
contains Edgar 10k filings data for Nike.
"""
# Load list of pdfs
data_path = "data/"
doc = [os.path.join(data_path, file) for file in os.listdir(data_path)][0]
print("Parsing 10k filing doc for NIKE", doc)
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1500, chunk_overlap=100, add_start_index=True
)
loader = UnstructuredFileLoader(doc, mode="single", strategy="fast")
chunks = loader.load_and_split(text_splitter)
print("Done preprocessing. Created", len(chunks), "chunks of the original pdf")
# Create vectorstore
embedder = HuggingFaceEmbeddings(
model_name="sentence-transformers/all-MiniLM-L6-v2"
)
documents = []
for chunk in chunks:
doc = Document(page_content=chunk.page_content, metadata=chunk.metadata)
documents.append(doc)
# Add to vectorDB
_ = Chroma.from_documents(
documents=documents,
collection_name="xeon-rag",
embedding=embedder,
persist_directory="/tmp/xeon_rag_db",
)
if __name__ == "__main__":
ingest_documents()
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/intel-rag-xeon/intel_rag_xeon.ipynb | {
"cells": [
{
"cell_type": "markdown",
"id": "681a5d1e",
"metadata": {},
"source": [
"## Connect to RAG App\n",
"\n",
"Assuming you are already running this server:\n",
"```bash\n",
"langserve start\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d774be2a",
"metadata": {},
"outputs": [],
"source": [
"from langserve.client import RemoteRunnable\n",
"\n",
"gaudi_rag = RemoteRunnable(\"http://localhost:8000/intel-rag-xeon\")\n",
"\n",
"print(gaudi_rag.invoke(\"What was Nike's revenue in 2023?\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "07ae0005",
"metadata": {},
"outputs": [],
"source": [
"print(gaudi_rag.invoke(\"How many employees work at Nike?\"))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/intel-rag-xeon/intel_rag_xeon/__init__.py | from intel_rag_xeon.chain import chain
__all__ = ["chain"]
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/intel-rag-xeon/intel_rag_xeon/chain.py | from langchain.callbacks import streaming_stdout
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import HuggingFaceEndpoint
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_core.vectorstores import VectorStoreRetriever
# Make this look better in the docs.
class Question(BaseModel):
__root__: str
# Init Embeddings
embedder = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
knowledge_base = Chroma(
persist_directory="/tmp/xeon_rag_db",
embedding_function=embedder,
collection_name="xeon-rag",
)
query = "What was Nike's revenue in 2023?"
docs = knowledge_base.similarity_search(query)
print(docs[0].page_content)
retriever = VectorStoreRetriever(
vectorstore=knowledge_base, search_type="mmr", search_kwargs={"k": 1, "fetch_k": 5}
)
# Define our prompt
template = """
Use the following pieces of context from retrieved
dataset to answer the question. Do not make up an answer if there is no
context provided to help answer it.
Context:
---------
{context}
---------
Question: {question}
---------
Answer:
"""
prompt = ChatPromptTemplate.from_template(template)
ENDPOINT_URL = "http://localhost:8080"
callbacks = [streaming_stdout.StreamingStdOutCallbackHandler()]
model = HuggingFaceEndpoint(
endpoint_url=ENDPOINT_URL,
max_new_tokens=512,
top_k=10,
top_p=0.95,
typical_p=0.95,
temperature=0.01,
repetition_penalty=1.03,
streaming=True,
)
# RAG Chain
chain = (
RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
| prompt
| model
| StrOutputParser()
).with_types(input_type=Question)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/hyde/README.md |
# hyde
This template uses HyDE with RAG.
HyDE (Hypothetical Document Embeddings) is a retrieval method that enhances retrieval by generating a hypothetical document for an incoming query.
The document is then embedded, and that embedding is used to look up real documents that are similar to the hypothetical document.
The underlying concept is that the hypothetical document may be closer to the relevant documents in the embedding space than the query itself.
For a more detailed description, see the paper [here](https://arxiv.org/abs/2212.10496).
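At its core, the template composes a small query-transformation step with a standard RAG chain. A rough sketch of the idea (this mirrors `hyde/chain.py` and `hyde/prompts.py` in this package; `retriever` is assumed to be whatever vector-store retriever you already have):
```python
from langchain_community.chat_models import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
# 1. Generate a hypothetical passage that answers the question.
hyde_prompt = PromptTemplate.from_template(
    "Please write a passage to answer the question\nQuestion: {question}\nPassage:"
)
hyde_chain = hyde_prompt | ChatOpenAI() | StrOutputParser()
# 2. Embed that passage (instead of the raw question) to retrieve real documents:
#    docs = (hyde_chain | retriever).invoke({"question": "How does agent memory work?"})
```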
## Environment Setup
Set the `OPENAI_API_KEY` environment variable to access the OpenAI models.
## Usage
To use this package, you should first have the LangChain CLI installed:
```shell
pip install -U langchain-cli
```
To create a new LangChain project and install this as the only package, you can do:
```shell
langchain app new my-app --package hyde
```
If you want to add this to an existing project, you can just run:
```shell
langchain app add hyde
```
And add the following code to your `server.py` file:
```python
from hyde.chain import chain as hyde_chain
add_routes(app, hyde_chain, path="/hyde")
```
(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor and debug LangChain applications.
You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section
```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default"
```
If you are inside this directory, then you can spin up a LangServe instance directly by:
```shell
langchain serve
```
This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)
We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/hyde/playground](http://127.0.0.1:8000/hyde/playground)
We can access the template from code with:
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/hyde")
```
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/hyde/hyde/chain.py | from langchain_community.chat_models import ChatOpenAI
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableParallel
from hyde.prompts import hyde_prompt
# Example of document loading (from url), splitting, and creating a vectorstore
"""
# Load
from langchain_community.document_loaders import WebBaseLoader
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
# Split
from langchain_text_splitters import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
# Add to vectorDB
vectorstore = Chroma.from_documents(documents=all_splits,
collection_name="rag-chroma",
embedding=OpenAIEmbeddings(),
)
retriever = vectorstore.as_retriever()
"""
# Embed a single document as a test
vectorstore = Chroma.from_texts(
["harrison worked at kensho"],
collection_name="rag-chroma",
embedding=OpenAIEmbeddings(),
)
retriever = vectorstore.as_retriever()
# RAG prompt
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
# LLM
model = ChatOpenAI()
# Query transformation chain
# This transforms the query into the hypothetical document
hyde_chain = hyde_prompt | model | StrOutputParser()
# RAG chain
chain = (
RunnableParallel(
{
# Generate a hypothetical document and then pass it to the retriever
"context": hyde_chain | retriever,
"question": lambda x: x["question"],
}
)
| prompt
| model
| StrOutputParser()
)
# Add input types for playground
class ChainInput(BaseModel):
question: str
chain = chain.with_types(input_type=ChainInput)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/hyde/hyde/prompts.py | from langchain_core.prompts.prompt import PromptTemplate
# There are a few different templates to choose from
# These are just different ways to generate hypothetical documents
web_search_template = """Please write a passage to answer the question
Question: {question}
Passage:"""
sci_fact_template = """Please write a scientific paper passage to support/refute the claim
Claim: {question}
Passage:""" # noqa: E501
fiqa_template = """Please write a financial article passage to answer the question
Question: {question}
Passage:"""
trec_news_template = """Please write a news passage about the topic.
Topic: {question}
Passage:"""
# For the sake of this example we will use the web search template
hyde_prompt = PromptTemplate.from_template(web_search_template)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/hybrid-search-weaviate/README.md | # Hybrid Search in Weaviate
This template shows you how to use the hybrid search feature in Weaviate. Hybrid search combines multiple search algorithms to improve the accuracy and relevance of search results.
Weaviate uses both sparse and dense vectors to represent the meaning and context of search queries and documents. The results use a combination of `bm25` and vector search ranking to return the top results.
## Configurations
Connect to your hosted Weaviate vector store by setting a few environment variables (these are validated in `chain.py`):
* `WEAVIATE_ENVIRONMENT`
* `WEAVIATE_API_KEY`
* `WEAVIATE_URL`
You will also need to set your `OPENAI_API_KEY` to use the OpenAI models.
## Get Started
To use this package, you should first have the LangChain CLI installed:
```shell
pip install -U langchain-cli
```
To create a new LangChain project and install this as the only package, you can do:
```shell
langchain app new my-app --package hybrid-search-weaviate
```
If you want to add this to an existing project, you can just run:
```shell
langchain app add hybrid-search-weaviate
```
And add the following code to your `server.py` file:
```python
from hybrid_search_weaviate import chain as hybrid_search_weaviate_chain
add_routes(app, hybrid_search_weaviate_chain, path="/hybrid-search-weaviate")
```
(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor and debug LangChain applications.
You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section
```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default"
```
If you are inside this directory, then you can spin up a LangServe instance directly by:
```shell
langchain serve
```
This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)
We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/hybrid-search-weaviate/playground](http://127.0.0.1:8000/hybrid-search-weaviate/playground)
We can access the template from code with:
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/hybrid-search-weaviate")
```
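Because the chain passes the raw question straight through to the retriever, a minimal sketch of a call (assuming the server above is running and the index is populated) is:
```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/hybrid-search-weaviate")
# Illustrative question; any plain string works as the input.
print(runnable.invoke("How does agent memory work?"))
```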
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/hybrid-search-weaviate/hybrid_search_weaviate.ipynb | {
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "8692a430",
"metadata": {},
"source": [
"# Run Template\n",
"\n",
"In `server.py`, set -\n",
"```\n",
"add_routes(app, chain_ext, path=\"/rag-weaviate\")\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "41db5e30",
"metadata": {},
"outputs": [],
"source": [
"from langserve.client import RemoteRunnable\n",
"\n",
"rag_app_weaviate = RemoteRunnable(\"http://localhost:8000/rag-weaviate\")\n",
"rag_app_weaviate.invoke(\"How does agent memory work?\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.11.6 64-bit",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.6"
},
"vscode": {
"interpreter": {
"hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49"
}
}
},
"nbformat": 4,
"nbformat_minor": 5
}
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/hybrid-search-weaviate/hybrid_search_weaviate/__init__.py | from hybrid_search_weaviate.chain import chain
__all__ = ["chain"]
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/hybrid-search-weaviate/hybrid_search_weaviate/chain.py | import os
import weaviate
from langchain.retrievers.weaviate_hybrid_search import WeaviateHybridSearchRetriever
from langchain_community.chat_models import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
# Check env vars
if os.environ.get("WEAVIATE_API_KEY", None) is None:
raise Exception("Missing `WEAVIATE_API_KEY` environment variable.")
if os.environ.get("WEAVIATE_ENVIRONMENT", None) is None:
raise Exception("Missing `WEAVIATE_ENVIRONMENT` environment variable.")
if os.environ.get("WEAVIATE_URL", None) is None:
raise Exception("Missing `WEAVIATE_URL` environment variable.")
if os.environ.get("OPENAI_API_KEY", None) is None:
raise Exception("Missing `OPENAI_API_KEY` environment variable.")
# Initialize the retriever
WEAVIATE_INDEX_NAME = os.environ.get("WEAVIATE_INDEX", "langchain-test")
WEAVIATE_URL = os.getenv("WEAVIATE_URL")
# Pass the API key to the client so authenticated requests succeed
auth_client_secret = weaviate.AuthApiKey(api_key=os.getenv("WEAVIATE_API_KEY"))
client = weaviate.Client(
    url=WEAVIATE_URL,
    auth_client_secret=auth_client_secret,
    additional_headers={
        "X-Openai-Api-Key": os.getenv("OPENAI_API_KEY"),
    },
)
retriever = WeaviateHybridSearchRetriever(
client=client,
index_name=WEAVIATE_INDEX_NAME,
text_key="text",
attributes=[],
create_schema_if_missing=True,
)
# # Ingest code - you may need to run this the first time
# # Load
# loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
# data = loader.load()
#
# # Split
# text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
# all_splits = text_splitter.split_documents(data)
#
# # Add to vectorDB
# retriever.add_documents(all_splits)
# RAG prompt
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
# RAG
model = ChatOpenAI()
chain = (
RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
| prompt
| model
| StrOutputParser()
)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/guardrails-output-parser/README.md |
# guardrails-output-parser
This template uses [guardrails-ai](https://github.com/guardrails-ai/guardrails) to validate LLM output.
The `GuardrailsOutputParser` is set in `chain.py`.
The default example protects against profanity.
## Environment Setup
Set the `OPENAI_API_KEY` environment variable to access the OpenAI models.
## Usage
To use this package, you should first have the LangChain CLI installed:
```shell
pip install -U langchain-cli
```
To create a new LangChain project and install this as the only package, you can do:
```shell
langchain app new my-app --package guardrails-output-parser
```
If you want to add this to an existing project, you can just run:
```shell
langchain app add guardrails-output-parser
```
And add the following code to your `server.py` file:
```python
from guardrails_output_parser.chain import chain as guardrails_output_parser_chain
add_routes(app, guardrails_output_parser_chain, path="/guardrails-output-parser")
```
(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor and debug LangChain applications.
You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section
```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default"
```
If you are inside this directory, then you can spin up a LangServe instance directly by:
```shell
langchain serve
```
This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)
We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/guardrails-output-parser/playground](http://127.0.0.1:8000/guardrails-output-parser/playground)
We can access the template from code with:
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/guardrails-output-parser")
```
If Guardrails does not find any profanity, then the translated output is returned as is. If Guardrails does find profanity, then an empty string is returned.
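As a rough sketch (the input key mirrors the `${statement_to_be_translated}` variable in the rail spec in `chain.py`; adjust it if your rail differs), a call might look like:
```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/guardrails-output-parser")
# Hypothetical input; the statement is translated into English and checked for profanity.
print(runnable.invoke({"statement_to_be_translated": "quesadilla de pollo por favor"}))
```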
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/guardrails-output-parser/guardrails_output_parser/chain.py | from langchain.output_parsers import GuardrailsOutputParser
from langchain_community.llms import OpenAI
from langchain_core.prompts import PromptTemplate
# Define rail string
rail_str = """
<rail version="0.1">
<output>
<string
description="Profanity-free translation"
format="is-profanity-free"
name="translated_statement"
on-fail-is-profanity-free="fix">
</string>
</output>
<prompt>
Translate the given statement into English:
${statement_to_be_translated}
${gr.complete_json_suffix}
</prompt>
</rail>
"""
# Create the GuardrailsOutputParser object from the rail string
output_parser = GuardrailsOutputParser.from_rail_string(rail_str)
# Define the prompt, model and chain
prompt = PromptTemplate(
template=output_parser.guard.prompt.escape(),
input_variables=output_parser.guard.prompt.variable_names,
)
chain = prompt | OpenAI() | output_parser
# This is needed because GuardrailsOutputParser does not have an inferrable type
chain = chain.with_types(output_type=dict)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/gemini-functions-agent/README.md |
# gemini-functions-agent
This template creates an agent that uses Google Gemini function calling to communicate its decisions on what actions to take.
This example creates an agent that can optionally look up information on the internet using Tavily's search engine.
[See an example LangSmith trace here](https://smith.langchain.com/public/0ebf1bd6-b048-4019-b4de-25efe8d3d18c/r)
## Environment Setup
The following environment variables need to be set:
Set the `TAVILY_API_KEY` environment variable to access Tavily
Set the `GOOGLE_API_KEY` environment variable to access the Google Gemini APIs.
## Usage
To use this package, you should first have the LangChain CLI installed:
```shell
pip install -U langchain-cli
```
To create a new LangChain project and install this as the only package, you can do:
```shell
langchain app new my-app --package gemini-functions-agent
```
If you want to add this to an existing project, you can just run:
```shell
langchain app add gemini-functions-agent
```
And add the following code to your `server.py` file:
```python
from gemini_functions_agent import agent_executor as gemini_functions_agent_chain
add_routes(app, gemini_functions_agent_chain, path="/gemini-functions-agent")
```
(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor and debug LangChain applications.
You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section
```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default"
```
If you are inside this directory, then you can spin up a LangServe instance directly by:
```shell
langchain serve
```
This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)
We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/gemini-functions-agent/playground](http://127.0.0.1:8000/gemini-functions-agent/playground)
We can access the template from code with:
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/gemini-functions-agent")
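# Minimal invocation sketch: the agent expects an input string and a chat history
# (an empty list is fine for the first turn). The question is just a placeholder.
runnable.invoke({"input": "what is the weather in SF?", "chat_history": []})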
``` | Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/gemini-functions-agent/gemini_functions_agent/__init__.py | from gemini_functions_agent.agent import agent_executor
__all__ = ["agent_executor"]
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/gemini-functions-agent/gemini_functions_agent/agent.py | from typing import List, Tuple
from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.utilities.tavily_search import TavilySearchAPIWrapper
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_google_genai import ChatGoogleGenerativeAI
# Create the tool
search = TavilySearchAPIWrapper()
description = """"A search engine optimized for comprehensive, accurate, \
and trusted results. Useful for when you need to answer questions \
about current events or about recent information. \
Input should be a search query. \
If the user is asking about something that you don't know about, \
you should probably use this tool to see if that can provide any information."""
tavily_tool = TavilySearchResults(api_wrapper=search, description=description)
tools = [tavily_tool]
llm = ChatGoogleGenerativeAI(temperature=0, model="gemini-pro")
prompt = ChatPromptTemplate.from_messages(
[
MessagesPlaceholder(variable_name="chat_history"),
("user", "{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
)
llm_with_tools = llm.bind(functions=tools)
def _format_chat_history(chat_history: List[Tuple[str, str]]):
buffer = []
for human, ai in chat_history:
buffer.append(HumanMessage(content=human))
buffer.append(AIMessage(content=ai))
return buffer
agent = (
{
"input": lambda x: x["input"],
"chat_history": lambda x: _format_chat_history(x["chat_history"]),
"agent_scratchpad": lambda x: format_to_openai_function_messages(
x["intermediate_steps"]
),
}
| prompt
| llm_with_tools
| OpenAIFunctionsAgentOutputParser()
)
class AgentInput(BaseModel):
input: str
chat_history: List[Tuple[str, str]] = Field(
..., extra={"widget": {"type": "chat", "input": "input", "output": "output"}}
)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True).with_types(
input_type=AgentInput
)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/extraction-openai-functions/README.md |
# extraction-openai-functions
This template uses [OpenAI function calling](https://python.langchain.com/docs/modules/chains/how_to/openai_functions) for extraction of structured output from unstructured input text.
The extraction output schema can be set in `chain.py`.
## Environment Setup
Set the `OPENAI_API_KEY` environment variable to access the OpenAI models.
## Usage
To use this package, you should first have the LangChain CLI installed:
```shell
pip install -U langchain-cli
```
To create a new LangChain project and install this as the only package, you can do:
```shell
langchain app new my-app --package extraction-openai-functions
```
If you want to add this to an existing project, you can just run:
```shell
langchain app add extraction-openai-functions
```
And add the following code to your `server.py` file:
```python
from extraction_openai_functions import chain as extraction_openai_functions_chain
add_routes(app, extraction_openai_functions_chain, path="/extraction-openai-functions")
```
(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor and debug LangChain applications.
You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section
```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default"
```
If you are inside this directory, then you can spin up a LangServe instance directly by:
```shell
langchain serve
```
This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)
We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/extraction-openai-functions/playground](http://127.0.0.1:8000/extraction-openai-functions/playground)
We can access the template from code with:
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/extraction-openai-functions")
```
By default, this package is set to extract the title and author of papers, as specified in the `chain.py` file.
OpenAI function calling with the default `ChatOpenAI` model is used to perform the extraction.
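For example, a minimal sketch of a call, passing some article text as the `input` field, looks like:
```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/extraction-openai-functions")
# Placeholder article text; the chain returns a list of {"title", "author"} dicts.
article = "We build on Chain of Thought prompting (Wei et al. 2022) in this work."
print(runnable.invoke({"input": article}))
```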
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/extraction-openai-functions/extraction_openai_functions.ipynb | {
"cells": [
{
"cell_type": "markdown",
"id": "16f2c32e",
"metadata": {},
"source": [
"## Document Loading\n",
"\n",
"Load a blog post on agents."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "c9fadce0",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import WebBaseLoader\n",
"\n",
"loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n",
"text = loader.load()"
]
},
{
"cell_type": "markdown",
"id": "4086be03",
"metadata": {},
"source": [
"## Run Template\n",
"\n",
"In `server.py`, set -\n",
"```\n",
"add_routes(app, chain_ext, path=\"/extraction_openai_functions\")\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "ed507784",
"metadata": {},
"outputs": [],
"source": [
"from langserve.client import RemoteRunnable\n",
"\n",
"oai_function = RemoteRunnable(\"http://0.0.0.0:8001/extraction_openai_functions\")"
]
},
{
"cell_type": "markdown",
"id": "68046695",
"metadata": {},
"source": [
"The function wille extract paper titles and authors from an input."
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "6dace748",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'title': 'Chain of Thought', 'author': 'Wei et al. 2022'},\n",
" {'title': 'Tree of Thoughts', 'author': 'Yao et al. 2023'},\n",
" {'title': 'LLM+P', 'author': 'Liu et al. 2023'}]"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"oai_function.invoke({\"input\": text[0].page_content[0:4000]})"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "langserve",
"language": "python",
"name": "langserve"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.16"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/extraction-openai-functions/extraction_openai_functions/__init__.py | from extraction_openai_functions.chain import chain
__all__ = ["chain"]
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/extraction-openai-functions/extraction_openai_functions/chain.py | import json
from typing import List, Optional
from langchain.utils.openai_functions import convert_pydantic_to_openai_function
from langchain_community.chat_models import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
template = """A article will be passed to you. Extract from it all papers that are mentioned by this article.
Do not extract the name of the article itself. If no papers are mentioned that's fine - you don't need to extract any! Just return an empty list.
Do not make up or guess ANY extra information. Only extract what exactly is in the text.""" # noqa: E501
prompt = ChatPromptTemplate.from_messages([("system", template), ("human", "{input}")])
# Function output schema
class Paper(BaseModel):
"""Information about papers mentioned."""
title: str
author: Optional[str]
class Info(BaseModel):
"""Information to extract"""
papers: List[Paper]
# Function definition
model = ChatOpenAI()
function = [convert_pydantic_to_openai_function(Info)]
chain = (
prompt
| model.bind(functions=function, function_call={"name": "Info"})
| (
lambda x: json.loads(x.additional_kwargs["function_call"]["arguments"])[
"papers"
]
)
)
# chain = prompt | model.bind(
# functions=function, function_call={"name": "Info"}
# ) | JsonKeyOutputFunctionsParser(key_name="papers")
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/extraction-anthropic-functions/README.md |
# extraction-anthropic-functions
This template enables [Anthropic function calling](https://python.langchain.com/docs/integrations/chat/anthropic_functions).
This can be used for various tasks, such as extraction or tagging.
The function output schema can be set in `chain.py`.
## Environment Setup
Set the `ANTHROPIC_API_KEY` environment variable to access the Anthropic models.
## Usage
To use this package, you should first have the LangChain CLI installed:
```shell
pip install -U langchain-cli
```
To create a new LangChain project and install this as the only package, you can do:
```shell
langchain app new my-app --package extraction-anthropic-functions
```
If you want to add this to an existing project, you can just run:
```shell
langchain app add extraction-anthropic-functions
```
And add the following code to your `server.py` file:
```python
from extraction_anthropic_functions import chain as extraction_anthropic_functions_chain
add_routes(app, extraction_anthropic_functions_chain, path="/extraction-anthropic-functions")
```
(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor and debug LangChain applications.
You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section
```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default"
```
If you are inside this directory, then you can spin up a LangServe instance directly by:
```shell
langchain serve
```
This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)
We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/extraction-anthropic-functions/playground](http://127.0.0.1:8000/extraction-anthropic-functions/playground)
We can access the template from code with:
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/extraction-anthropic-functions")
```
By default, the package extracts the title and author of papers mentioned in the input text, following the schema specified in `chain.py`. This template uses `Claude2` by default.
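As a minimal sketch (assuming the server above is running), you can pass the article text as the `input` variable of the prompt:
```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/extraction-anthropic-functions")
# Placeholder article text; papers mentioned in it are extracted as structured output.
article = "We build on Chain of Thought prompting (Wei et al. 2022) in this work."
print(runnable.invoke({"input": article}))
```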
---
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/extraction-anthropic-functions/extraction_anthropic_functions.ipynb | {
"cells": [
{
"cell_type": "markdown",
"id": "4ae4b789",
"metadata": {},
"source": [
"## Document Loading\n",
"\n",
"Load a blog post on agents."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "5d6bd62e",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import WebBaseLoader\n",
"\n",
"loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n",
"text = loader.load()"
]
},
{
"cell_type": "markdown",
"id": "8e21575d",
"metadata": {},
"source": [
"## Run Template\n",
"\n",
"In `server.py`, set -\n",
"```\n",
"add_routes(app, chain_ext, path=\"/extraction-anthropic-functions\")\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5fd794ec-a002-490e-8eb9-06ce3e6c2f14",
"metadata": {},
"outputs": [],
"source": [
"from langserve.client import RemoteRunnable\n",
"\n",
"anthropic_function_model = RemoteRunnable(\n",
" \"http://localhost:8001/extraction-anthropic-functions\"\n",
")\n",
"anthropic_function_model.invoke(text[0].page_content[0:1500])"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "langserve",
"language": "python",
"name": "langserve"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.16"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/extraction-anthropic-functions/extraction_anthropic_functions/__init__.py | from extraction_anthropic_functions.chain import chain
__all__ = ["chain"]
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/extraction-anthropic-functions/extraction_anthropic_functions/chain.py | from typing import List, Optional
from langchain.utils.openai_functions import convert_pydantic_to_openai_function
from langchain_core.output_parsers.openai_functions import JsonKeyOutputFunctionsParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_experimental.llms.anthropic_functions import AnthropicFunctions
template = """A article will be passed to you. Extract from it all papers that are mentioned by this article.
Do not extract the name of the article itself. If no papers are mentioned that's fine - you don't need to extract any! Just return an empty list.
Do not make up or guess ANY extra information. Only extract what exactly is in the text.""" # noqa: E501
prompt = ChatPromptTemplate.from_messages([("system", template), ("human", "{input}")])
# Function output schema
class Paper(BaseModel):
"""Information about papers mentioned."""
title: str
author: Optional[str]
class Info(BaseModel):
"""Information to extract"""
papers: List[Paper]
# Function definition
model = AnthropicFunctions()
function = [convert_pydantic_to_openai_function(Info)]
chain = (
prompt
| model.bind(functions=function, function_call={"name": "Info"})
| JsonKeyOutputFunctionsParser(key_name="papers")
)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/elastic-query-generator/README.md |
# elastic-query-generator
This template allows interacting with Elasticsearch analytics databases in natural language using LLMs.
It builds search queries via the Elasticsearch DSL API (filters and aggregations).
## Environment Setup
Set the `OPENAI_API_KEY` environment variable to access the OpenAI models.
### Installing Elasticsearch
There are a number of ways to run Elasticsearch. However, one recommended way is through Elastic Cloud.
Create a free trial account on [Elastic Cloud](https://cloud.elastic.co/registration?utm_source=langchain&utm_content=langserve).
Once you have a deployment, update the connection string.
The password and connection string (Elasticsearch URL) can be found on the deployment console.
Note that the Elasticsearch client must have permissions for index listing, mapping description, and search queries.
### Populating with data
If you want to populate the DB with some example info, you can run `python ingest.py`.
This will create a `customers` index. The package specifies which indices to generate queries against; here it is set to `["customers"]`, so adjust it for your own Elasticsearch setup.
## Usage
To use this package, you should first have the LangChain CLI installed:
```shell
pip install -U langchain-cli
```
To create a new LangChain project and install this as the only package, you can do:
```shell
langchain app new my-app --package elastic-query-generator
```
If you want to add this to an existing project, you can just run:
```shell
langchain app add elastic-query-generator
```
And add the following code to your `server.py` file:
```python
from elastic_query_generator.chain import chain as elastic_query_generator_chain
add_routes(app, elastic_query_generator_chain, path="/elastic-query-generator")
```
(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor and debug LangChain applications.
You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section
```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default"
```
If you are inside this directory, then you can spin up a LangServe instance directly by:
```shell
langchain serve
```
This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)
We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/elastic-query-generator/playground](http://127.0.0.1:8000/elastic-query-generator/playground)
We can access the template from code with:
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/elastic-query-generator")
```
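The chain takes a natural-language `input` (and an optional `top_k`), so a minimal call, mirroring `main.py`, looks like:
```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/elastic-query-generator")
# Returns the generated Elasticsearch DSL query as JSON.
print(runnable.invoke({"input": "how many customers named Carol"}))
```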
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/elastic-query-generator/ingest.py | from elasticsearch import Elasticsearch
# Setup Elasticsearch
# This shows how to set it up for a cloud hosted version
# Password for the 'elastic' user generated by Elasticsearch
ELASTIC_PASSWORD = "..."
# Found in the 'Manage Deployment' page
CLOUD_ID = "..."
# Create the client instance
db = Elasticsearch(cloud_id=CLOUD_ID, basic_auth=("elastic", ELASTIC_PASSWORD))
customers = [
{"firstname": "Jennifer", "lastname": "Walters"},
{"firstname": "Monica", "lastname": "Rambeau"},
{"firstname": "Carol", "lastname": "Danvers"},
{"firstname": "Wanda", "lastname": "Maximoff"},
{"firstname": "Jennifer", "lastname": "Takeda"},
]
for i, customer in enumerate(customers):
db.create(index="customers", document=customer, id=i)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/elastic-query-generator/main.py | from elastic_query_generator.chain import chain
if __name__ == "__main__":
print(chain.invoke({"input": "how many customers named Carol"}))
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/elastic-query-generator/elastic_query_generator/__init__.py | from elastic_query_generator.chain import chain
__all__ = [
"chain",
]
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/elastic-query-generator/elastic_query_generator/chain.py | from elasticsearch import Elasticsearch
from langchain.output_parsers.json import SimpleJsonOutputParser
from langchain_community.chat_models import ChatOpenAI
from langchain_core.pydantic_v1 import BaseModel
from .elastic_index_info import get_indices_infos
from .prompts import DSL_PROMPT
# Setup Elasticsearch
# This shows how to set it up for a cloud hosted version
# Password for the 'elastic' user generated by Elasticsearch
ELASTIC_PASSWORD = "..."
# Found in the 'Manage Deployment' page
CLOUD_ID = "..."
# Create the client instance
db = Elasticsearch(cloud_id=CLOUD_ID, basic_auth=("elastic", ELASTIC_PASSWORD))
# Specify indices to include
# If you want to use on your own indices, you will need to change this.
INCLUDE_INDICES = ["customers"]
# With the Elasticsearch connection created, we can now move on to the chain
_model = ChatOpenAI(temperature=0, model="gpt-4")
chain = (
{
"input": lambda x: x["input"],
        # This line only gets index info for the "customers" index.
        # If you are running this on your own data, you will want to change this.
"indices_info": lambda _: get_indices_infos(
db, include_indices=INCLUDE_INDICES
),
"top_k": lambda x: x.get("top_k", 5),
}
| DSL_PROMPT
| _model
| SimpleJsonOutputParser()
)
# Nicely typed inputs for playground
class ChainInputs(BaseModel):
input: str
top_k: int = 5
chain = chain.with_types(input_type=ChainInputs)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/elastic-query-generator/elastic_query_generator/elastic_index_info.py | from typing import List
def _list_indices(database, include_indices=None, ignore_indices=None) -> List[str]:
all_indices = [index["index"] for index in database.cat.indices(format="json")]
if include_indices:
all_indices = [i for i in all_indices if i in include_indices]
if ignore_indices:
all_indices = [i for i in all_indices if i not in ignore_indices]
return all_indices
def get_indices_infos(
database,
sample_documents_in_index_info=5,
include_indices=None,
ignore_indices=None,
) -> str:
indices = _list_indices(
database, include_indices=include_indices, ignore_indices=ignore_indices
)
mappings = database.indices.get_mapping(index=",".join(indices))
if sample_documents_in_index_info > 0:
for k, v in mappings.items():
hits = database.search(
index=k,
query={"match_all": {}},
size=sample_documents_in_index_info,
)["hits"]["hits"]
hits = [str(hit["_source"]) for hit in hits]
mappings[k]["mappings"] = str(v) + "\n\n/*\n" + "\n".join(hits) + "\n*/"
return "\n\n".join(
[
"Mapping for index {}:\n{}".format(index, mappings[index]["mappings"])
for index in mappings
]
)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/elastic-query-generator/elastic_query_generator/prompts.py | from langchain_core.prompts.prompt import PromptTemplate
PROMPT_SUFFIX = """Only use the following Elasticsearch indices:
{indices_info}
Question: {input}
ESQuery:"""
DEFAULT_DSL_TEMPLATE = """Given an input question, create a syntactically correct Elasticsearch query to run. Always limit your query to at most {top_k} results, unless the user specifies in their question a specific number of examples they wish to obtain, or unless its implied that they want to see all. You can order the results by a relevant column to return the most interesting examples in the database.
Unless told to do not query for all the columns from a specific index, only ask for a the few relevant columns given the question.
Pay attention to use only the column names that you can see in the mapping description. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which index. Return the query as valid json.
Use the following format:
Question: Question here
ESQuery: Elasticsearch Query formatted as json
""" # noqa: E501
DSL_PROMPT = PromptTemplate.from_template(DEFAULT_DSL_TEMPLATE + PROMPT_SUFFIX)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/docs/CONTRIBUTING.md | # Contributing
Thanks for taking the time to contribute a new template!
We've tried to make this process as simple and painless as possible.
If you need any help at all, please reach out!
To contribute a new template, first fork this repository.
Then clone that fork and pull it down locally.
Set up an appropriate dev environment, and make sure you are in this `templates` directory.
Make sure you have `langchain-cli` installed.
```shell
pip install -U langchain-cli
```
You can then run the following command to create a new skeleton of a package.
By convention, package names should use `-` delimiters (not `_`).
```shell
langchain template new $PROJECT_NAME
```
You can then edit the contents of the package as you desire.
Note that by default we expect the main chain to be exposed as `chain` in the `__init__.py` file of the package.
You can change this (either the name or the location), but if you do so it is important to update the `tool.langserve`
part of `pyproject.toml`.
For example, if you update the main chain exposed to be called `agent_executor`, then that section should look like:
```text
[tool.langserve]
export_module = "..."
export_attr = "agent_executor"
```
Make sure to add any requirements of the package to `pyproject.toml` (and to remove any that are not used).
Please update the `README.md` file to give some background on your package and how to set it up.
If you want to change the license of your template for whatever reason, you may! Note that by default it is MIT licensed.
If you want to test out your package at any point in time, you can spin up a LangServe instance directly from the package.
See instructions [here](LAUNCHING_PACKAGE.md) on how to best do that.
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/docs/INDEX.md | # Templates
Highlighting a few different categories of templates
## ⭐ Popular
These are some of the more popular templates to get started with.
- [Retrieval Augmented Generation Chatbot](../rag-conversation): Build a chatbot over your data. Defaults to OpenAI and PineconeVectorStore.
- [Extraction with OpenAI Functions](../extraction-openai-functions): Do extraction of structured data from unstructured data. Uses OpenAI function calling.
- [Local Retrieval Augmented Generation](../rag-chroma-private): Build a chatbot over your data. Uses only local tooling: Ollama, GPT4all, Chroma.
- [OpenAI Functions Agent](../openai-functions-agent): Build a chatbot that can take actions. Uses OpenAI function calling and Tavily.
- [XML Agent](../xml-agent): Build a chatbot that can take actions. Uses Anthropic and You.com.
## 📥 Advanced Retrieval
These templates cover advanced retrieval techniques, which can be used for chat and QA over databases or documents.
- [Reranking](../rag-pinecone-rerank): This retrieval technique uses Cohere's reranking endpoint to rerank documents from an initial retrieval step.
- [Anthropic Iterative Search](../anthropic-iterative-search): This retrieval technique uses iterative prompting to determine what to retrieve and whether the retriever documents are good enough.
- **Parent Document Retrieval** using [Neo4j](../neo4j-parent) or [MongoDB](../mongo-parent-document-retrieval): This retrieval technique stores embeddings for smaller chunks, but then returns larger chunks to pass to the model for generation.
- [Semi-Structured RAG](../rag-semi-structured): The template shows how to do retrieval over semi-structured data (e.g. data that involves both text and tables).
- [Temporal RAG](../rag-timescale-hybrid-search-time): The template shows how to do hybrid search over data with a time-based component using [Timescale Vector](https://www.timescale.com/ai?utm_campaign=vectorlaunch&utm_source=langchain&utm_medium=referral).
## 🔍Advanced Retrieval - Query Transformation
A selection of advanced retrieval methods that involve transforming the original user query, which can improve retrieval quality.
- [Hypothetical Document Embeddings](../hyde): A retrieval technique that generates a hypothetical document for a given query, and then uses the embedding of that document to do semantic search. [Paper](https://arxiv.org/abs/2212.10496).
- [Rewrite-Retrieve-Read](../rewrite-retrieve-read): A retrieval technique that rewrites a given query before passing it to a search engine. [Paper](https://arxiv.org/abs/2305.14283).
- [Step-back QA Prompting](../stepback-qa-prompting): A retrieval technique that generates a "step-back" question and then retrieves documents relevant to both that question and the original question. [Paper](https://arxiv.org/abs//2310.06117).
- [RAG-Fusion](../rag-fusion): A retrieval technique that generates multiple queries and then reranks the retrieved documents using reciprocal rank fusion. [Article](https://towardsdatascience.com/forget-rag-the-future-is-rag-fusion-1147298d8ad1).
- [Multi-Query Retriever](../rag-pinecone-multi-query): This retrieval technique uses an LLM to generate multiple queries and then fetches documents for all queries.
## 🧠Advanced Retrieval - Query Construction
A selection of advanced retrieval methods that involve constructing a query in a separate DSL from natural language, which enable natural language chat over various structured databases.
- [Elastic Query Generator](../elastic-query-generator): Generate elastic search queries from natural language.
- [Neo4j Cypher Generation](../neo4j-cypher): Generate cypher statements from natural language. Available with a ["full text" option](../neo4j-cypher-ft) as well.
- [Supabase Self Query](../self-query-supabase): Parse a natural language query into a semantic query as well as a metadata filter for Supabase.
## 🦙 OSS Models
These templates use OSS models, which enable privacy for sensitive data.
- [Local Retrieval Augmented Generation](../rag-chroma-private): Build a chatbot over your data. Uses only local tooling: Ollama, GPT4all, Chroma.
- [SQL Question Answering (Replicate)](../sql-llama2): Question answering over a SQL database, using Llama2 hosted on [Replicate](https://replicate.com/).
- [SQL Question Answering (LlamaCpp)](../sql-llamacpp): Question answering over a SQL database, using Llama2 through [LlamaCpp](https://github.com/ggerganov/llama.cpp).
- [SQL Question Answering (Ollama)](../sql-ollama): Question answering over a SQL database, using Llama2 through [Ollama](https://github.com/jmorganca/ollama).
## ⛏️ Extraction
These templates extract data in a structured format based upon a user-specified schema.
- [Extraction Using OpenAI Functions](../extraction-openai-functions): Extract information from text using OpenAI Function Calling.
- [Extraction Using Anthropic Functions](../extraction-anthropic-functions): Extract information from text using a LangChain wrapper around the Anthropic endpoints intended to simulate function calling.
- [Extract BioTech Plate Data](../plate-chain): Extract microplate data from messy Excel spreadsheets into a more normalized format.
## ⛏️Summarization and tagging
These templates summarize or categorize documents and text.
- [Summarization using Anthropic](../summarize-anthropic): Uses Anthropic's Claude2 to summarize long documents.
## 🤖 Agents
These templates build chatbots that can take actions, helping to automate tasks.
- [OpenAI Functions Agent](../openai-functions-agent): Build a chatbot that can take actions. Uses OpenAI function calling and Tavily.
- [XML Agent](../xml-agent): Build a chatbot that can take actions. Uses Anthropic and You.com.
## :rotating_light: Safety and evaluation
These templates enable moderation or evaluation of LLM outputs.
- [Guardrails Output Parser](../guardrails-output-parser): Use guardrails-ai to validate LLM output.
- [Chatbot Feedback](../chat-bot-feedback): Use LangSmith to evaluate chatbot responses.
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/docs/LAUNCHING_PACKAGE.md | # Launching LangServe from a Package
You can also launch LangServe directly from a package, without having to pull it into a project.
This can be useful when you are developing a package and want to test it quickly.
The downside of this is that it gives you a little less control over how the LangServe APIs are configured,
which is why for proper projects we recommend creating a full project.
In order to do this, first change your working directory to the package itself.
For example, if you are currently in this `templates` module, you can go into the `pirate-speak` package with:
```shell
cd pirate-speak
```
Inside this package there is a `pyproject.toml` file.
This file contains a `tool.langserve` section with information on how this package should be used.
For example, in `pirate-speak` we see:
```text
[tool.langserve]
export_module = "pirate_speak.chain"
export_attr = "chain"
```
This information can be used to launch a LangServe instance automatically.
In order to do this, first make sure the CLI is installed:
```shell
pip install -U langchain-cli
```
You can then run:
```shell
langchain template serve
```
This will spin up endpoints, documentation, and playground for this chain.
For example, you can access the playground at [http://127.0.0.1:8000/playground/](http://127.0.0.1:8000/playground/)
![Screenshot of the LangServe Playground web interface with input and output fields.](playground.png "LangServe Playground Interface")
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/csv-agent/README.md |
# csv-agent
This template uses a [csv agent](https://python.langchain.com/docs/integrations/toolkits/csv) with tools (Python REPL) and memory (vectorstore) for interaction (question-answering) with text data.
## Environment Setup
Set the `OPENAI_API_KEY` environment variable to access the OpenAI models.
To set up the environment, run the `ingest.py` script to ingest the data into a vectorstore.
## Usage
To use this package, you should first have the LangChain CLI installed:
```shell
pip install -U langchain-cli
```
To create a new LangChain project and install this as the only package, you can do:
```shell
langchain app new my-app --package csv-agent
```
If you want to add this to an existing project, you can just run:
```shell
langchain app add csv-agent
```
And add the following code to your `server.py` file:
```python
from csv_agent.agent import agent_executor as csv_agent_chain
add_routes(app, csv_agent_chain, path="/csv-agent")
```
(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor and debug LangChain applications.
You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section
```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default"
```
If you are inside this directory, then you can spin up a LangServe instance directly by:
```shell
langchain serve
```
This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)
We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/csv-agent/playground](http://127.0.0.1:8000/csv-agent/playground)
We can access the template from code with:
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/csv-agent")
```
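The agent takes a single `input` question, so a minimal call, mirroring `main.py`, looks like:
```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/csv-agent")
# Question about the ingested Titanic CSV; returns the agent's final answer string.
print(runnable.invoke({"input": "who was in cabin c28?"}))
```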
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/csv-agent/ingest.py | from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders import CSVLoader
from langchain_community.vectorstores import FAISS
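# Note: this path is specific to the original author's machine; point it at your
# own local copy of the Titanic CSV before running the ingestion.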
loader = CSVLoader("/Users/harrisonchase/Downloads/titanic.csv")
docs = loader.load()
index_creator = VectorstoreIndexCreator(vectorstore_cls=FAISS)
index = index_creator.from_documents(docs)
index.vectorstore.save_local("titanic_data")
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/csv-agent/main.py | from csv_agent.agent import agent_executor
if __name__ == "__main__":
question = "who was in cabin c28?"
print(agent_executor.invoke({"input": question}))
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/csv-agent/csv_agent/__init__.py | from csv_agent.agent import agent_executor
__all__ = ["agent_executor"]
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/csv-agent/csv_agent/agent.py | from pathlib import Path
import pandas as pd
from langchain.agents import AgentExecutor, OpenAIFunctionsAgent
from langchain.tools.retriever import create_retriever_tool
from langchain_community.chat_models import ChatOpenAI
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_experimental.tools import PythonAstREPLTool
MAIN_DIR = Path(__file__).parents[1]
pd.set_option("display.max_rows", 20)
pd.set_option("display.max_columns", 20)
embedding_model = OpenAIEmbeddings()
vectorstore = FAISS.load_local(MAIN_DIR / "titanic_data", embedding_model)
retriever_tool = create_retriever_tool(
vectorstore.as_retriever(), "person_name_search", "Search for a person by name"
)
TEMPLATE = """You are working with a pandas dataframe in Python. The name of the dataframe is `df`.
It is important to understand the attributes of the dataframe before working with it. This is the result of running `df.head().to_markdown()`
<df>
{dhead}
</df>
You are not meant to use only these rows to answer questions - they are meant as a way of telling you about the shape and schema of the dataframe.
You also do not have to use only the information here to answer questions - you can run intermediate queries to do exploratory data analysis to give you more information as needed.
You have a tool called `person_name_search` through which you can lookup a person by name and find the records corresponding to people with similar name as the query.
You should only really use this if your search term contains a person's name. Otherwise, try to solve it with code.
For example:
<question>How old is Jane?</question>
<logic>Use `person_name_search` since you can use the query `Jane`</logic>
<question>Who has id 320</question>
<logic>Use `python_repl` since even though the question is about a person, you don't know their name so you can't include it.</logic>
""" # noqa: E501
class PythonInputs(BaseModel):
query: str = Field(description="code snippet to run")
df = pd.read_csv(MAIN_DIR / "titanic.csv")
template = TEMPLATE.format(dhead=df.head().to_markdown())
prompt = ChatPromptTemplate.from_messages(
[
("system", template),
MessagesPlaceholder(variable_name="agent_scratchpad"),
("human", "{input}"),
]
)
repl = PythonAstREPLTool(
locals={"df": df},
name="python_repl",
description="Runs code and returns the output of the final line",
args_schema=PythonInputs,
)
tools = [repl, retriever_tool]
agent = OpenAIFunctionsAgent(
llm=ChatOpenAI(temperature=0, model="gpt-4"), prompt=prompt, tools=tools
)
agent_executor = AgentExecutor(
agent=agent, tools=tools, max_iterations=5, early_stopping_method="generate"
) | (lambda x: x["output"])
# Typing for playground inputs
class AgentInputs(BaseModel):
input: str
agent_executor = agent_executor.with_types(input_type=AgentInputs)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/cohere-librarian/README.md |
# cohere-librarian
This template turns Cohere into a librarian.
It demonstrates the use of a router to switch between chains that can handle different things: a vector database with Cohere embeddings; a chat bot that has a prompt with some information about the library; and finally a RAG chatbot that has access to the internet.
For a fuller demo of book recommendations, consider replacing `books_with_blurbs.csv` with a larger sample from the following dataset: https://www.kaggle.com/datasets/jdobrow/57000-books-with-metadata-and-blurbs/ .
## Environment Setup
Set the `COHERE_API_KEY` environment variable to access the Cohere models.
## Usage
To use this package, you should first have the LangChain CLI installed:
```shell
pip install -U langchain-cli
```
To create a new LangChain project and install this as the only package, you can do:
```shell
langchain app new my-app --package cohere-librarian
```
If you want to add this to an existing project, you can just run:
```shell
langchain app add cohere-librarian
```
And add the following code to your `server.py` file:
```python
from cohere_librarian.chain import chain as cohere_librarian_chain
add_routes(app, cohere_librarian_chain, path="/cohere-librarian")
```
(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor and debug LangChain applications.
You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section
```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default"
```
If you are inside this directory, then you can spin up a LangServe instance directly by:
```shell
langchain serve
```
This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)
We can see all templates at [http://localhost:8000/docs](http://localhost:8000/docs)
We can access the playground at [http://localhost:8000/cohere-librarian/playground](http://localhost:8000/cohere-librarian/playground)
We can access the template from code with:
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/cohere-librarian")
```
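The chain expects a `message` field (see `ChainInput` in `chain.py`), so a minimal invocation sketch looks like:
```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/cohere-librarian")
# Illustrative request; the router picks book recommendations, library info, or web RAG.
print(runnable.invoke({"message": "Can you recommend an early science fiction novel?"}))
```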
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/cohere-librarian/cohere_librarian/__init__.py | from .chain import chain
__all__ = ["chain"]
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/cohere-librarian/cohere_librarian/blurb_matcher.py | import csv
from langchain.chains.question_answering import load_qa_chain
from langchain_community.embeddings import CohereEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.prompts import PromptTemplate
from .chat import chat
csv_file = open("data/books_with_blurbs.csv", "r")
csv_reader = csv.reader(csv_file)
csv_data = list(csv_reader)
parsed_data = [
{
"id": x[0],
"title": x[1],
"author": x[2],
"year": x[3],
"publisher": x[4],
"blurb": x[5],
}
for x in csv_data
]
parsed_data[1]
embeddings = CohereEmbeddings()
docsearch = Chroma.from_texts(
[x["title"] for x in parsed_data], embeddings, metadatas=parsed_data
).as_retriever()
prompt_template = """
{context}
Use the book recommendations to suggest books for the user to read.
Only use the titles of the books, do not make up titles. Format the response as
a bulleted list prefixed by a relevant message.
User: {message}"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "message"]
)
book_rec_chain = {
"input_documents": lambda x: docsearch.invoke(x["message"]),
"message": lambda x: x["message"],
} | load_qa_chain(chat, chain_type="stuff", prompt=PROMPT)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/cohere-librarian/cohere_librarian/chain.py | from langchain.pydantic_v1 import BaseModel
from .router import branched_chain
class ChainInput(BaseModel):
message: str
chain = branched_chain.with_types(input_type=ChainInput)
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/cohere-librarian/cohere_librarian/chat.py | from langchain_community.llms import Cohere
chat = Cohere()
| Wed, 26 Jun 2024 13:15:51 GMT |
https://github.com/langchain-ai/langchain/blob/master/templates/cohere-librarian/cohere_librarian/library_info.py | from langchain_core.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from .chat import chat
librarian_prompt = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(
"""
You are a librarian at cohere community library. Your job is to
help recommend people books to read based on their interests and
preferences. You also give information about the library.
The library opens at 8am and closes at 9pm daily. It is closed on
Sundays.
Please answer the following message:
"""
),
HumanMessagePromptTemplate.from_template("{message}"),
]
)
library_info = librarian_prompt | chat
| Wed, 26 Jun 2024 13:15:51 GMT |